summaryrefslogtreecommitdiff
path: root/openstackclient
diff options
context:
space:
mode:
Diffstat (limited to 'openstackclient')
-rw-r--r--openstackclient/api/api.py2
-rw-r--r--openstackclient/api/compute_v2.py2
-rw-r--r--openstackclient/api/image_v2.py17
-rw-r--r--openstackclient/common/project_purge.py4
-rw-r--r--openstackclient/common/quota.py231
-rw-r--r--openstackclient/common/versions.py102
-rw-r--r--openstackclient/compute/v2/flavor.py28
-rw-r--r--openstackclient/compute/v2/host.py2
-rw-r--r--openstackclient/compute/v2/server.py257
-rw-r--r--openstackclient/compute/v2/service.py26
-rw-r--r--openstackclient/compute/v2/usage.py50
-rw-r--r--openstackclient/identity/common.py19
-rw-r--r--openstackclient/identity/v3/endpoint.py2
-rw-r--r--openstackclient/identity/v3/implied_role.py6
-rw-r--r--openstackclient/identity/v3/limit.py272
-rw-r--r--openstackclient/identity/v3/registered_limit.py299
-rw-r--r--openstackclient/identity/v3/role.py33
-rw-r--r--openstackclient/identity/v3/role_assignment.py35
-rw-r--r--openstackclient/identity/v3/token.py6
-rw-r--r--openstackclient/image/v2/image.py110
-rw-r--r--openstackclient/network/client.py52
-rw-r--r--openstackclient/network/common.py25
-rw-r--r--openstackclient/network/v2/floating_ip.py153
-rw-r--r--openstackclient/network/v2/floating_ip_pool.py25
-rw-r--r--openstackclient/network/v2/network.py22
-rw-r--r--openstackclient/network/v2/network_agent.py2
-rw-r--r--openstackclient/network/v2/network_qos_policy.py2
-rw-r--r--openstackclient/network/v2/network_qos_rule.py5
-rw-r--r--openstackclient/network/v2/port.py61
-rw-r--r--openstackclient/network/v2/router.py89
-rw-r--r--openstackclient/network/v2/security_group.py43
-rw-r--r--openstackclient/network/v2/subnet.py7
-rw-r--r--openstackclient/network/v2/subnet_pool.py35
-rw-r--r--openstackclient/tests/functional/base.py7
-rw-r--r--openstackclient/tests/functional/common/test_quota.py32
-rw-r--r--openstackclient/tests/functional/common/test_versions.py31
-rw-r--r--openstackclient/tests/functional/compute/v2/test_aggregate.py18
-rw-r--r--openstackclient/tests/functional/compute/v2/test_flavor.py6
-rw-r--r--openstackclient/tests/functional/compute/v2/test_server.py50
-rw-r--r--openstackclient/tests/functional/identity/v2/test_project.py2
-rw-r--r--openstackclient/tests/functional/identity/v3/common.py101
-rw-r--r--openstackclient/tests/functional/identity/v3/test_limit.py221
-rw-r--r--openstackclient/tests/functional/identity/v3/test_registered_limit.py198
-rw-r--r--openstackclient/tests/functional/identity/v3/test_role.py25
-rw-r--r--openstackclient/tests/functional/image/base.py24
-rw-r--r--openstackclient/tests/functional/image/v1/test_image.py55
-rw-r--r--openstackclient/tests/functional/image/v2/test_image.py132
-rw-r--r--openstackclient/tests/functional/network/v2/test_address_scope.py25
-rw-r--r--openstackclient/tests/functional/network/v2/test_network.py71
-rw-r--r--openstackclient/tests/functional/network/v2/test_network_flavor.py10
-rw-r--r--openstackclient/tests/functional/network/v2/test_network_flavor_profile.py30
-rw-r--r--openstackclient/tests/functional/network/v2/test_network_meter.py28
-rw-r--r--openstackclient/tests/functional/network/v2/test_router.py5
-rwxr-xr-xopenstackclient/tests/functional/run_stestr.sh16
-rw-r--r--openstackclient/tests/functional/volume/v1/test_snapshot.py2
-rw-r--r--openstackclient/tests/functional/volume/v1/test_transfer_request.py37
-rw-r--r--openstackclient/tests/functional/volume/v1/test_volume_type.py115
-rw-r--r--openstackclient/tests/functional/volume/v2/test_backup.py58
-rw-r--r--openstackclient/tests/functional/volume/v2/test_qos.py5
-rw-r--r--openstackclient/tests/functional/volume/v2/test_snapshot.py2
-rw-r--r--openstackclient/tests/functional/volume/v2/test_transfer_request.py122
-rw-r--r--openstackclient/tests/functional/volume/v2/test_volume_type.py132
-rw-r--r--openstackclient/tests/functional/volume/v3/test_transfer_request.py2
-rw-r--r--openstackclient/tests/unit/common/test_project_purge.py30
-rw-r--r--openstackclient/tests/unit/common/test_quota.py87
-rw-r--r--openstackclient/tests/unit/compute/v2/fakes.py37
-rw-r--r--openstackclient/tests/unit/compute/v2/test_flavor.py136
-rw-r--r--openstackclient/tests/unit/compute/v2/test_host.py7
-rw-r--r--openstackclient/tests/unit/compute/v2/test_server.py437
-rw-r--r--openstackclient/tests/unit/compute/v2/test_service.py10
-rw-r--r--openstackclient/tests/unit/compute/v2/test_usage.py26
-rw-r--r--openstackclient/tests/unit/fakes.py1
-rw-r--r--openstackclient/tests/unit/identity/v3/fakes.py50
-rw-r--r--openstackclient/tests/unit/identity/v3/test_endpoint.py41
-rw-r--r--openstackclient/tests/unit/identity/v3/test_implied_role.py32
-rw-r--r--openstackclient/tests/unit/identity/v3/test_limit.py383
-rw-r--r--openstackclient/tests/unit/identity/v3/test_registered_limit.py510
-rw-r--r--openstackclient/tests/unit/identity/v3/test_role_assignment.py42
-rw-r--r--openstackclient/tests/unit/image/v2/test_image.py89
-rw-r--r--openstackclient/tests/unit/network/v2/fakes.py28
-rw-r--r--openstackclient/tests/unit/network/v2/test_floating_ip_network.py86
-rw-r--r--openstackclient/tests/unit/network/v2/test_network.py12
-rw-r--r--openstackclient/tests/unit/network/v2/test_port.py57
-rw-r--r--openstackclient/tests/unit/network/v2/test_router.py199
-rw-r--r--openstackclient/tests/unit/network/v2/test_security_group_network.py174
-rw-r--r--openstackclient/tests/unit/network/v2/test_subnet.py30
-rw-r--r--openstackclient/tests/unit/network/v2/test_subnet_pool.py35
-rw-r--r--openstackclient/tests/unit/volume/test_find_resource.py4
-rw-r--r--openstackclient/tests/unit/volume/v2/fakes.py185
-rw-r--r--openstackclient/tests/unit/volume/v2/test_backup.py6
-rw-r--r--openstackclient/tests/unit/volume/v2/test_backup_record.py114
-rw-r--r--openstackclient/tests/unit/volume/v2/test_volume.py214
-rw-r--r--openstackclient/tests/unit/volume/v2/test_volume_backend.py168
-rw-r--r--openstackclient/volume/client.py21
-rw-r--r--openstackclient/volume/v2/backup.py4
-rw-r--r--openstackclient/volume/v2/backup_record.py82
-rw-r--r--openstackclient/volume/v2/volume.py111
-rw-r--r--openstackclient/volume/v2/volume_backend.py113
98 files changed, 5995 insertions, 1152 deletions
diff --git a/openstackclient/api/api.py b/openstackclient/api/api.py
index 04d88f31..7e2fe38f 100644
--- a/openstackclient/api/api.py
+++ b/openstackclient/api/api.py
@@ -186,7 +186,6 @@ class BaseAPI(KeystoneSession):
ret = self._request(
'POST',
path,
- # service=self.service_type,
json=body,
params=params,
)
@@ -194,7 +193,6 @@ class BaseAPI(KeystoneSession):
ret = self._request(
'GET',
path,
- # service=self.service_type,
params=params,
)
try:
diff --git a/openstackclient/api/compute_v2.py b/openstackclient/api/compute_v2.py
index 0c89e912..7dc4e446 100644
--- a/openstackclient/api/compute_v2.py
+++ b/openstackclient/api/compute_v2.py
@@ -52,7 +52,7 @@ class APIv2(api.BaseAPI):
value = int(value)
except (TypeError, ValueError):
if not msg:
- msg = "%s is not an integer" % value
+ msg = _("%s is not an integer") % value
raise InvalidValue(msg)
return value
diff --git a/openstackclient/api/image_v2.py b/openstackclient/api/image_v2.py
index c3628121..d0163189 100644
--- a/openstackclient/api/image_v2.py
+++ b/openstackclient/api/image_v2.py
@@ -31,6 +31,7 @@ class APIv2(image_v1.APIv1):
detailed=False,
public=False,
private=False,
+ community=False,
shared=False,
**filter
):
@@ -44,25 +45,29 @@ class APIv2(image_v1.APIv1):
Return public images if True
:param private:
Return private images if True
+ :param community:
+ Return community images if True
:param shared:
Return shared images if True
- If public, private and shared are all True or all False then all
- images are returned. All arguments False is equivalent to no filter
- and all images are returned. All arguments True is a filter that
- includes all public, private and shared images which is the same set
- as all images.
+ If public, private, community and shared are all True or all False
+ then all images are returned. All arguments False is equivalent to no
+ filter and all images are returned. All arguments True is a filter
+ that includes all public, private, community and shared images which
+ is the same set as all images.
http://docs.openstack.org/api/openstack-image-service/2.0/content/list-images.html
"""
- if not public and not private and not shared:
+ if not public and not private and not community and not shared:
# No filtering for all False
filter.pop('visibility', None)
elif public:
filter['visibility'] = 'public'
elif private:
filter['visibility'] = 'private'
+ elif community:
+ filter['visibility'] = 'community'
elif shared:
filter['visibility'] = 'shared'
diff --git a/openstackclient/common/project_purge.py b/openstackclient/common/project_purge.py
index 5b1d0072..76ed4563 100644
--- a/openstackclient/common/project_purge.py
+++ b/openstackclient/common/project_purge.py
@@ -85,7 +85,7 @@ class ProjectPurge(command.Command):
# servers
try:
compute_client = self.app.client_manager.compute
- search_opts = {'tenant_id': project_id}
+ search_opts = {'tenant_id': project_id, 'all_tenants': True}
data = compute_client.servers.list(search_opts=search_opts)
self.delete_objects(
compute_client.servers.delete, data, 'server', dry_run)
@@ -110,7 +110,7 @@ class ProjectPurge(command.Command):
# volumes, snapshots, backups
volume_client = self.app.client_manager.volume
- search_opts = {'project_id': project_id}
+ search_opts = {'project_id': project_id, 'all_tenants': True}
try:
data = volume_client.volume_snapshots.list(search_opts=search_opts)
self.delete_objects(
diff --git a/openstackclient/common/quota.py b/openstackclient/common/quota.py
index 282ea428..dba6873f 100644
--- a/openstackclient/common/quota.py
+++ b/openstackclient/common/quota.py
@@ -97,12 +97,164 @@ def _xform_get_quota(data, value, keys):
return res
-class ListQuota(command.Lister):
- _description = _("List quotas for all projects "
- "with non-default quota values")
+class BaseQuota(object):
+ def _get_project(self, parsed_args):
+ if parsed_args.project is not None:
+ identity_client = self.app.client_manager.identity
+ project = utils.find_resource(
+ identity_client.projects,
+ parsed_args.project,
+ )
+ project_id = project.id
+ project_name = project.name
+ elif self.app.client_manager.auth_ref:
+ # Get the project from the current auth
+ project = self.app.client_manager.auth_ref
+ project_id = project.project_id
+ project_name = project.project_name
+ else:
+ project = None
+ project_id = None
+ project_name = None
+ project_info = {}
+ project_info['id'] = project_id
+ project_info['name'] = project_name
+ return project_info
+
+ def get_compute_quota(self, client, parsed_args):
+ quota_class = (
+ parsed_args.quota_class if 'quota_class' in parsed_args else False)
+ detail = parsed_args.detail if 'detail' in parsed_args else False
+ default = parsed_args.default if 'default' in parsed_args else False
+ try:
+ if quota_class:
+ quota = client.quota_classes.get(parsed_args.project)
+ else:
+ project_info = self._get_project(parsed_args)
+ project = project_info['id']
+ if default:
+ quota = client.quotas.defaults(project)
+ else:
+ quota = client.quotas.get(project, detail=detail)
+ except Exception as e:
+ if type(e).__name__ == 'EndpointNotFound':
+ return {}
+ else:
+ raise
+ return quota._info
+
+ def get_volume_quota(self, client, parsed_args):
+ quota_class = (
+ parsed_args.quota_class if 'quota_class' in parsed_args else False)
+ default = parsed_args.default if 'default' in parsed_args else False
+ try:
+ if quota_class:
+ quota = client.quota_classes.get(parsed_args.project)
+ else:
+ project_info = self._get_project(parsed_args)
+ project = project_info['id']
+ if default:
+ quota = client.quotas.defaults(project)
+ else:
+ quota = client.quotas.get(project)
+ except Exception as e:
+ if type(e).__name__ == 'EndpointNotFound':
+ return {}
+ else:
+ raise
+ return quota._info
+
+ def get_network_quota(self, parsed_args):
+ quota_class = (
+ parsed_args.quota_class if 'quota_class' in parsed_args else False)
+ detail = parsed_args.detail if 'detail' in parsed_args else False
+ default = parsed_args.default if 'default' in parsed_args else False
+ if quota_class:
+ return {}
+ if self.app.client_manager.is_network_endpoint_enabled():
+ project_info = self._get_project(parsed_args)
+ project = project_info['id']
+ client = self.app.client_manager.network
+ if default:
+ network_quota = client.get_quota_default(project)
+ if type(network_quota) is not dict:
+ network_quota = network_quota.to_dict()
+ else:
+ network_quota = client.get_quota(project,
+ details=detail)
+ if type(network_quota) is not dict:
+ network_quota = network_quota.to_dict()
+ if detail:
+ # NOTE(slaweq): Neutron returns values with key "used" but
+ # Nova for example returns same data with key "in_use"
+ # instead.
+ # Because of that we need to convert Neutron key to
+ # the same as is returned from Nova to make result
+ # more consistent
+ for key, values in network_quota.items():
+ if type(values) is dict and "used" in values:
+ values[u'in_use'] = values.pop("used")
+ network_quota[key] = values
+ return network_quota
+ else:
+ return {}
+
+
+class ListQuota(command.Lister, BaseQuota):
+ _description = _(
+ "List quotas for all projects with non-default quota values or "
+ "list detailed quota information for requested project")
+
+ def _get_detailed_quotas(self, parsed_args):
+ columns = (
+ 'resource',
+ 'in_use',
+ 'reserved',
+ 'limit'
+ )
+ column_headers = (
+ 'Resource',
+ 'In Use',
+ 'Reserved',
+ 'Limit'
+ )
+ quotas = {}
+ if parsed_args.compute:
+ quotas.update(self.get_compute_quota(
+ self.app.client_manager.compute, parsed_args))
+ if parsed_args.network:
+ quotas.update(self.get_network_quota(parsed_args))
+
+ result = []
+ for resource, values in quotas.items():
+ # NOTE(slaweq): there is no detailed quotas info for some resources
+ # and it shouldn't be displayed here
+ if type(values) is dict:
+ result.append({
+ 'resource': resource,
+ 'in_use': values.get('in_use'),
+ 'reserved': values.get('reserved'),
+ 'limit': values.get('limit')
+ })
+ return (column_headers,
+ (utils.get_dict_properties(
+ s, columns,
+ ) for s in result))
def get_parser(self, prog_name):
parser = super(ListQuota, self).get_parser(prog_name)
+ parser.add_argument(
+ '--project',
+ metavar='<project>',
+ help=_('List quotas for this project <project> (name or ID)'),
+ )
+ parser.add_argument(
+ '--detail',
+ dest='detail',
+ action='store_true',
+ default=False,
+ help=_('Show details about quotas usage')
+ )
option = parser.add_mutually_exclusive_group(required=True)
option.add_argument(
'--compute',
@@ -130,6 +282,8 @@ class ListQuota(command.Lister):
project_ids = [getattr(p, 'id', '') for p in projects]
if parsed_args.compute:
+ if parsed_args.detail:
+ return self._get_detailed_quotas(parsed_args)
compute_client = self.app.client_manager.compute
for p in project_ids:
try:
@@ -193,6 +347,9 @@ class ListQuota(command.Lister):
) for s in result))
if parsed_args.volume:
+ if parsed_args.detail:
+ LOG.warning("Volume service doesn't provide detailed quota"
+ " information")
volume_client = self.app.client_manager.volume
for p in project_ids:
try:
@@ -243,6 +400,8 @@ class ListQuota(command.Lister):
) for s in result))
if parsed_args.network:
+ if parsed_args.detail:
+ return self._get_detailed_quotas(parsed_args)
client = self.app.client_manager.network
for p in project_ids:
try:
@@ -410,7 +569,7 @@ class SetQuota(command.Command):
**network_kwargs)
-class ShowQuota(command.ShowOne):
+class ShowQuota(command.ShowOne, BaseQuota):
_description = _("Show quotas for project or class")
def get_parser(self, prog_name):
@@ -438,62 +597,6 @@ class ShowQuota(command.ShowOne):
)
return parser
- def _get_project(self, parsed_args):
- if parsed_args.project is not None:
- identity_client = self.app.client_manager.identity
- project = utils.find_resource(
- identity_client.projects,
- parsed_args.project,
- )
- project_id = project.id
- project_name = project.name
- elif self.app.client_manager.auth_ref:
- # Get the project from the current auth
- project = self.app.client_manager.auth_ref
- project_id = project.project_id
- project_name = project.project_name
- else:
- project = None
- project_id = None
- project_name = None
- project_info = {}
- project_info['id'] = project_id
- project_info['name'] = project_name
- return project_info
-
- def get_compute_volume_quota(self, client, parsed_args):
- try:
- if parsed_args.quota_class:
- quota = client.quota_classes.get(parsed_args.project)
- else:
- project_info = self._get_project(parsed_args)
- project = project_info['id']
- if parsed_args.default:
- quota = client.quotas.defaults(project)
- else:
- quota = client.quotas.get(project)
- except Exception as e:
- if type(e).__name__ == 'EndpointNotFound':
- return {}
- else:
- raise
- return quota._info
-
- def get_network_quota(self, parsed_args):
- if parsed_args.quota_class:
- return {}
- if self.app.client_manager.is_network_endpoint_enabled():
- project_info = self._get_project(parsed_args)
- project = project_info['id']
- client = self.app.client_manager.network
- if parsed_args.default:
- network_quota = client.get_quota_default(project)
- else:
- network_quota = client.get_quota(project)
- return network_quota
- else:
- return {}
-
def take_action(self, parsed_args):
compute_client = self.app.client_manager.compute
@@ -504,10 +607,10 @@ class ShowQuota(command.ShowOne):
# does not exist. If this is determined to be the
# intended behaviour of the API we will validate
# the argument with Identity ourselves later.
- compute_quota_info = self.get_compute_volume_quota(compute_client,
- parsed_args)
- volume_quota_info = self.get_compute_volume_quota(volume_client,
- parsed_args)
+ compute_quota_info = self.get_compute_quota(compute_client,
+ parsed_args)
+ volume_quota_info = self.get_volume_quota(volume_client,
+ parsed_args)
network_quota_info = self.get_network_quota(parsed_args)
# NOTE(reedip): Remove the below check once requirement for
# Openstack SDK is fixed to version 0.9.12 and above
diff --git a/openstackclient/common/versions.py b/openstackclient/common/versions.py
new file mode 100644
index 00000000..3c267bfe
--- /dev/null
+++ b/openstackclient/common/versions.py
@@ -0,0 +1,102 @@
+# Copyright 2018 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Versions Action Implementation"""
+
+from osc_lib.command import command
+
+from openstackclient.i18n import _
+
+
+class ShowVersions(command.Lister):
+ _description = _("Show available versions of services")
+
+ def get_parser(self, prog_name):
+ parser = super(ShowVersions, self).get_parser(prog_name)
+ interface_group = parser.add_mutually_exclusive_group()
+ interface_group.add_argument(
+ "--all-interfaces",
+ dest="is_all_interfaces",
+ action="store_true",
+ default=False,
+ help=_("Show values for all interfaces"),
+ )
+ interface_group.add_argument(
+ '--interface',
+ default='public',
+ metavar='<interface>',
+ help=_('Show versions for a specific interface.'),
+ )
+ parser.add_argument(
+ '--region-name',
+ metavar='<region_name>',
+ help=_('Show versions for a specific region.'),
+ )
+ parser.add_argument(
+ '--service',
+ metavar='<service>',
+ help=_('Show versions for a specific service.'),
+ )
+ parser.add_argument(
+ '--status',
+ metavar='<status>',
+ help=_('Show versions for a specific status.'
+ ' [Valid values are SUPPORTED, CURRENT,'
+ ' DEPRECATED, EXPERIMENTAL]'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+
+ interface = parsed_args.interface
+ if parsed_args.is_all_interfaces:
+ interface = None
+
+ session = self.app.client_manager.session
+ version_data = session.get_all_version_data(
+ interface=interface,
+ region_name=parsed_args.region_name,
+ service_type=parsed_args.service)
+
+ columns = [
+ "Region Name",
+ "Service Type",
+ "Version",
+ "Status",
+ "Endpoint",
+ "Min Microversion",
+ "Max Microversion",
+ ]
+
+ status = parsed_args.status
+ if status:
+ status = status.upper()
+
+ versions = []
+ for region_name, interfaces in version_data.items():
+ for interface, services in interfaces.items():
+ for service_type, service_versions in services.items():
+ for data in service_versions:
+ if status and status != data['status']:
+ continue
+ versions.append((
+ region_name,
+ service_type,
+ data['version'],
+ data['status'],
+ data['url'],
+ data['min_microversion'],
+ data['max_microversion'],
+ ))
+ return (columns, versions)
diff --git a/openstackclient/compute/v2/flavor.py b/openstackclient/compute/v2/flavor.py
index 0f5dd742..2cc5f1e8 100644
--- a/openstackclient/compute/v2/flavor.py
+++ b/openstackclient/compute/v2/flavor.py
@@ -17,6 +17,7 @@
import logging
+from novaclient import api_versions
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
@@ -134,6 +135,12 @@ class CreateFlavor(command.ShowOne):
help=_("Allow <project> to access private flavor (name or ID) "
"(Must be used with --private option)"),
)
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("Description for the flavor (Supported by API versions "
+ "'2.55' - '2.latest')")
+ )
identity_common.add_project_domain_option_to_parser(parser)
return parser
@@ -145,6 +152,11 @@ class CreateFlavor(command.ShowOne):
msg = _("--project is only allowed with --private")
raise exceptions.CommandError(msg)
+ if parsed_args.description:
+ if compute_client.api_version < api_versions.APIVersion("2.55"):
+ msg = _("--os-compute-api-version 2.55 or later is required")
+ raise exceptions.CommandError(msg)
+
args = (
parsed_args.name,
parsed_args.ram,
@@ -154,7 +166,8 @@ class CreateFlavor(command.ShowOne):
parsed_args.ephemeral,
parsed_args.swap,
parsed_args.rxtx_factor,
- parsed_args.public
+ parsed_args.public,
+ parsed_args.description
)
flavor = compute_client.flavors.create(*args)
@@ -332,6 +345,12 @@ class SetFlavor(command.Command):
help=_('Set flavor access to project (name or ID) '
'(admin only)'),
)
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_("Set description for the flavor (Supported by API "
+ "versions '2.55' - '2.latest')")
+ )
identity_common.add_project_domain_option_to_parser(parser)
return parser
@@ -380,6 +399,13 @@ class SetFlavor(command.Command):
raise exceptions.CommandError(_("Command Failed: One or more of"
" the operations failed"))
+ if parsed_args.description:
+ if compute_client.api_version < api_versions.APIVersion("2.55"):
+ msg = _("--os-compute-api-version 2.55 or later is required")
+ raise exceptions.CommandError(msg)
+ compute_client.flavors.update(flavor=parsed_args.flavor,
+ description=parsed_args.description)
+
class ShowFlavor(command.ShowOne):
_description = _("Display flavor details")
diff --git a/openstackclient/compute/v2/host.py b/openstackclient/compute/v2/host.py
index 9fdfd927..07c92a8c 100644
--- a/openstackclient/compute/v2/host.py
+++ b/openstackclient/compute/v2/host.py
@@ -97,7 +97,7 @@ class SetHost(command.Command):
compute_client.api.host_set(
parsed_args.host,
- kwargs
+ **kwargs
)
diff --git a/openstackclient/compute/v2/server.py b/openstackclient/compute/v2/server.py
index c80b5a3c..306345bd 100644
--- a/openstackclient/compute/v2/server.py
+++ b/openstackclient/compute/v2/server.py
@@ -21,7 +21,9 @@ import io
import logging
import os
+from novaclient import api_versions
from novaclient.v2 import servers
+from openstack import exceptions as sdk_exceptions
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
@@ -120,17 +122,21 @@ def _prefix_checked_value(prefix):
return func
-def _prep_server_detail(compute_client, image_client, server):
+def _prep_server_detail(compute_client, image_client, server, refresh=True):
"""Prepare the detailed server dict for printing
:param compute_client: a compute client instance
+ :param image_client: an image client instance
:param server: a Server resource
+ :param refresh: Flag indicating if ``server`` is already the latest version
+ or if it needs to be refreshed, for example when showing
+ the latest details of a server after creating it.
:rtype: a dict of server details
"""
- info = server._info.copy()
-
- server = utils.find_resource(compute_client.servers, info['id'])
- info.update(server._info)
+ info = server.to_dict()
+ if refresh:
+ server = utils.find_resource(compute_client.servers, info['id'])
+ info.update(server.to_dict())
# Convert the image blob to a name
image_info = info.get('image', {})
@@ -144,12 +150,18 @@ def _prep_server_detail(compute_client, image_client, server):
# Convert the flavor blob to a name
flavor_info = info.get('flavor', {})
- flavor_id = flavor_info.get('id', '')
- try:
- flavor = utils.find_resource(compute_client.flavors, flavor_id)
- info['flavor'] = "%s (%s)" % (flavor.name, flavor_id)
- except Exception:
- info['flavor'] = flavor_id
+ # Microversion 2.47 puts the embedded flavor into the server response
+ # body but omits the id, so if not present we just expose the flavor
+ # dict in the server output.
+ if 'id' in flavor_info:
+ flavor_id = flavor_info.get('id', '')
+ try:
+ flavor = utils.find_resource(compute_client.flavors, flavor_id)
+ info['flavor'] = "%s (%s)" % (flavor.name, flavor_id)
+ except Exception:
+ info['flavor'] = flavor_id
+ else:
+ info['flavor'] = utils.format_dict(flavor_info)
if 'os-extended-volumes:volumes_attached' in info:
info.update(
@@ -178,7 +190,7 @@ def _prep_server_detail(compute_client, image_client, server):
if 'tenant_id' in info:
info['project_id'] = info.pop('tenant_id')
- # Map power state num to meanful string
+ # Map power state num to meaningful string
if 'OS-EXT-STS:power_state' in info:
info['OS-EXT-STS:power_state'] = _format_servers_list_power_state(
info['OS-EXT-STS:power_state'])
@@ -240,13 +252,16 @@ class AddFloatingIP(network_common.NetworkAndComputeCommand):
parser.add_argument(
"ip_address",
metavar="<ip-address>",
- help=_("Floating IP address to assign to server (IP only)"),
+ help=_("Floating IP address to assign to the first available "
+ "server port (IP only)"),
)
parser.add_argument(
"--fixed-ip-address",
metavar="<ip-address>",
help=_(
- "Fixed IP address to associate with this floating IP address"
+ "Fixed IP address to associate with this floating IP address. "
+ "The first server port containing the fixed IP address will "
+ "be used"
),
)
return parser
@@ -263,12 +278,45 @@ class AddFloatingIP(network_common.NetworkAndComputeCommand):
compute_client.servers,
parsed_args.server,
)
- port = list(client.ports(device_id=server.id))[0]
- attrs['port_id'] = port.id
+ ports = list(client.ports(device_id=server.id))
+ # If the fixed IP address was specified, we need to find the
+ # corresponding port.
if parsed_args.fixed_ip_address:
- attrs['fixed_ip_address'] = parsed_args.fixed_ip_address
-
- client.update_ip(obj, **attrs)
+ fip_address = parsed_args.fixed_ip_address
+ attrs['fixed_ip_address'] = fip_address
+ for port in ports:
+ for ip in port.fixed_ips:
+ if ip['ip_address'] == fip_address:
+ attrs['port_id'] = port.id
+ break
+ else:
+ continue
+ break
+ if 'port_id' not in attrs:
+ msg = _('No port found for fixed IP address %s')
+ raise exceptions.CommandError(msg % fip_address)
+ client.update_ip(obj, **attrs)
+ else:
+ # It's possible that one or more ports are not connected to a
+ # router and thus could fail association with a floating IP.
+ # Try each port until one succeeds. If none succeed, re-raise the
+ # last exception.
+ error = None
+ for port in ports:
+ attrs['port_id'] = port.id
+ try:
+ client.update_ip(obj, **attrs)
+ except sdk_exceptions.NotFoundException as exp:
+ # 404 ExternalGatewayForFloatingIPNotFound from neutron
+ LOG.info('Skipped port %s because it is not attached to '
+ 'an external gateway', port.id)
+ error = exp
+ continue
+ else:
+ error = None
+ break
+ if error:
+ raise error
def take_action_compute(self, client, parsed_args):
client.api.floating_ip_add(
@@ -556,7 +604,7 @@ class CreateServer(command.ShowOne):
type=_prefix_checked_value('port-id='),
help=_("Create a NIC on the server and connect it to port. "
"Specify option multiple times to create multiple NICs. "
- "This is a wrapper for the '--nic port-id=<pord>' "
+ "This is a wrapper for the '--nic port-id=<port>' "
"parameter that provides simple syntax for the standard "
"use case of connecting a new server to a given port. For "
"more advanced use cases, refer to the '--nic' parameter."),
@@ -798,9 +846,14 @@ class CreateServer(command.ShowOne):
raise exceptions.CommandError(msg)
nics = nics[0]
else:
- # Default to empty list if nothing was specified, let nova side to
- # decide the default behavior.
- nics = []
+ # Compute API version >= 2.37 requires a value, so default to
+ # 'auto' to maintain legacy behavior if a nic wasn't specified.
+ if compute_client.api_version >= api_versions.APIVersion('2.37'):
+ nics = 'auto'
+ else:
+ # Default to empty list if nothing was specified, let nova
+ # side to decide the default behavior.
+ nics = []
# Check security group exist and convert ID to name
security_group_names = []
@@ -946,13 +999,9 @@ class DeleteServer(command.Command):
compute_client.servers, server)
compute_client.servers.delete(server_obj.id)
if parsed_args.wait:
- if utils.wait_for_delete(
- compute_client.servers,
- server_obj.id,
- callback=_show_progress,
- ):
- self.app.stdout.write('\n')
- else:
+ if not utils.wait_for_delete(compute_client.servers,
+ server_obj.id,
+ callback=_show_progress):
LOG.error(_('Error deleting server: %s'),
server_obj.id)
self.app.stdout.write(_('Error deleting server\n'))
@@ -1034,11 +1083,22 @@ class ListServer(command.Lister):
default=False,
help=_('List additional fields in output'),
)
- parser.add_argument(
+ name_lookup_group = parser.add_mutually_exclusive_group()
+ name_lookup_group.add_argument(
'-n', '--no-name-lookup',
action='store_true',
default=False,
- help=_('Skip flavor and image name lookup.'),
+ help=_('Skip flavor and image name lookup. '
+ 'Mutually exclusive with "--name-lookup-one-by-one"'
+ ' option.'),
+ )
+ name_lookup_group.add_argument(
+ '--name-lookup-one-by-one',
+ action='store_true',
+ default=False,
+ help=_('When looking up flavor and image names, look them up '
+ 'one by one as needed instead of all together (default). '
+ 'Mutually exclusive with "--no-name-lookup|-n" option.'),
)
parser.add_argument(
'--marker',
@@ -1213,32 +1273,55 @@ class ListServer(command.Lister):
limit=parsed_args.limit)
images = {}
- # Create a dict that maps image_id to image object.
- # Needed so that we can display the "Image Name" column.
- # "Image Name" is not crucial, so we swallow any exceptions.
- if not parsed_args.no_name_lookup:
- try:
- images_list = self.app.client_manager.image.images.list()
- for i in images_list:
- images[i.id] = i
- except Exception:
- pass
-
flavors = {}
- # Create a dict that maps flavor_id to flavor object.
- # Needed so that we can display the "Flavor Name" column.
- # "Flavor Name" is not crucial, so we swallow any exceptions.
- if not parsed_args.no_name_lookup:
- try:
- flavors_list = compute_client.flavors.list()
- for i in flavors_list:
- flavors[i.id] = i
- except Exception:
- pass
+ if data and not parsed_args.no_name_lookup:
+ # Create a dict that maps image_id to image object.
+ # Needed so that we can display the "Image Name" column.
+ # "Image Name" is not crucial, so we swallow any exceptions.
+ if parsed_args.name_lookup_one_by_one or image_id:
+ for i_id in set(filter(lambda x: x is not None,
+ (s.image.get('id') for s in data))):
+ try:
+ images[i_id] = image_client.images.get(i_id)
+ except Exception:
+ pass
+ else:
+ try:
+ images_list = image_client.images.list()
+ for i in images_list:
+ images[i.id] = i
+ except Exception:
+ pass
+
+ # Create a dict that maps flavor_id to flavor object.
+ # Needed so that we can display the "Flavor Name" column.
+ # "Flavor Name" is not crucial, so we swallow any exceptions.
+ if parsed_args.name_lookup_one_by_one or flavor_id:
+ for f_id in set(filter(lambda x: x is not None,
+ (s.flavor.get('id') for s in data))):
+ try:
+ flavors[f_id] = compute_client.flavors.get(f_id)
+ except Exception:
+ pass
+ else:
+ try:
+ flavors_list = compute_client.flavors.list(is_public=None)
+ for i in flavors_list:
+ flavors[i.id] = i
+ except Exception:
+ pass
# Populate image_name, image_id, flavor_name and flavor_id attributes
# of server objects so that we can display those columns.
for s in data:
+ if compute_client.api_version >= api_versions.APIVersion('2.69'):
+ # NOTE(tssurya): From 2.69, we will have the keys 'flavor'
+ # and 'image' missing in the server response during
+ # infrastructure failure situations.
+ # For those servers with partial constructs we just skip the
+ # processing of the image and flavor information.
+ if not hasattr(s, 'image') or not hasattr(s, 'flavor'):
+ continue
if 'id' in s.image:
image = images.get(s.image['id'])
if image:
@@ -1253,6 +1336,10 @@ class ListServer(command.Lister):
s.flavor_name = flavor.name
s.flavor_id = s.flavor['id']
else:
+ # TODO(mriedem): Fix this for microversion >= 2.47 where the
+ # flavor is embedded in the server response without the id.
+ # We likely need to drop the Flavor ID column in that case if
+ # --long is specified.
s.flavor_name = ''
s.flavor_id = ''
@@ -1370,11 +1457,13 @@ class MigrateServer(command.Command):
parsed_args.server,
)
if parsed_args.live:
- server.live_migrate(
- host=parsed_args.live,
- block_migration=parsed_args.block_migration,
- disk_over_commit=parsed_args.disk_overcommit,
- )
+ kwargs = {
+ 'host': parsed_args.live,
+ 'block_migration': parsed_args.block_migration
+ }
+ if compute_client.api_version < api_versions.APIVersion('2.25'):
+ kwargs['disk_over_commit'] = parsed_args.disk_overcommit
+ server.live_migrate(**kwargs)
else:
if parsed_args.block_migration or parsed_args.disk_overcommit:
raise exceptions.CommandError("--live must be specified if "
@@ -1501,10 +1590,33 @@ class RebuildServer(command.ShowOne):
help=_("Set the password on the rebuilt instance"),
)
parser.add_argument(
+ '--property',
+ metavar='<key=value>',
+ action=parseractions.KeyValueAction,
+ help=_('Set a property on the rebuilt instance '
+ '(repeat option to set multiple values)'),
+ )
+ parser.add_argument(
'--wait',
action='store_true',
help=_('Wait for rebuild to complete'),
)
+ key_group = parser.add_mutually_exclusive_group()
+ key_group.add_argument(
+ '--key-name',
+ metavar='<key-name>',
+ help=_("Set the key name of key pair on the rebuilt instance."
+ " Cannot be specified with the '--key-unset' option."
+ " (Supported by API versions '2.54' - '2.latest')"),
+ )
+ key_group.add_argument(
+ '--key-unset',
+ action='store_true',
+ default=False,
+ help=_("Unset the key name of key pair on the rebuilt instance."
+ " Cannot be specified with the '--key-name' option."
+ " (Supported by API versions '2.54' - '2.latest')"),
+ )
return parser
def take_action(self, parsed_args):
@@ -1521,10 +1633,25 @@ class RebuildServer(command.ShowOne):
compute_client.servers, parsed_args.server)
# If parsed_args.image is not set, default to the currently used one.
- image_id = parsed_args.image or server._info.get('image', {}).get('id')
+ image_id = parsed_args.image or server.to_dict().get(
+ 'image', {}).get('id')
image = utils.find_resource(image_client.images, image_id)
- server = server.rebuild(image, parsed_args.password)
+ kwargs = {}
+ if parsed_args.property:
+ kwargs['meta'] = parsed_args.property
+
+ if parsed_args.key_name or parsed_args.key_unset:
+ if compute_client.api_version < api_versions.APIVersion('2.54'):
+ msg = _('--os-compute-api-version 2.54 or later is required')
+ raise exceptions.CommandError(msg)
+
+ if parsed_args.key_unset:
+ kwargs['key_name'] = None
+ if parsed_args.key_name:
+ kwargs['key_name'] = parsed_args.key_name
+
+ server = server.rebuild(image, parsed_args.password, **kwargs)
if parsed_args.wait:
if utils.wait_for_status(
compute_client.servers.get,
@@ -1538,7 +1665,8 @@ class RebuildServer(command.ShowOne):
self.app.stdout.write(_('Error rebuilding server\n'))
raise SystemExit
- details = _prep_server_detail(compute_client, image_client, server)
+ details = _prep_server_detail(compute_client, image_client, server,
+ refresh=False)
return zip(*sorted(six.iteritems(details)))
@@ -1988,7 +2116,9 @@ class ShelveServer(command.Command):
class ShowServer(command.ShowOne):
- _description = _("Show server details")
+ _description = _(
+ "Show server details. Specify ``--os-compute-api-version 2.47`` "
+ "or higher to see the embedded flavor information for the server.")
def get_parser(self, prog_name):
parser = super(ShowServer, self).get_parser(prog_name)
@@ -2019,7 +2149,8 @@ class ShowServer(command.ShowOne):
return ({}, {})
else:
data = _prep_server_detail(compute_client,
- self.app.client_manager.image, server)
+ self.app.client_manager.image, server,
+ refresh=False)
return zip(*sorted(six.iteritems(data)))
diff --git a/openstackclient/compute/v2/service.py b/openstackclient/compute/v2/service.py
index 7331d29d..18e6d9d9 100644
--- a/openstackclient/compute/v2/service.py
+++ b/openstackclient/compute/v2/service.py
@@ -17,6 +17,7 @@
import logging
+from novaclient import api_versions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
@@ -192,18 +193,23 @@ class SetService(command.Command):
result += 1
force_down = None
- try:
- if parsed_args.down:
- force_down = True
- if parsed_args.up:
- force_down = False
- if force_down is not None:
+ if parsed_args.down:
+ force_down = True
+ if parsed_args.up:
+ force_down = False
+ if force_down is not None:
+ if compute_client.api_version < api_versions.APIVersion(
+ '2.11'):
+ msg = _('--os-compute-api-version 2.11 or later is '
+ 'required')
+ raise exceptions.CommandError(msg)
+ try:
cs.force_down(parsed_args.host, parsed_args.service,
force_down=force_down)
- except Exception:
- state = "down" if force_down else "up"
- LOG.error("Failed to set service state to %s", state)
- result += 1
+ except Exception:
+ state = "down" if force_down else "up"
+ LOG.error("Failed to set service state to %s", state)
+ result += 1
if result > 0:
msg = _("Compute service %(service)s of host %(host)s failed to "
diff --git a/openstackclient/compute/v2/usage.py b/openstackclient/compute/v2/usage.py
index 4320bf90..f84cd61d 100644
--- a/openstackclient/compute/v2/usage.py
+++ b/openstackclient/compute/v2/usage.py
@@ -15,8 +15,10 @@
"""Usage action implementations"""
+import collections
import datetime
+from novaclient import api_versions
from osc_lib.command import command
from osc_lib import utils
import six
@@ -24,6 +26,36 @@ import six
from openstackclient.i18n import _
+def _get_usage_marker(usage):
+ marker = None
+ if hasattr(usage, 'server_usages') and usage.server_usages:
+ marker = usage.server_usages[-1]['instance_id']
+ return marker
+
+
+def _get_usage_list_marker(usage_list):
+ marker = None
+ if usage_list:
+ marker = _get_usage_marker(usage_list[-1])
+ return marker
+
+
+def _merge_usage(usage, next_usage):
+ usage.server_usages.extend(next_usage.server_usages)
+ usage.total_hours += next_usage.total_hours
+ usage.total_memory_mb_usage += next_usage.total_memory_mb_usage
+ usage.total_vcpus_usage += next_usage.total_vcpus_usage
+ usage.total_local_gb_usage += next_usage.total_local_gb_usage
+
+
+def _merge_usage_list(usages, next_usage_list):
+ for next_usage in next_usage_list:
+ if next_usage.tenant_id in usages:
+ _merge_usage(usages[next_usage.tenant_id], next_usage)
+ else:
+ usages[next_usage.tenant_id] = next_usage
+
+
class ListUsage(command.Lister):
_description = _("List resource usage per project")
@@ -83,7 +115,23 @@ class ListUsage(command.Lister):
else:
end = now + datetime.timedelta(days=1)
- usage_list = compute_client.usage.list(start, end, detailed=True)
+ if compute_client.api_version < api_versions.APIVersion("2.40"):
+ usage_list = compute_client.usage.list(start, end, detailed=True)
+ else:
+ # If the number of instances used to calculate the usage is greater
+ # than CONF.api.max_limit, the usage will be split across multiple
+ # requests and the responses will need to be merged back together.
+ usages = collections.OrderedDict()
+ usage_list = compute_client.usage.list(start, end, detailed=True)
+ _merge_usage_list(usages, usage_list)
+ marker = _get_usage_list_marker(usage_list)
+ while marker:
+ next_usage_list = compute_client.usage.list(
+ start, end, detailed=True, marker=marker)
+ marker = _get_usage_list_marker(next_usage_list)
+ if marker:
+ _merge_usage_list(usages, next_usage_list)
+ usage_list = list(usages.values())
# Cache the project list
project_cache = {}
diff --git a/openstackclient/identity/common.py b/openstackclient/identity/common.py
index f36f5f73..e8251166 100644
--- a/openstackclient/identity/common.py
+++ b/openstackclient/identity/common.py
@@ -68,6 +68,25 @@ def find_service(identity_client, name_type_or_id):
raise exceptions.CommandError(msg % name_type_or_id)
+def get_resource(manager, name_type_or_id):
+ # NOTE (vishakha): Due to bug #1799153, and for any other related case
+ # where the GET resource API does not support filtering by name,
+ # osc_lib.utils.find_resource() cannot be used because that method
+ # tries to fall back to listing all resources if the requested resource
+ # cannot be fetched by name, which ends up with a NoUniqueMatch error.
+ # This new function is the replacement for osc_lib.utils.find_resource()
+ # for resources that do not support GET by name.
+ # For example: identity GET /regions.
+ """Find a resource by id or name."""
+
+ try:
+ return manager.get(name_type_or_id)
+ except identity_exc.NotFound:
+ # not found: translate to CommandError for consistent CLI error output
+ msg = _("No resource with name or id of '%s' exists")
+ raise exceptions.CommandError(msg % name_type_or_id)
+
+
def _get_token_resource(client, resource, parsed_name, parsed_domain=None):
"""Peek into the user's auth token to get resource IDs
diff --git a/openstackclient/identity/v3/endpoint.py b/openstackclient/identity/v3/endpoint.py
index 3229240e..858b5036 100644
--- a/openstackclient/identity/v3/endpoint.py
+++ b/openstackclient/identity/v3/endpoint.py
@@ -199,7 +199,7 @@ class ListEndpoint(command.Lister):
metavar='<project>',
help=_('Project to list filters (name or ID)'),
)
- common.add_project_domain_option_to_parser(list_group)
+ common.add_project_domain_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
diff --git a/openstackclient/identity/v3/implied_role.py b/openstackclient/identity/v3/implied_role.py
index c7623389..4e3df88a 100644
--- a/openstackclient/identity/v3/implied_role.py
+++ b/openstackclient/identity/v3/implied_role.py
@@ -71,7 +71,7 @@ class CreateImpliedRole(command.ShowOne):
identity_client = self.app.client_manager.identity
(prior_role_id, implied_role_id) = _get_role_ids(
identity_client, parsed_args)
- response = identity_client.roles.create_implied(
+ response = identity_client.inference_rules.create(
prior_role_id, implied_role_id)
response._info.pop('links', None)
return zip(*sorted([(k, v['id'])
@@ -101,7 +101,7 @@ class DeleteImpliedRole(command.Command):
identity_client = self.app.client_manager.identity
(prior_role_id, implied_role_id) = _get_role_ids(
identity_client, parsed_args)
- identity_client.roles.delete_implied(
+ identity_client.inference_rules.delete(
prior_role_id, implied_role_id)
@@ -125,5 +125,5 @@ class ListImpliedRole(command.Lister):
implies['name'])
identity_client = self.app.client_manager.identity
- response = identity_client.roles.list_inference_roles()
+ response = identity_client.inference_rules.list_inference_roles()
return (self._COLUMNS, _list_implied(response))
diff --git a/openstackclient/identity/v3/limit.py b/openstackclient/identity/v3/limit.py
new file mode 100644
index 00000000..f2af81e9
--- /dev/null
+++ b/openstackclient/identity/v3/limit.py
@@ -0,0 +1,272 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Limits action implementations."""
+
+import logging
+
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+import six
+
+from openstackclient.i18n import _
+from openstackclient.identity import common as common_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateLimit(command.ShowOne):
+ _description = _("Create a limit")
+
+ def get_parser(self, prog_name):
+ parser = super(CreateLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_('Description of the limit'),
+ )
+ parser.add_argument(
+ '--region',
+ metavar='<region>',
+ help=_('Region for the limit to affect.'),
+ )
+ parser.add_argument(
+ '--project',
+ metavar='<project>',
+ required=True,
+ help=_('Project to associate the resource limit to'),
+ )
+ parser.add_argument(
+ '--service',
+ metavar='<service>',
+ required=True,
+ help=_('Service responsible for the resource to limit'),
+ )
+ parser.add_argument(
+ '--resource-limit',
+ metavar='<resource-limit>',
+ required=True,
+ type=int,
+ help=_('The resource limit for the project to assume'),
+ )
+ parser.add_argument(
+ 'resource_name',
+ metavar='<resource-name>',
+ help=_('The name of the resource to limit'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+
+ project = common_utils.find_project(
+ identity_client, parsed_args.project
+ )
+ service = common_utils.find_service(
+ identity_client, parsed_args.service
+ )
+ region = None
+ if parsed_args.region:
+ val = getattr(parsed_args, 'region', None)
+ if 'None' not in val:
+ # NOTE (vishakha): Due to bug #1799153 and for any another
+ # related case where GET resource API does not support the
+ # filter by name, osc_lib.utils.find_resource() method cannot
+ # be used because that method try to fall back to list all the
+ # resource if requested resource cannot be get via name. Which
+ # ends up with NoUniqueMatch error.
+ # So osc_lib.utils.find_resource() function cannot be used for
+ # 'regions', using common_utils.get_resource() instead.
+ region = common_utils.get_resource(
+ identity_client.regions, parsed_args.region
+ )
+
+ limit = identity_client.limits.create(
+ project,
+ service,
+ parsed_args.resource_name,
+ parsed_args.resource_limit,
+ description=parsed_args.description,
+ region=region
+ )
+
+ limit._info.pop('links', None)
+ return zip(*sorted(six.iteritems(limit._info)))
+
+
+class ListLimit(command.Lister):
+ _description = _("List limits")
+
+ def get_parser(self, prog_name):
+ parser = super(ListLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ '--service',
+ metavar='<service>',
+ help=_('Service responsible for the resource to limit'),
+ )
+ parser.add_argument(
+ '--resource-name',
+ metavar='<resource-name>',
+ dest='resource_name',
+ help=_('The name of the resource to limit'),
+ )
+ parser.add_argument(
+ '--region',
+ metavar='<region>',
+ help=_('Region for the registered limit to affect.'),
+ )
+ parser.add_argument(
+ '--project',
+ metavar='<project>',
+ help=_('List resource limits associated with project'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+
+ service = None
+ if parsed_args.service:
+ service = common_utils.find_service(
+ identity_client, parsed_args.service
+ )
+ region = None
+ if parsed_args.region:
+ # NOTE(review): dropped an erroneous utils.find_resource() call
+ # here; it cannot be used for regions (bug #1799153, NoUniqueMatch)
+ # and its result was recomputed by common_utils.get_resource() below.
+ val = getattr(parsed_args, 'region', None)
+ if 'None' not in val:
+ # NOTE (vishakha): Due to bug #1799153 and for any another
+ # related case where GET resource API does not support the
+ # filter by name, osc_lib.utils.find_resource() method cannot
+ # be used because that method try to fall back to list all the
+ # resource if requested resource cannot be get via name. Which
+ # ends up with NoUniqueMatch error.
+ # So osc_lib.utils.find_resource() function cannot be used for
+ # 'regions', using common_utils.get_resource() instead.
+ region = common_utils.get_resource(
+ identity_client.regions, parsed_args.region
+ )
+ project = None
+ if parsed_args.project:
+ project = utils.find_resource(
+ identity_client.projects, parsed_args.project
+ )
+
+ limits = identity_client.limits.list(
+ service=service,
+ resource_name=parsed_args.resource_name,
+ region=region,
+ project=project
+ )
+
+ columns = (
+ 'ID', 'Project ID', 'Service ID', 'Resource Name',
+ 'Resource Limit', 'Description', 'Region ID'
+ )
+ return (
+ columns,
+ (utils.get_item_properties(s, columns) for s in limits),
+ )
+
+
+class ShowLimit(command.ShowOne):
+ _description = _("Display limit details")
+
+ def get_parser(self, prog_name):
+ parser = super(ShowLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ 'limit_id',
+ metavar='<limit-id>',
+ help=_('Limit to display (ID)'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+ limit = identity_client.limits.get(parsed_args.limit_id)
+ limit._info.pop('links', None)
+ return zip(*sorted(six.iteritems(limit._info)))
+
+
+class SetLimit(command.ShowOne):
+ _description = _("Update information about a limit")
+
+ def get_parser(self, prog_name):
+ parser = super(SetLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ 'limit_id',
+ metavar='<limit-id>',
+ help=_('Limit to update (ID)'),
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_('Description of the limit'),
+ )
+ parser.add_argument(
+ '--resource-limit',
+ metavar='<resource-limit>',
+ dest='resource_limit',
+ type=int,
+ help=_('The resource limit for the project to assume'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+
+ limit = identity_client.limits.update(
+ parsed_args.limit_id,
+ description=parsed_args.description,
+ resource_limit=parsed_args.resource_limit
+ )
+
+ limit._info.pop('links', None)
+
+ return zip(*sorted(six.iteritems(limit._info)))
+
+
+class DeleteLimit(command.Command):
+ _description = _("Delete a limit")
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ 'limit_id',
+ metavar='<limit-id>',
+ nargs="+",
+ help=_('Limit to delete (ID)'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+
+ errors = 0
+ for limit_id in parsed_args.limit_id:
+ try:
+ identity_client.limits.delete(limit_id)
+ except Exception as e:
+ errors += 1
+ LOG.error(_("Failed to delete limit with ID "
+ "'%(id)s': %(e)s"),
+ {'id': limit_id, 'e': e})
+
+ if errors > 0:
+ total = len(parsed_args.limit_id)
+ msg = (_("%(errors)s of %(total)s limits failed to "
+ "delete.") % {'errors': errors, 'total': total})
+ raise exceptions.CommandError(msg)
diff --git a/openstackclient/identity/v3/registered_limit.py b/openstackclient/identity/v3/registered_limit.py
new file mode 100644
index 00000000..9366ec1e
--- /dev/null
+++ b/openstackclient/identity/v3/registered_limit.py
@@ -0,0 +1,299 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Registered limits action implementations."""
+
+import logging
+
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils
+import six
+
+from openstackclient.i18n import _
+from openstackclient.identity import common as common_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateRegisteredLimit(command.ShowOne):
+ _description = _("Create a registered limit")
+
+ def get_parser(self, prog_name):
+ parser = super(CreateRegisteredLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_('Description of the registered limit'),
+ )
+ parser.add_argument(
+ '--region',
+ metavar='<region>',
+ help=_('Region for the registered limit to affect'),
+ )
+ parser.add_argument(
+ '--service',
+ metavar='<service>',
+ required=True,
+ help=_('Service responsible for the resource to limit (required)'),
+ )
+ parser.add_argument(
+ '--default-limit',
+ type=int,
+ metavar='<default-limit>',
+ required=True,
+ help=_('The default limit for the resources to assume (required)'),
+ )
+ parser.add_argument(
+ 'resource_name',
+ metavar='<resource-name>',
+ help=_('The name of the resource to limit'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+
+ service = utils.find_resource(
+ identity_client.services, parsed_args.service
+ )
+ region = None
+ if parsed_args.region:
+ val = getattr(parsed_args, 'region', None)
+ if 'None' not in val:
+ # NOTE (vishakha): Due to bug #1799153 and for any another
+ # related case where GET resource API does not support the
+ # filter by name, osc_lib.utils.find_resource() method cannot
+ # be used because that method try to fall back to list all the
+ # resource if requested resource cannot be get via name. Which
+ # ends up with NoUniqueMatch error.
+ # So osc_lib.utils.find_resource() function cannot be used for
+ # 'regions', using common_utils.get_resource() instead.
+ region = common_utils.get_resource(
+ identity_client.regions, parsed_args.region
+ )
+
+ registered_limit = identity_client.registered_limits.create(
+ service,
+ parsed_args.resource_name,
+ parsed_args.default_limit,
+ description=parsed_args.description,
+ region=region
+ )
+
+ registered_limit._info.pop('links', None)
+ return zip(*sorted(six.iteritems(registered_limit._info)))
+
+
+class DeleteRegisteredLimit(command.Command):
+ _description = _("Delete a registered limit")
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteRegisteredLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ 'registered_limit_id',
+ metavar='<registered-limit-id>',
+ nargs="+",
+ help=_('Registered limit to delete (ID)'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+
+ errors = 0
+ for registered_limit_id in parsed_args.registered_limit_id:
+ try:
+ identity_client.registered_limits.delete(registered_limit_id)
+ except Exception as e:
+ errors += 1
+ # NOTE(review): removed stray debug statements left over from
+ # development (pprint of the exception type).
+ LOG.error(_("Failed to delete registered limit with ID "
+ "'%(id)s': %(e)s"),
+ {'id': registered_limit_id, 'e': e})
+
+ if errors > 0:
+ total = len(parsed_args.registered_limit_id)
+ msg = (_("%(errors)s of %(total)s registered limits failed to "
+ "delete.") % {'errors': errors, 'total': total})
+ raise exceptions.CommandError(msg)
+
+
+class ListRegisteredLimit(command.Lister):
+ _description = _("List registered limits")
+
+ def get_parser(self, prog_name):
+ parser = super(ListRegisteredLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ '--service',
+ metavar='<service>',
+ help=_('Service responsible for the resource to limit'),
+ )
+ parser.add_argument(
+ '--resource-name',
+ metavar='<resource-name>',
+ dest='resource_name',
+ help=_('The name of the resource to limit'),
+ )
+ parser.add_argument(
+ '--region',
+ metavar='<region>',
+ help=_('Region for the limit to affect.'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+
+ service = None
+ if parsed_args.service:
+ service = common_utils.find_service(
+ identity_client, parsed_args.service
+ )
+ region = None
+ if parsed_args.region:
+ val = getattr(parsed_args, 'region', None)
+ if 'None' not in val:
+ # NOTE (vishakha): Due to bug #1799153 and for any another
+ # related case where GET resource API does not support the
+ # filter by name, osc_lib.utils.find_resource() method cannot
+ # be used because that method try to fall back to list all the
+ # resource if requested resource cannot be get via name. Which
+ # ends up with NoUniqueMatch error.
+ # So osc_lib.utils.find_resource() function cannot be used for
+ # 'regions', using common_utils.get_resource() instead.
+ region = common_utils.get_resource(
+ identity_client.regions, parsed_args.region
+ )
+
+ registered_limits = identity_client.registered_limits.list(
+ service=service,
+ resource_name=parsed_args.resource_name,
+ region=region
+ )
+
+ columns = (
+ 'ID', 'Service ID', 'Resource Name', 'Default Limit',
+ 'Description', 'Region ID'
+ )
+ return (
+ columns,
+ (utils.get_item_properties(s, columns) for s in registered_limits),
+ )
+
+
+class SetRegisteredLimit(command.ShowOne):
+ _description = _("Update information about a registered limit")
+
+ def get_parser(self, prog_name):
+ parser = super(SetRegisteredLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ 'registered_limit_id',
+ metavar='<registered-limit-id>',
+ help=_('Registered limit to update (ID)'),
+ )
+ parser.add_argument(
+ '--service',
+ metavar='<service>',
+ help=_('Service to be updated responsible for the resource to '
+ 'limit. Either --service, --resource-name or --region must '
+ 'be different than existing value otherwise it will be '
+ 'duplicate entry')
+ )
+ parser.add_argument(
+ '--resource-name',
+ metavar='<resource-name>',
+ help=_('Resource to be updated responsible for the resource to '
+ 'limit. Either --service, --resource-name or --region must '
+ 'be different than existing value otherwise it will be '
+ 'duplicate entry'),
+ )
+ parser.add_argument(
+ '--default-limit',
+ metavar='<default-limit>',
+ type=int,
+ help=_('The default limit for the resources to assume'),
+ )
+ parser.add_argument(
+ '--description',
+ metavar='<description>',
+ help=_('Description to update of the registered limit'),
+ )
+ parser.add_argument(
+ '--region',
+ metavar='<region>',
+ help=_('Region for the registered limit to affect. Either '
+ '--service, --resource-name or --region must be '
+ 'different than existing value otherwise it will be '
+ 'duplicate entry'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+
+ service = None
+ if parsed_args.service:
+ service = common_utils.find_service(
+ identity_client, parsed_args.service
+ )
+
+ region = None
+ if parsed_args.region:
+ val = getattr(parsed_args, 'region', None)
+ if 'None' not in val:
+ # NOTE (vishakha): Due to bug #1799153 and for any another
+ # related case where GET resource API does not support the
+ # filter by name, osc_lib.utils.find_resource() method cannot
+ # be used because that method try to fall back to list all the
+ # resource if requested resource cannot be get via name. Which
+ # ends up with NoUniqueMatch error.
+ # So osc_lib.utils.find_resource() function cannot be used for
+ # 'regions', using common_utils.get_resource() instead.
+ region = common_utils.get_resource(
+ identity_client.regions, parsed_args.region
+ )
+
+ registered_limit = identity_client.registered_limits.update(
+ parsed_args.registered_limit_id,
+ service=service,
+ resource_name=parsed_args.resource_name,
+ default_limit=parsed_args.default_limit,
+ description=parsed_args.description,
+ region=region
+ )
+
+ registered_limit._info.pop('links', None)
+ return zip(*sorted(six.iteritems(registered_limit._info)))
+
+
+class ShowRegisteredLimit(command.ShowOne):
+ _description = _("Display registered limit details")
+
+ def get_parser(self, prog_name):
+ parser = super(ShowRegisteredLimit, self).get_parser(prog_name)
+ parser.add_argument(
+ 'registered_limit_id',
+ metavar='<registered-limit-id>',
+ help=_('Registered limit to display (ID)'),
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ identity_client = self.app.client_manager.identity
+ registered_limit = identity_client.registered_limits.get(
+ parsed_args.registered_limit_id
+ )
+ registered_limit._info.pop('links', None)
+ return zip(*sorted(six.iteritems(registered_limit._info)))
diff --git a/openstackclient/identity/v3/role.py b/openstackclient/identity/v3/role.py
index 2828a349..58a76f8a 100644
--- a/openstackclient/identity/v3/role.py
+++ b/openstackclient/identity/v3/role.py
@@ -31,13 +31,18 @@ LOG = logging.getLogger(__name__)
def _add_identity_and_resource_options_to_parser(parser):
- domain_or_project = parser.add_mutually_exclusive_group()
- domain_or_project.add_argument(
+ system_or_domain_or_project = parser.add_mutually_exclusive_group()
+ system_or_domain_or_project.add_argument(
+ '--system',
+ metavar='<system>',
+ help=_('Include <system> (all)'),
+ )
+ system_or_domain_or_project.add_argument(
'--domain',
metavar='<domain>',
help=_('Include <domain> (name or ID)'),
)
- domain_or_project.add_argument(
+ system_or_domain_or_project.add_argument(
'--project',
metavar='<project>',
help=_('Include <project> (name or ID)'),
@@ -62,7 +67,14 @@ def _add_identity_and_resource_options_to_parser(parser):
def _process_identity_and_resource_options(parsed_args,
identity_client_manager):
kwargs = {}
- if parsed_args.user and parsed_args.domain:
+ if parsed_args.user and parsed_args.system:
+ kwargs['user'] = common.find_user(
+ identity_client_manager,
+ parsed_args.user,
+ parsed_args.user_domain,
+ ).id
+ kwargs['system'] = parsed_args.system
+ elif parsed_args.user and parsed_args.domain:
kwargs['user'] = common.find_user(
identity_client_manager,
parsed_args.user,
@@ -83,6 +95,13 @@ def _process_identity_and_resource_options(parsed_args,
parsed_args.project,
parsed_args.project_domain,
).id
+ elif parsed_args.group and parsed_args.system:
+ kwargs['group'] = common.find_group(
+ identity_client_manager,
+ parsed_args.group,
+ parsed_args.group_domain,
+ ).id
+ kwargs['system'] = parsed_args.system
elif parsed_args.group and parsed_args.domain:
kwargs['group'] = common.find_group(
identity_client_manager,
@@ -109,8 +128,8 @@ def _process_identity_and_resource_options(parsed_args,
class AddRole(command.Command):
- _description = _("Adds a role assignment to a user or group on a domain "
- "or project")
+ _description = _("Adds a role assignment to a user or group on the "
+ "system, a domain, or a project")
def get_parser(self, prog_name):
parser = super(AddRole, self).get_parser(prog_name)
@@ -381,7 +400,7 @@ class ListRole(command.Lister):
class RemoveRole(command.Command):
- _description = _("Removes a role assignment from domain/project : "
+ _description = _("Removes a role assignment from system/domain/project : "
"user/group")
def get_parser(self, prog_name):
diff --git a/openstackclient/identity/v3/role_assignment.py b/openstackclient/identity/v3/role_assignment.py
index a362adb0..9c2f3d24 100644
--- a/openstackclient/identity/v3/role_assignment.py
+++ b/openstackclient/identity/v3/role_assignment.py
@@ -55,17 +55,22 @@ class ListRoleAssignment(command.Lister):
help=_('Group to filter (name or ID)'),
)
common.add_group_domain_option_to_parser(parser)
- domain_or_project = parser.add_mutually_exclusive_group()
- domain_or_project.add_argument(
+ system_or_domain_or_project = parser.add_mutually_exclusive_group()
+ system_or_domain_or_project.add_argument(
'--domain',
metavar='<domain>',
help=_('Domain to filter (name or ID)'),
)
- domain_or_project.add_argument(
+ system_or_domain_or_project.add_argument(
'--project',
metavar='<project>',
help=_('Project to filter (name or ID)'),
)
+ system_or_domain_or_project.add_argument(
+ '--system',
+ metavar='<system>',
+ help=_('Filter based on system role assignments'),
+ )
common.add_project_domain_option_to_parser(parser)
common.add_inherited_option_to_parser(parser)
parser.add_argument(
@@ -85,7 +90,8 @@ class ListRoleAssignment(command.Lister):
def _as_tuple(self, assignment):
return (assignment.role, assignment.user, assignment.group,
- assignment.project, assignment.domain, assignment.inherited)
+ assignment.project, assignment.domain, assignment.system,
+ assignment.inherited)
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
@@ -117,6 +123,10 @@ class ListRoleAssignment(command.Lister):
auth_ref.user_id
)
+ system = None
+ if parsed_args.system:
+ system = parsed_args.system
+
domain = None
if parsed_args.domain:
domain = common.find_domain(
@@ -149,7 +159,9 @@ class ListRoleAssignment(command.Lister):
include_names = True if parsed_args.names else False
effective = True if parsed_args.effective else False
- columns = ('Role', 'User', 'Group', 'Project', 'Domain', 'Inherited')
+ columns = (
+ 'Role', 'User', 'Group', 'Project', 'Domain', 'System', 'Inherited'
+ )
inherited_to = 'projects' if parsed_args.inherited else None
data = identity_client.role_assignments.list(
@@ -157,6 +169,7 @@ class ListRoleAssignment(command.Lister):
user=user,
group=group,
project=project,
+ system=system,
role=role,
effective=effective,
os_inherit_extension_inherited_to=inherited_to,
@@ -174,14 +187,24 @@ class ListRoleAssignment(command.Lister):
else:
setattr(assignment, 'project', scope['project']['id'])
assignment.domain = ''
+ assignment.system = ''
elif 'domain' in scope:
if include_names:
setattr(assignment, 'domain', scope['domain']['name'])
else:
setattr(assignment, 'domain', scope['domain']['id'])
assignment.project = ''
-
+ assignment.system = ''
+ elif 'system' in scope:
+ # NOTE(lbragstad): If, or when, keystone supports role
+ # assignments on subsets of a system, this will have to evolve
+ # to handle that case instead of hardcoding to the entire
+ # system.
+ setattr(assignment, 'system', 'all')
+ assignment.domain = ''
+ assignment.project = ''
else:
+ assignment.system = ''
assignment.domain = ''
assignment.project = ''
diff --git a/openstackclient/identity/v3/token.py b/openstackclient/identity/v3/token.py
index effb9e35..1933ecad 100644
--- a/openstackclient/identity/v3/token.py
+++ b/openstackclient/identity/v3/token.py
@@ -192,6 +192,12 @@ class IssueToken(command.ShowOne):
data['user_id'] = auth_ref.user_id
if auth_ref.domain_id:
data['domain_id'] = auth_ref.domain_id
+ if auth_ref.system_scoped:
+ # NOTE(lbragstad): This could change in the future when, or if,
+ # keystone supports the ability to scope to a subset of the entire
+ # deployment system. When that happens, this will have to relay
+ # scope information and IDs like we do for projects and domains.
+ data['system'] = 'all'
return zip(*sorted(six.iteritems(data)))
diff --git a/openstackclient/image/v2/image.py b/openstackclient/image/v2/image.py
index 4a51062f..223e55c9 100644
--- a/openstackclient/image/v2/image.py
+++ b/openstackclient/image/v2/image.py
@@ -16,9 +16,11 @@
"""Image V2 Action Implementations"""
import argparse
+from base64 import b64encode
import logging
from glanceclient.common import utils as gc_utils
+from openstack.image import image_signer
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
@@ -35,6 +37,7 @@ DEFAULT_CONTAINER_FORMAT = 'bare'
DEFAULT_DISK_FORMAT = 'raw'
DISK_CHOICES = ["ami", "ari", "aki", "vhd", "vmdk", "raw", "qcow2", "vhdx",
"vdi", "iso", "ploop"]
+MEMBER_STATUS_CHOICES = ["accepted", "pending", "rejected", "all"]
LOG = logging.getLogger(__name__)
@@ -84,7 +87,7 @@ class AddProjectToImage(command.ShowOne):
parser.add_argument(
"project",
metavar="<project>",
- help=_("Project to associate with image (name or ID)"),
+ help=_("Project to associate with image (ID)"),
)
common.add_project_domain_option_to_parser(parser)
return parser
@@ -183,6 +186,22 @@ class CreateImage(command.ShowOne):
help=_("Force image creation if volume is in use "
"(only meaningful with --volume)"),
)
+ parser.add_argument(
+ '--sign-key-path',
+ metavar="<sign-key-path>",
+ default=[],
+ help=_("Sign the image using the specified private key. "
+ "Only use in combination with --sign-cert-id")
+ )
+ parser.add_argument(
+ '--sign-cert-id',
+ metavar="<sign-cert-id>",
+ default=[],
+ help=_("The specified certificate UUID is a reference to "
+ "the certificate in the key manager that corresponds "
+ "to the public key and is used for signature validation. "
+ "Only use in combination with --sign-key-path")
+ )
protected_group = parser.add_mutually_exclusive_group()
protected_group.add_argument(
"--protected",
@@ -335,6 +354,46 @@ class CreateImage(command.ShowOne):
parsed_args.project_domain,
).id
+ # Sign the image using the given local private key file, if requested.
+ if parsed_args.sign_key_path or parsed_args.sign_cert_id:
+ if not parsed_args.file:
+ msg = (_("signing an image requires the --file option, "
+ "passing files via stdin when signing is not "
+ "supported."))
+ raise exceptions.CommandError(msg)
+ if (len(parsed_args.sign_key_path) < 1 or
+ len(parsed_args.sign_cert_id) < 1):
+ msg = (_("'sign-key-path' and 'sign-cert-id' must both be "
+ "specified when attempting to sign an image."))
+ raise exceptions.CommandError(msg)
+ else:
+ sign_key_path = parsed_args.sign_key_path
+ sign_cert_id = parsed_args.sign_cert_id
+ signer = image_signer.ImageSigner()
+ try:
+ pw = utils.get_password(
+ self.app.stdin,
+ prompt=("Please enter private key password, leave "
+ "empty if none: "),
+ confirm=False)
+ if not pw or len(pw) < 1:
+ pw = None
+ signer.load_private_key(
+ sign_key_path,
+ password=pw)
+ except Exception:
+ msg = (_("Error during sign operation: private key could "
+ "not be loaded."))
+ raise exceptions.CommandError(msg)
+
+ signature = signer.generate_signature(fp)
+ signature_b64 = b64encode(signature)
+ kwargs['img_signature'] = signature_b64
+ kwargs['img_signature_certificate_uuid'] = sign_cert_id
+ kwargs['img_signature_hash_method'] = signer.hash_method
+ if signer.padding_method:
+ kwargs['img_signature_key_type'] = signer.padding_method
+
# If a volume is specified.
if parsed_args.volume:
volume_client = self.app.client_manager.volume
@@ -440,6 +499,13 @@ class ListImage(command.Lister):
help=_("List only private images"),
)
public_group.add_argument(
+ "--community",
+ dest="community",
+ action="store_true",
+ default=False,
+ help=_("List only community images"),
+ )
+ public_group.add_argument(
"--shared",
dest="shared",
action="store_true",
@@ -450,7 +516,8 @@ class ListImage(command.Lister):
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
- help=_('Filter output based on property'),
+ help=_('Filter output based on property '
+ '(repeat option to filter on multiple properties)'),
)
parser.add_argument(
'--name',
@@ -465,6 +532,22 @@ class ListImage(command.Lister):
help=_("Filter images based on status.")
)
parser.add_argument(
+ '--member-status',
+ metavar='<member-status>',
+ default=None,
+ type=lambda s: s.lower(),
+ choices=MEMBER_STATUS_CHOICES,
+ help=(_("Filter images based on member status. "
+ "The supported options are: %s. ") %
+ ', '.join(MEMBER_STATUS_CHOICES))
+ )
+ parser.add_argument(
+ '--tag',
+ metavar='<tag>',
+ default=None,
+ help=_('Filter images based on tag.'),
+ )
+ parser.add_argument(
'--long',
action='store_true',
default=False,
@@ -510,6 +593,8 @@ class ListImage(command.Lister):
kwargs['public'] = True
if parsed_args.private:
kwargs['private'] = True
+ if parsed_args.community:
+ kwargs['community'] = True
if parsed_args.shared:
kwargs['shared'] = True
if parsed_args.limit:
@@ -521,6 +606,10 @@ class ListImage(command.Lister):
kwargs['name'] = parsed_args.name
if parsed_args.status:
kwargs['status'] = parsed_args.status
+ if parsed_args.member_status:
+ kwargs['member_status'] = parsed_args.member_status
+ if parsed_args.tag:
+ kwargs['tag'] = parsed_args.tag
if parsed_args.long:
columns = (
'ID',
@@ -568,16 +657,15 @@ class ListImage(command.Lister):
marker = page[-1]['id']
if parsed_args.property:
- # NOTE(dtroyer): coerce to a list to subscript it in py3
- attr, value = list(parsed_args.property.items())[0]
- api_utils.simple_filter(
- data,
- attr=attr,
- value=value,
- property_field='properties',
- )
+ for attr, value in parsed_args.property.items():
+ api_utils.simple_filter(
+ data,
+ attr=attr,
+ value=value,
+ property_field='properties',
+ )
- data = utils.sort_items(data, parsed_args.sort)
+ data = utils.sort_items(data, parsed_args.sort, str)
return (
column_headers,
diff --git a/openstackclient/network/client.py b/openstackclient/network/client.py
index 5183cbda..39936fde 100644
--- a/openstackclient/network/client.py
+++ b/openstackclient/network/client.py
@@ -13,16 +13,6 @@
import logging
-from openstack import connection
-
-
-# NOTE(dtroyer): Attempt an import to detect if the SDK installed is new
-# enough to not use Profile. If so, use that.
-try:
- from openstack.config import loader as config # noqa
- profile = None
-except ImportError:
- from openstack import profile
from osc_lib import utils
from openstackclient.i18n import _
@@ -41,37 +31,17 @@ API_VERSIONS = {
def make_client(instance):
"""Returns a network proxy"""
- if getattr(instance, "sdk_connection", None) is None:
- if profile is None:
- # If the installed OpenStackSDK is new enough to not require a
- # Profile obejct and osc-lib is not new enough to have created
- # it for us, make an SDK Connection.
- # NOTE(dtroyer): This can be removed when this bit is in the
- # released osc-lib in requirements.txt.
- conn = connection.Connection(
- config=instance._cli_options,
- session=instance.session,
- )
- else:
- # Fall back to the original Connection creation
- prof = profile.Profile()
- prof.set_region(API_NAME, instance.region_name)
- prof.set_version(API_NAME, instance._api_version[API_NAME])
- prof.set_interface(API_NAME, instance.interface)
- conn = connection.Connection(
- authenticator=instance.session.auth,
- verify=instance.session.verify,
- cert=instance.session.cert,
- profile=prof,
- )
-
- instance.sdk_connection = conn
-
- conn = instance.sdk_connection
- LOG.debug('Connection: %s', conn)
- LOG.debug('Network client initialized using OpenStack SDK: %s',
- conn.network)
- return conn.network
+ # NOTE(dtroyer): As of osc-lib 1.8.0 and OpenStackSDK 0.10.0 the
+ # old Profile interface and separate client creation
+ # for each API that uses the SDK are unnecessary. This
+ # callback remains as a remnant of the original plugin
+ # interface and to avoid the code churn of changing all
+ # of the existing references.
+ LOG.debug(
+ 'Network client initialized using OpenStack SDK: %s',
+ instance.sdk_connection.network,
+ )
+ return instance.sdk_connection.network
def build_option_parser(parser):
diff --git a/openstackclient/network/common.py b/openstackclient/network/common.py
index 37bf1406..d22b2caa 100644
--- a/openstackclient/network/common.py
+++ b/openstackclient/network/common.py
@@ -12,6 +12,7 @@
#
import abc
+import contextlib
import logging
import openstack.exceptions
@@ -24,6 +25,30 @@ from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
+_required_opt_extensions_map = {
+ 'allowed_address_pairs': 'allowed-address-pairs',
+ 'dns_domain': 'dns-integration',
+ 'dns_name': 'dns-integration',
+ 'extra_dhcp_opts': 'extra_dhcp_opt',
+ 'qos_policy_id': 'qos',
+ 'security_groups': 'security-groups',
+}
+
+
+@contextlib.contextmanager
+def check_missing_extension_if_error(client_manager, attrs):
+ # If a specified option requires an extension, try to
+ # find out whether that extension exists. If it does
+ # not, an exception with an appropriate message is
+ # raised from within client.find_extension.
+ try:
+ yield
+ except openstack.exceptions.HttpException:
+ for opt, ext in _required_opt_extensions_map.items():
+ if opt in attrs:
+ client_manager.find_extension(ext, ignore_missing=False)
+ raise
+
@six.add_metaclass(abc.ABCMeta)
class NetworkAndComputeCommand(command.Command):
diff --git a/openstackclient/network/v2/floating_ip.py b/openstackclient/network/v2/floating_ip.py
index f51baed5..8ac8e107 100644
--- a/openstackclient/network/v2/floating_ip.py
+++ b/openstackclient/network/v2/floating_ip.py
@@ -13,8 +13,6 @@
"""IP Floating action implementations"""
-import logging
-
from osc_lib.command import command
from osc_lib import utils
@@ -25,6 +23,11 @@ from openstackclient.network import sdk_utils
from openstackclient.network.v2 import _tag
+_formatters = {
+ 'port_details': utils.format_dict,
+}
+
+
def _get_network_columns(item):
column_map = {
'tenant_id': 'project_id',
@@ -82,6 +85,12 @@ def _get_attrs(client_manager, parsed_args):
).id
attrs['tenant_id'] = project_id
+ if parsed_args.dns_domain:
+ attrs['dns_domain'] = parsed_args.dns_domain
+
+ if parsed_args.dns_name:
+ attrs['dns_name'] = parsed_args.dns_name
+
return attrs
@@ -139,15 +148,32 @@ class CreateFloatingIP(common.NetworkAndComputeShowOne):
metavar='<project>',
help=_("Owner's project (name or ID)")
)
+ parser.add_argument(
+ '--dns-domain',
+ metavar='<dns-domain>',
+ dest='dns_domain',
+ help=_("Set DNS domain for this floating IP")
+ )
+ parser.add_argument(
+ '--dns-name',
+ metavar='<dns-name>',
+ dest='dns_name',
+ help=_("Set DNS name for this floating IP")
+ )
+
identity_common.add_project_domain_option_to_parser(parser)
_tag.add_tag_option_to_parser_for_create(parser, _('floating IP'))
return parser
def take_action_network(self, client, parsed_args):
attrs = _get_attrs(self.app.client_manager, parsed_args)
- obj = client.create_ip(**attrs)
+ with common.check_missing_extension_if_error(
+ self.app.client_manager.network, attrs):
+ obj = client.create_ip(**attrs)
+
# tags cannot be set when created, so tags need to be set later.
_tag.update_tags_for_set(client, obj, parsed_args)
+
display_columns, columns = _get_network_columns(obj)
data = utils.get_item_properties(obj, columns)
return (display_columns, data)
@@ -159,30 +185,6 @@ class CreateFloatingIP(common.NetworkAndComputeShowOne):
return (columns, data)
-class CreateIPFloating(CreateFloatingIP):
- _description = _("Create floating IP")
-
- # TODO(tangchen): Remove this class and ``ip floating create`` command
- # two cycles after Mitaka.
-
- # This notifies cliff to not display the help for this command
- deprecated = True
-
- log = logging.getLogger('deprecated')
-
- def take_action_network(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip create" instead.'))
- return super(CreateIPFloating, self).take_action_network(
- client, parsed_args)
-
- def take_action_compute(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip create" instead.'))
- return super(CreateIPFloating, self).take_action_compute(
- client, parsed_args)
-
-
class DeleteFloatingIP(common.NetworkAndComputeDelete):
_description = _("Delete floating IP(s)")
@@ -210,30 +212,6 @@ class DeleteFloatingIP(common.NetworkAndComputeDelete):
client.api.floating_ip_delete(self.r)
-class DeleteIPFloating(DeleteFloatingIP):
- _description = _("Delete floating IP(s)")
-
- # TODO(tangchen): Remove this class and ``ip floating delete`` command
- # two cycles after Mitaka.
-
- # This notifies cliff to not display the help for this command
- deprecated = True
-
- log = logging.getLogger('deprecated')
-
- def take_action_network(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip delete" instead.'))
- return super(DeleteIPFloating, self).take_action_network(
- client, parsed_args)
-
- def take_action_compute(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip delete" instead.'))
- return super(DeleteIPFloating, self).take_action_compute(
- client, parsed_args)
-
-
class ListFloatingIP(common.NetworkAndComputeLister):
# TODO(songminglong): Use SDK resource mapped attribute names once
# the OSC minimum requirements include SDK 1.0
@@ -259,6 +237,12 @@ class ListFloatingIP(common.NetworkAndComputeLister):
"given fixed IP address")
)
parser.add_argument(
+ '--floating-ip-address',
+ metavar='<ip-address>',
+ help=_("List floating IP(s) according to "
+ "given floating IP address")
+ )
+ parser.add_argument(
'--long',
action='store_true',
default=False,
@@ -314,12 +298,16 @@ class ListFloatingIP(common.NetworkAndComputeLister):
'status',
'description',
'tags',
+ 'dns_name',
+ 'dns_domain',
)
headers = headers + (
'Router',
'Status',
'Description',
'Tags',
+ 'DNS Name',
+ 'DNS Domain',
)
query = {}
@@ -334,6 +322,8 @@ class ListFloatingIP(common.NetworkAndComputeLister):
query['port_id'] = port.id
if parsed_args.fixed_ip_address is not None:
query['fixed_ip_address'] = parsed_args.fixed_ip_address
+ if parsed_args.floating_ip_address is not None:
+ query['floating_ip_address'] = parsed_args.floating_ip_address
if parsed_args.status:
query['status'] = parsed_args.status
if parsed_args.project is not None:
@@ -384,30 +374,6 @@ class ListFloatingIP(common.NetworkAndComputeLister):
) for s in data))
-class ListIPFloating(ListFloatingIP):
- _description = _("List floating IP(s)")
-
- # TODO(tangchen): Remove this class and ``ip floating list`` command
- # two cycles after Mitaka.
-
- # This notifies cliff to not display the help for this command
- deprecated = True
-
- log = logging.getLogger('deprecated')
-
- def take_action_network(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip list" instead.'))
- return super(ListIPFloating, self).take_action_network(
- client, parsed_args)
-
- def take_action_compute(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip list" instead.'))
- return super(ListIPFloating, self).take_action_compute(
- client, parsed_args)
-
-
class SetFloatingIP(command.Command):
_description = _("Set floating IP Properties")
@@ -416,11 +382,10 @@ class SetFloatingIP(command.Command):
parser.add_argument(
'floating_ip',
metavar='<floating-ip>',
- help=_("Floating IP to associate (IP address or ID)"))
+ help=_("Floating IP to modify (IP address or ID)"))
parser.add_argument(
'--port',
metavar='<port>',
- required=True,
help=_("Associate the floating IP with port (name or ID)")),
parser.add_argument(
'--fixed-ip-address',
@@ -452,9 +417,11 @@ class SetFloatingIP(command.Command):
parsed_args.floating_ip,
ignore_missing=False,
)
- port = client.find_port(parsed_args.port,
- ignore_missing=False)
- attrs['port_id'] = port.id
+ if parsed_args.port:
+ port = client.find_port(parsed_args.port,
+ ignore_missing=False)
+ attrs['port_id'] = port.id
+
if parsed_args.fixed_ip_address:
attrs['fixed_ip_address'] = parsed_args.fixed_ip_address
@@ -489,7 +456,7 @@ class ShowFloatingIP(common.NetworkAndComputeShowOne):
ignore_missing=False,
)
display_columns, columns = _get_network_columns(obj)
- data = utils.get_item_properties(obj, columns)
+ data = utils.get_item_properties(obj, columns, formatters=_formatters)
return (display_columns, data)
def take_action_compute(self, client, parsed_args):
@@ -499,30 +466,6 @@ class ShowFloatingIP(common.NetworkAndComputeShowOne):
return (columns, data)
-class ShowIPFloating(ShowFloatingIP):
- _description = _("Display floating IP details")
-
- # TODO(tangchen): Remove this class and ``ip floating show`` command
- # two cycles after Mitaka.
-
- # This notifies cliff to not display the help for this command
- deprecated = True
-
- log = logging.getLogger('deprecated')
-
- def take_action_network(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip show" instead.'))
- return super(ShowIPFloating, self).take_action_network(
- client, parsed_args)
-
- def take_action_compute(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip show" instead.'))
- return super(ShowIPFloating, self).take_action_compute(
- client, parsed_args)
-
-
class UnsetFloatingIP(command.Command):
_description = _("Unset floating IP Properties")
diff --git a/openstackclient/network/v2/floating_ip_pool.py b/openstackclient/network/v2/floating_ip_pool.py
index ebb15da8..32852004 100644
--- a/openstackclient/network/v2/floating_ip_pool.py
+++ b/openstackclient/network/v2/floating_ip_pool.py
@@ -13,7 +13,6 @@
"""Floating IP Pool action implementations"""
-import logging
from osc_lib import exceptions
from osc_lib import utils
@@ -40,27 +39,3 @@ class ListFloatingIPPool(common.NetworkAndComputeLister):
(utils.get_dict_properties(
s, columns,
) for s in data))
-
-
-class ListIPFloatingPool(ListFloatingIPPool):
- _description = _("List pools of floating IP addresses")
-
- # TODO(tangchen): Remove this class and ``ip floating pool list`` command
- # two cycles after Mitaka.
-
- # This notifies cliff to not display the help for this command
- deprecated = True
-
- log = logging.getLogger('deprecated')
-
- def take_action_network(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip pool list" instead.'))
- return super(ListIPFloatingPool, self).take_action_network(
- client, parsed_args)
-
- def take_action_compute(self, client, parsed_args):
- self.log.warning(_('This command has been deprecated. '
- 'Please use "floating ip pool list" instead.'))
- return super(ListIPFloatingPool, self).take_action_compute(
- client, parsed_args)
diff --git a/openstackclient/network/v2/network.py b/openstackclient/network/v2/network.py
index d1c7f005..f5123932 100644
--- a/openstackclient/network/v2/network.py
+++ b/openstackclient/network/v2/network.py
@@ -72,7 +72,7 @@ def _get_columns_compute(item):
def _get_attrs_network(client_manager, parsed_args):
attrs = {}
if parsed_args.name is not None:
- attrs['name'] = str(parsed_args.name)
+ attrs['name'] = parsed_args.name
if parsed_args.enable:
attrs['admin_state_up'] = True
if parsed_args.disable:
@@ -134,13 +134,16 @@ def _get_attrs_network(client_manager, parsed_args):
attrs['qos_policy_id'] = _qos_policy.id
if 'no_qos_policy' in parsed_args and parsed_args.no_qos_policy:
attrs['qos_policy_id'] = None
+ # Update DNS network options
+ if parsed_args.dns_domain:
+ attrs['dns_domain'] = parsed_args.dns_domain
return attrs
def _get_attrs_compute(client_manager, parsed_args):
attrs = {}
if parsed_args.name is not None:
- attrs['name'] = str(parsed_args.name)
+ attrs['name'] = parsed_args.name
if parsed_args.share:
attrs['share_subnet'] = True
if parsed_args.no_share:
@@ -171,6 +174,13 @@ def _add_additional_network_options(parser):
dest='segmentation_id',
help=_("VLAN ID for VLAN networks or Tunnel ID for "
"GENEVE/GRE/VXLAN networks"))
+ parser.add_argument(
+ '--dns-domain',
+ metavar='<dns-domain>',
+ dest='dns_domain',
+ help=_("Set DNS domain for this network "
+ "(requires DNS integration extension)")
+ )
# TODO(sindhu): Use the SDK resource mapped attribute names once the
@@ -308,8 +318,10 @@ class CreateNetwork(common.NetworkAndComputeShowOne):
attrs['vlan_transparent'] = True
if parsed_args.no_transparent_vlan:
attrs['vlan_transparent'] = False
+ with common.check_missing_extension_if_error(
+ self.app.client_manager.network, attrs):
+ obj = client.create_network(**attrs)
- obj = client.create_network(**attrs)
# tags cannot be set when created, so tags need to be set later.
_tag.update_tags_for_set(client, obj, parsed_args)
display_columns, columns = _get_columns_network(obj)
@@ -690,7 +702,9 @@ class SetNetwork(command.Command):
attrs = _get_attrs_network(self.app.client_manager, parsed_args)
if attrs:
- client.update_network(obj, **attrs)
+ with common.check_missing_extension_if_error(
+ self.app.client_manager.network, attrs):
+ client.update_network(obj, **attrs)
# tags is a subresource and it needs to be updated separately.
_tag.update_tags_for_set(client, obj, parsed_args)
diff --git a/openstackclient/network/v2/network_agent.py b/openstackclient/network/v2/network_agent.py
index ba2a2633..46e8d4b2 100644
--- a/openstackclient/network/v2/network_agent.py
+++ b/openstackclient/network/v2/network_agent.py
@@ -351,7 +351,7 @@ class SetNetworkAgent(command.Command):
obj = client.get_agent(parsed_args.network_agent)
attrs = {}
if parsed_args.description is not None:
- attrs['description'] = str(parsed_args.description)
+ attrs['description'] = parsed_args.description
if parsed_args.enable:
attrs['is_admin_state_up'] = True
attrs['admin_state_up'] = True
diff --git a/openstackclient/network/v2/network_qos_policy.py b/openstackclient/network/v2/network_qos_policy.py
index 2c6b841b..fd5ff937 100644
--- a/openstackclient/network/v2/network_qos_policy.py
+++ b/openstackclient/network/v2/network_qos_policy.py
@@ -38,7 +38,7 @@ def _get_columns(item):
def _get_attrs(client_manager, parsed_args):
attrs = {}
if 'name' in parsed_args and parsed_args.name is not None:
- attrs['name'] = str(parsed_args.name)
+ attrs['name'] = parsed_args.name
if 'description' in parsed_args and parsed_args.description is not None:
attrs['description'] = parsed_args.description
if parsed_args.share:
diff --git a/openstackclient/network/v2/network_qos_rule.py b/openstackclient/network/v2/network_qos_rule.py
index 9c4275a8..28c5600a 100644
--- a/openstackclient/network/v2/network_qos_rule.py
+++ b/openstackclient/network/v2/network_qos_rule.py
@@ -141,7 +141,10 @@ def _add_rule_arguments(parser):
dest='max_burst_kbits',
metavar='<max-burst-kbits>',
type=int,
- help=_('Maximum burst in kilobits, 0 means automatic')
+ help=_('Maximum burst in kilobits, 0 or not specified means '
+ 'automatic, which is 80%% of the bandwidth limit, which works '
+ 'for typical TCP traffic. For details check the QoS user '
+ 'workflow.')
)
parser.add_argument(
'--dscp-mark',
diff --git a/openstackclient/network/v2/port.py b/openstackclient/network/v2/port.py
index f13ee7b9..f6d6fc72 100644
--- a/openstackclient/network/v2/port.py
+++ b/openstackclient/network/v2/port.py
@@ -25,6 +25,7 @@ from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
+from openstackclient.network import common
from openstackclient.network import sdk_utils
from openstackclient.network.v2 import _tag
@@ -133,7 +134,7 @@ def _get_attrs(client_manager, parsed_args):
attrs['dns_name'] = parsed_args.dns_name
# It is possible that name is not updated during 'port set'
if parsed_args.name is not None:
- attrs['name'] = str(parsed_args.name)
+ attrs['name'] = parsed_args.name
# The remaining options do not support 'port set' command, so they require
# additional check
if 'network' in parsed_args and parsed_args.network is not None:
@@ -162,6 +163,13 @@ def _get_attrs(client_manager, parsed_args):
attrs['qos_policy_id'] = client_manager.network.find_qos_policy(
parsed_args.qos_policy, ignore_missing=False).id
+ if ('enable_uplink_status_propagation' in parsed_args and
+ parsed_args.enable_uplink_status_propagation):
+ attrs['propagate_uplink_status'] = True
+ if ('disable_uplink_status_propagation' in parsed_args and
+ parsed_args.disable_uplink_status_propagation):
+ attrs['propagate_uplink_status'] = False
+
return attrs
@@ -215,6 +223,9 @@ def _prepare_filter_fixed_ips(client_manager, parsed_args):
if 'ip-address' in ip_spec:
ips.append('ip_address=%s' % ip_spec['ip-address'])
+
+ if 'ip-substring' in ip_spec:
+ ips.append('ip_address_substr=%s' % ip_spec['ip-substring'])
return ips
@@ -255,7 +266,7 @@ def _add_updatable_args(parser):
'normal', 'baremetal', 'virtio-forwarder'],
help=_("VNIC type for this port (direct | direct-physical | "
"macvtap | normal | baremetal | virtio-forwarder, "
- " default: normal)")
+ "default: normal)")
)
# NOTE(dtroyer): --host-id is deprecated in Mar 2016. Do not
# remove before 3.x release or Mar 2017.
@@ -278,8 +289,8 @@ def _add_updatable_args(parser):
)
parser.add_argument(
'--dns-name',
- metavar='dns-name',
- help=_("Set DNS name to this port "
+ metavar='<dns-name>',
+ help=_("Set DNS name for this port "
"(requires DNS integration extension)")
)
@@ -345,6 +356,17 @@ class CreatePort(command.ShowOne):
action='store_true',
help=_("Disable port")
)
+ uplink_status_group = parser.add_mutually_exclusive_group()
+ uplink_status_group.add_argument(
+ '--enable-uplink-status-propagation',
+ action='store_true',
+ help=_("Enable uplink status propagate")
+ )
+ uplink_status_group.add_argument(
+ '--disable-uplink-status-propagation',
+ action='store_true',
+ help=_("Disable uplink status propagate (default)")
+ )
parser.add_argument(
'--project',
metavar='<project>',
@@ -434,7 +456,10 @@ class CreatePort(command.ShowOne):
if parsed_args.qos_policy:
attrs['qos_policy_id'] = client.find_qos_policy(
parsed_args.qos_policy, ignore_missing=False).id
- obj = client.create_port(**attrs)
+ with common.check_missing_extension_if_error(
+ self.app.client_manager.network, attrs):
+ obj = client.create_port(**attrs)
+
# tags cannot be set when created, so tags need to be set later.
_tag.update_tags_for_set(client, obj, parsed_args)
display_columns, columns = _get_columns(obj)
@@ -531,11 +556,13 @@ class ListPort(command.Lister):
identity_common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--fixed-ip',
- metavar='subnet=<subnet>,ip-address=<ip-address>',
+ metavar=('subnet=<subnet>,ip-address=<ip-address>,'
+ 'ip-substring=<ip-substring>'),
action=parseractions.MultiKeyValueAction,
- optional_keys=['subnet', 'ip-address'],
+ optional_keys=['subnet', 'ip-address', 'ip-substring'],
help=_("Desired IP and/or subnet for filtering ports "
- "(name or ID): subnet=<subnet>,ip-address=<ip-address> "
+ "(name or ID): subnet=<subnet>,ip-address=<ip-address>,"
+ "ip-substring=<ip-substring> "
"(repeat option to set multiple fixed IP addresses)"),
)
_tag.add_tag_filtering_option_to_parser(parser, _('ports'))
@@ -660,7 +687,7 @@ class SetPort(command.Command):
parser.add_argument(
'--no-binding-profile',
action='store_true',
- help=_("Clear existing information of binding:profile."
+ help=_("Clear existing information of binding:profile. "
"Specify both --binding-profile and --no-binding-profile "
"to overwrite the current binding:profile information.")
)
@@ -714,9 +741,9 @@ class SetPort(command.Command):
'--no-allowed-address',
dest='no_allowed_address_pair',
action='store_true',
- help=_("Clear existing allowed-address pairs associated"
- "with this port."
- "(Specify both --allowed-address and --no-allowed-address"
+ help=_("Clear existing allowed-address pairs associated "
+ "with this port. "
+ "(Specify both --allowed-address and --no-allowed-address "
"to overwrite the current allowed-address pairs)")
)
parser.add_argument(
@@ -785,7 +812,9 @@ class SetPort(command.Command):
attrs['data_plane_status'] = parsed_args.data_plane_status
if attrs:
- client.update_port(obj, **attrs)
+ with common.check_missing_extension_if_error(
+ self.app.client_manager.network, attrs):
+ client.update_port(obj, **attrs)
# tags is a subresource and it needs to be updated separately.
_tag.update_tags_for_set(client, obj, parsed_args)
@@ -832,7 +861,7 @@ class UnsetPort(command.Command):
'--binding-profile',
metavar='<binding-profile-key>',
action='append',
- help=_("Desired key which should be removed from binding:profile"
+ help=_("Desired key which should be removed from binding:profile "
"(repeat option to unset multiple binding:profile data)"))
parser.add_argument(
'--security-group',
@@ -856,8 +885,8 @@ class UnsetPort(command.Command):
required_keys=['ip-address'],
optional_keys=['mac-address'],
help=_("Desired allowed-address pair which should be removed "
- "from this port: ip-address=<ip-address> "
- "[,mac-address=<mac-address>] (repeat option to set "
+ "from this port: ip-address=<ip-address>"
+ "[,mac-address=<mac-address>] (repeat option to unset "
"multiple allowed-address pairs)")
)
parser.add_argument(
diff --git a/openstackclient/network/v2/router.py b/openstackclient/network/v2/router.py
index f0a51967..2ec3e2f0 100644
--- a/openstackclient/network/v2/router.py
+++ b/openstackclient/network/v2/router.py
@@ -71,13 +71,21 @@ def _get_columns(item):
}
if hasattr(item, 'interfaces_info'):
column_map['interfaces_info'] = 'interfaces_info'
- return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map)
+ invisible_columns = []
+ if item.is_ha is None:
+ invisible_columns.append('is_ha')
+ column_map.pop('is_ha')
+ if item.is_distributed is None:
+ invisible_columns.append('is_distributed')
+ column_map.pop('is_distributed')
+ return sdk_utils.get_osc_show_columns_for_sdk_resource(
+ item, column_map, invisible_columns)
def _get_attrs(client_manager, parsed_args):
attrs = {}
if parsed_args.name is not None:
- attrs['name'] = str(parsed_args.name)
+ attrs['name'] = parsed_args.name
if parsed_args.enable:
attrs['admin_state_up'] = True
if parsed_args.disable:
@@ -330,8 +338,6 @@ class ListRouter(command.Lister):
'name',
'status',
'is_admin_state_up',
- 'is_distributed',
- 'is_ha',
'project_id',
)
column_headers = (
@@ -339,8 +345,6 @@ class ListRouter(command.Lister):
'Name',
'Status',
'State',
- 'Distributed',
- 'HA',
'Project',
)
@@ -376,6 +380,16 @@ class ListRouter(command.Lister):
else:
data = client.routers(**args)
+ # check if "HA" and "Distributed" columns should be displayed also
+ data = list(data)
+ for d in data:
+ if (d.is_distributed is not None and
+ 'is_distributed' not in columns):
+ columns = columns + ('is_distributed',)
+ column_headers = column_headers + ('Distributed',)
+ if d.is_ha is not None and 'is_ha' not in columns:
+ columns = columns + ('is_ha',)
+ column_headers = column_headers + ('HA',)
if parsed_args.long:
columns = columns + (
'routes',
@@ -563,7 +577,7 @@ class SetRouter(command.Command):
metavar='subnet=<subnet>,ip-address=<ip-address>',
action=parseractions.MultiKeyValueAction,
optional_keys=['subnet', 'ip-address'],
- help=_("Desired IP and/or subnet (name or ID)"
+ help=_("Desired IP and/or subnet (name or ID) "
"on external gateway: "
"subnet=<subnet>,ip-address=<ip-address> "
"(repeat option to set multiple fixed IP addresses)")
@@ -579,6 +593,17 @@ class SetRouter(command.Command):
action='store_true',
help=_("Disable Source NAT on external gateway")
)
+ qos_policy_group = parser.add_mutually_exclusive_group()
+ qos_policy_group.add_argument(
+ '--qos-policy',
+ metavar='<qos-policy>',
+ help=_("Attach QoS policy to router gateway IPs")
+ )
+ qos_policy_group.add_argument(
+ '--no-qos-policy',
+ action='store_true',
+ help=_("Remove QoS policy from router gateway IPs")
+ )
_tag.add_tag_option_to_parser_for_set(parser, _('router'))
return parser
@@ -611,10 +636,10 @@ class SetRouter(command.Command):
elif parsed_args.no_route or parsed_args.clear_routes:
attrs['routes'] = []
if (parsed_args.disable_snat or parsed_args.enable_snat or
- parsed_args.fixed_ip) and not parsed_args.external_gateway:
- msg = (_("You must specify '--external-gateway' in order"
- "to update the SNAT or fixed-ip values"))
- raise exceptions.CommandError(msg)
+ parsed_args.fixed_ip) and not parsed_args.external_gateway:
+ msg = (_("You must specify '--external-gateway' in order "
+ "to update the SNAT or fixed-ip values"))
+ raise exceptions.CommandError(msg)
if parsed_args.external_gateway:
gateway_info = {}
network = client.find_network(
@@ -638,6 +663,27 @@ class SetRouter(command.Command):
ips.append(ip_spec)
gateway_info['external_fixed_ips'] = ips
attrs['external_gateway_info'] = gateway_info
+
+ if ((parsed_args.qos_policy or parsed_args.no_qos_policy) and
+ not parsed_args.external_gateway):
+ try:
+ original_net_id = obj.external_gateway_info['network_id']
+ except (KeyError, TypeError):
+ msg = (_("You must specify '--external-gateway' or the router "
+ "must already have an external network in order to "
+ "set router gateway IP QoS"))
+ raise exceptions.CommandError(msg)
+ else:
+ if not attrs.get('external_gateway_info'):
+ attrs['external_gateway_info'] = {}
+ attrs['external_gateway_info']['network_id'] = original_net_id
+ if parsed_args.qos_policy:
+ check_qos_id = client.find_qos_policy(
+ parsed_args.qos_policy, ignore_missing=False).id
+ attrs['external_gateway_info']['qos_policy_id'] = check_qos_id
+
+ if 'no_qos_policy' in parsed_args and parsed_args.no_qos_policy:
+ attrs['external_gateway_info']['qos_policy_id'] = None
if attrs:
client.update_router(obj, **attrs)
# tags is a subresource and it needs to be updated separately.
@@ -702,6 +748,12 @@ class UnsetRouter(command.Command):
default=False,
help=_("Remove external gateway information from the router"))
parser.add_argument(
+ '--qos-policy',
+ action='store_true',
+ default=False,
+ help=_("Remove QoS policy from router gateway IPs")
+ )
+ parser.add_argument(
'router',
metavar="<router>",
help=_("Router to modify (name or ID)")
@@ -713,6 +765,7 @@ class UnsetRouter(command.Command):
client = self.app.client_manager.network
obj = client.find_router(parsed_args.router, ignore_missing=False)
tmp_routes = copy.deepcopy(obj.routes)
+ tmp_external_gateway_info = copy.deepcopy(obj.external_gateway_info)
attrs = {}
if parsed_args.routes:
try:
@@ -723,6 +776,20 @@ class UnsetRouter(command.Command):
msg = (_("Router does not contain route %s") % route)
raise exceptions.CommandError(msg)
attrs['routes'] = tmp_routes
+ if parsed_args.qos_policy:
+ try:
+ if (tmp_external_gateway_info['network_id'] and
+ tmp_external_gateway_info['qos_policy_id']):
+ pass
+ except (KeyError, TypeError):
+ msg = _("Router does not have external network or qos policy")
+ raise exceptions.CommandError(msg)
+ else:
+ attrs['external_gateway_info'] = {
+ 'network_id': tmp_external_gateway_info['network_id'],
+ 'qos_policy_id': None
+ }
+
if parsed_args.external_gateway:
attrs['external_gateway_info'] = {}
if attrs:
diff --git a/openstackclient/network/v2/security_group.py b/openstackclient/network/v2/security_group.py
index 75af3587..ed6c8d7c 100644
--- a/openstackclient/network/v2/security_group.py
+++ b/openstackclient/network/v2/security_group.py
@@ -15,6 +15,7 @@
import argparse
+from osc_lib.command import command
from osc_lib import utils
import six
@@ -23,6 +24,7 @@ from openstackclient.identity import common as identity_common
from openstackclient.network import common
from openstackclient.network import sdk_utils
from openstackclient.network import utils as network_utils
+from openstackclient.network.v2 import _tag
def _format_network_security_group_rules(sg_rules):
@@ -106,6 +108,7 @@ class CreateSecurityGroup(common.NetworkAndComputeShowOne):
help=_("Owner's project (name or ID)")
)
identity_common.add_project_domain_option_to_parser(parser)
+ _tag.add_tag_option_to_parser_for_create(parser, _('security group'))
return parser
def _get_description(self, parsed_args):
@@ -130,6 +133,8 @@ class CreateSecurityGroup(common.NetworkAndComputeShowOne):
# Create the security group and display the results.
obj = client.create_security_group(**attrs)
+ # tags cannot be set when created, so tags need to be set later.
+ _tag.update_tags_for_set(client, obj, parsed_args)
display_columns, property_columns = _get_columns(obj)
data = utils.get_item_properties(
obj,
@@ -198,6 +203,7 @@ class ListSecurityGroup(common.NetworkAndComputeLister):
"(name or ID)")
)
identity_common.add_project_domain_option_to_parser(parser)
+ _tag.add_tag_filtering_option_to_parser(parser, _('security group'))
return parser
def update_parser_compute(self, parser):
@@ -220,19 +226,23 @@ class ListSecurityGroup(common.NetworkAndComputeLister):
).id
filters['tenant_id'] = project_id
filters['project_id'] = project_id
+
+ _tag.get_tag_filtering_args(parsed_args, filters)
data = client.security_groups(**filters)
columns = (
"ID",
"Name",
"Description",
- "Project ID"
+ "Project ID",
+ "tags"
)
column_headers = (
"ID",
"Name",
"Description",
- "Project"
+ "Project",
+ "Tags"
)
return (column_headers,
(utils.get_item_properties(
@@ -282,6 +292,10 @@ class SetSecurityGroup(common.NetworkAndComputeCommand):
)
return parser
+ def update_parser_network(self, parser):
+ _tag.add_tag_option_to_parser_for_set(parser, _('security group'))
+ return parser
+
def take_action_network(self, client, parsed_args):
obj = client.find_security_group(parsed_args.group,
ignore_missing=False)
@@ -295,6 +309,9 @@ class SetSecurityGroup(common.NetworkAndComputeCommand):
# the update.
client.update_security_group(obj, **attrs)
+ # tags is a subresource and it needs to be updated separately.
+ _tag.update_tags_for_set(client, obj, parsed_args)
+
def take_action_compute(self, client, parsed_args):
data = client.api.security_group_find(parsed_args.group)
@@ -344,3 +361,25 @@ class ShowSecurityGroup(common.NetworkAndComputeShowOne):
formatters=_formatters_compute
)
return (display_columns, data)
+
+
+class UnsetSecurityGroup(command.Command):
+ _description = _("Unset security group properties")
+
+ def get_parser(self, prog_name):
+ parser = super(UnsetSecurityGroup, self).get_parser(prog_name)
+ parser.add_argument(
+ 'group',
+ metavar="<group>",
+ help=_("Security group to modify (name or ID)")
+ )
+ _tag.add_tag_option_to_parser_for_unset(parser, _('security group'))
+ return parser
+
+ def take_action(self, parsed_args):
+ client = self.app.client_manager.network
+ obj = client.find_security_group(parsed_args.group,
+ ignore_missing=False)
+
+ # tags is a subresource and it needs to be updated separately.
+ _tag.update_tags_for_unset(client, obj, parsed_args)
diff --git a/openstackclient/network/v2/subnet.py b/openstackclient/network/v2/subnet.py
index b5a8b35a..5f8113bb 100644
--- a/openstackclient/network/v2/subnet.py
+++ b/openstackclient/network/v2/subnet.py
@@ -169,7 +169,7 @@ def _get_attrs(client_manager, parsed_args, is_create=True):
attrs = {}
client = client_manager.network
if 'name' in parsed_args and parsed_args.name is not None:
- attrs['name'] = str(parsed_args.name)
+ attrs['name'] = parsed_args.name
if is_create:
if 'project' in parsed_args and parsed_args.project is not None:
@@ -247,6 +247,7 @@ class CreateSubnet(command.ShowOne):
parser = super(CreateSubnet, self).get_parser(prog_name)
parser.add_argument(
'name',
+ metavar='<name>',
help=_("New subnet name")
)
parser.add_argument(
@@ -412,7 +413,7 @@ class ListSubnet(command.Lister):
choices=[4, 6],
metavar='<ip-version>',
dest='ip_version',
- help=_("List only subnets of given IP version in output."
+ help=_("List only subnets of given IP version in output. "
"Allowed values for IP version are 4 and 6."),
)
dhcp_enable_group = parser.add_mutually_exclusive_group()
@@ -589,7 +590,7 @@ class SetSubnet(command.Command):
if not parsed_args.no_host_route:
attrs['host_routes'] += obj.host_routes
elif parsed_args.no_host_route:
- attrs['host_routes'] = ''
+ attrs['host_routes'] = []
if 'allocation_pools' in attrs:
if not parsed_args.no_allocation_pool:
attrs['allocation_pools'] += obj.allocation_pools
diff --git a/openstackclient/network/v2/subnet_pool.py b/openstackclient/network/v2/subnet_pool.py
index a5839868..ba0b6c45 100644
--- a/openstackclient/network/v2/subnet_pool.py
+++ b/openstackclient/network/v2/subnet_pool.py
@@ -13,7 +13,6 @@
"""Subnet pool action implementations"""
-import copy
import logging
from osc_lib.cli import parseractions
@@ -52,7 +51,7 @@ def _get_attrs(client_manager, parsed_args):
network_client = client_manager.network
if parsed_args.name is not None:
- attrs['name'] = str(parsed_args.name)
+ attrs['name'] = parsed_args.name
if parsed_args.prefixes is not None:
attrs['prefixes'] = parsed_args.prefixes
if parsed_args.default_prefix_length is not None:
@@ -191,8 +190,9 @@ class CreateSubnetPool(command.ShowOne):
'--default-quota',
type=int,
metavar='<num-ip-addresses>',
- help=_("Set default quota for subnet pool as the number of"
- "IP addresses allowed in a subnet")),
+ help=_("Set default per-project quota for this subnet pool "
+ "as the number of IP addresses that can be allocated "
+ "from the subnet pool")),
_tag.add_tag_option_to_parser_for_create(parser, _('subnet pool'))
return parser
@@ -389,8 +389,9 @@ class SetSubnetPool(command.Command):
'--default-quota',
type=int,
metavar='<num-ip-addresses>',
- help=_("Set default quota for subnet pool as the number of"
- "IP addresses allowed in a subnet")),
+ help=_("Set default per-project quota for this subnet pool "
+ "as the number of IP addresses that can be allocated "
+ "from the subnet pool")),
_tag.add_tag_option_to_parser_for_set(parser, _('subnet pool'))
return parser
@@ -441,14 +442,6 @@ class UnsetSubnetPool(command.Command):
def get_parser(self, prog_name):
parser = super(UnsetSubnetPool, self).get_parser(prog_name)
parser.add_argument(
- '--pool-prefix',
- metavar='<pool-prefix>',
- action='append',
- dest='prefixes',
- help=_('Remove subnet pool prefixes (in CIDR notation). '
- '(repeat option to unset multiple prefixes).'),
- )
- parser.add_argument(
'subnet_pool',
metavar="<subnet-pool>",
help=_("Subnet pool to modify (name or ID)")
@@ -460,19 +453,5 @@ class UnsetSubnetPool(command.Command):
client = self.app.client_manager.network
obj = client.find_subnet_pool(
parsed_args.subnet_pool, ignore_missing=False)
- tmp_prefixes = copy.deepcopy(obj.prefixes)
- attrs = {}
- if parsed_args.prefixes:
- for prefix in parsed_args.prefixes:
- try:
- tmp_prefixes.remove(prefix)
- except ValueError:
- msg = _(
- "Subnet pool does not "
- "contain prefix %s") % prefix
- raise exceptions.CommandError(msg)
- attrs['prefixes'] = tmp_prefixes
- if attrs:
- client.update_subnet_pool(obj, **attrs)
# tags is a subresource and it needs to be updated separately.
_tag.update_tags_for_unset(client, obj, parsed_args)
diff --git a/openstackclient/tests/functional/base.py b/openstackclient/tests/functional/base.py
index 90bbc24d..7705c655 100644
--- a/openstackclient/tests/functional/base.py
+++ b/openstackclient/tests/functional/base.py
@@ -24,6 +24,7 @@ COMMON_DIR = os.path.dirname(os.path.abspath(__file__))
FUNCTIONAL_DIR = os.path.normpath(os.path.join(COMMON_DIR, '..'))
ROOT_DIR = os.path.normpath(os.path.join(FUNCTIONAL_DIR, '..'))
EXAMPLE_DIR = os.path.join(ROOT_DIR, 'examples')
+ADMIN_CLOUD = os.environ.get('OS_ADMIN_CLOUD', 'devstack-admin')
def execute(cmd, fail_ok=False, merge_stderr=False):
@@ -59,9 +60,11 @@ class TestCase(testtools.TestCase):
delimiter_line = re.compile('^\+\-[\+\-]+\-\+$')
@classmethod
- def openstack(cls, cmd, fail_ok=False):
+ def openstack(cls, cmd, cloud=ADMIN_CLOUD, fail_ok=False):
"""Executes openstackclient command for the given action."""
- return execute('openstack ' + cmd, fail_ok=fail_ok)
+ return execute(
+ 'openstack --os-cloud={cloud} '.format(cloud=cloud) +
+ cmd, fail_ok=fail_ok)
@classmethod
def get_openstack_configuration_value(cls, configuration):
diff --git a/openstackclient/tests/functional/common/test_quota.py b/openstackclient/tests/functional/common/test_quota.py
index 76c69a4d..85942281 100644
--- a/openstackclient/tests/functional/common/test_quota.py
+++ b/openstackclient/tests/functional/common/test_quota.py
@@ -31,6 +31,38 @@ class QuotaTests(base.TestCase):
cls.PROJECT_NAME =\
cls.get_openstack_configuration_value('auth.project_name')
+ def test_quota_list_details_compute(self):
+ expected_headers = ["Resource", "In Use", "Reserved", "Limit"]
+ cmd_output = json.loads(self.openstack(
+ 'quota list -f json --detail --compute'
+ ))
+ self.assertIsNotNone(cmd_output)
+ resources = []
+ for row in cmd_output:
+ row_headers = [str(r) for r in row.keys()]
+ self.assertEqual(sorted(expected_headers), sorted(row_headers))
+ resources.append(row['Resource'])
+ # Ensure that returned quota is compute quota
+ self.assertIn("instances", resources)
+ # and that there is no network quota here
+ self.assertNotIn("networks", resources)
+
+ def test_quota_list_details_network(self):
+ expected_headers = ["Resource", "In Use", "Reserved", "Limit"]
+ cmd_output = json.loads(self.openstack(
+ 'quota list -f json --detail --network'
+ ))
+ self.assertIsNotNone(cmd_output)
+ resources = []
+ for row in cmd_output:
+ row_headers = [str(r) for r in row.keys()]
+ self.assertEqual(sorted(expected_headers), sorted(row_headers))
+ resources.append(row['Resource'])
+ # Ensure that returned quota is network quota
+ self.assertIn("networks", resources)
+ # and that there is no compute quota here
+ self.assertNotIn("instances", resources)
+
def test_quota_list_network_option(self):
if not self.haz_network:
self.skipTest("No Network service present")
diff --git a/openstackclient/tests/functional/common/test_versions.py b/openstackclient/tests/functional/common/test_versions.py
new file mode 100644
index 00000000..adc74ebc
--- /dev/null
+++ b/openstackclient/tests/functional/common/test_versions.py
@@ -0,0 +1,31 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from openstackclient.tests.functional import base
+
+
+class VersionsTests(base.TestCase):
+ """Functional tests for versions."""
+
+ def test_versions_show(self):
+ # TODO(mordred) Make this better. The trick is knowing what in the
+ # payload to test for.
+ cmd_output = json.loads(self.openstack(
+ 'versions show -f json'
+ ))
+ self.assertIsNotNone(cmd_output)
+ self.assertIn(
+ "Region Name",
+ cmd_output[0],
+ )
diff --git a/openstackclient/tests/functional/compute/v2/test_aggregate.py b/openstackclient/tests/functional/compute/v2/test_aggregate.py
index cf9d2bc0..71026757 100644
--- a/openstackclient/tests/functional/compute/v2/test_aggregate.py
+++ b/openstackclient/tests/functional/compute/v2/test_aggregate.py
@@ -11,6 +11,7 @@
# under the License.
import json
+import time
import uuid
from openstackclient.tests.functional import base
@@ -51,6 +52,23 @@ class AggregateTests(base.TestCase):
cmd_output['availability_zone']
)
+ # Loop a few times since this is timing-sensitive
+ # Just hard-code it for now, since there is no pause and it is
+ # racy we shouldn't have to wait too long, a minute seems reasonable
+ wait_time = 0
+ while wait_time < 60:
+ cmd_output = json.loads(self.openstack(
+ 'aggregate show -f json ' +
+ name2
+ ))
+ if cmd_output['name'] != name2:
+ # Hang out for a bit and try again
+ print('retrying aggregate check')
+ wait_time += 10
+ time.sleep(10)
+ else:
+ break
+
del_output = self.openstack(
'aggregate delete ' +
name1 + ' ' +
diff --git a/openstackclient/tests/functional/compute/v2/test_flavor.py b/openstackclient/tests/functional/compute/v2/test_flavor.py
index eefd3fab..c274adf2 100644
--- a/openstackclient/tests/functional/compute/v2/test_flavor.py
+++ b/openstackclient/tests/functional/compute/v2/test_flavor.py
@@ -112,8 +112,7 @@ class FlavorTests(base.TestCase):
0,
cmd_output["disk"],
)
- self.assertEqual(
- False,
+ self.assertFalse(
cmd_output["os-flavor-access:is_public"],
)
self.assertEqual(
@@ -199,8 +198,7 @@ class FlavorTests(base.TestCase):
20,
cmd_output["disk"],
)
- self.assertEqual(
- False,
+ self.assertFalse(
cmd_output["os-flavor-access:is_public"],
)
self.assertEqual(
diff --git a/openstackclient/tests/functional/compute/v2/test_server.py b/openstackclient/tests/functional/compute/v2/test_server.py
index 0b29fe5f..3cb72d9f 100644
--- a/openstackclient/tests/functional/compute/v2/test_server.py
+++ b/openstackclient/tests/functional/compute/v2/test_server.py
@@ -11,6 +11,7 @@
# under the License.
import json
+import time
import uuid
from tempest.lib import exceptions
@@ -255,10 +256,24 @@ class ServerTests(common.ComputeTestCase):
floating_ip
)
self.assertEqual("", raw_output)
- cmd_output = json.loads(self.openstack(
- 'server show -f json ' +
- name
- ))
+
+ # Loop a few times since this is timing-sensitive
+ # Just hard-code it for now, since there is no pause and it is
+ # racy we shouldn't have to wait too long, a minute seems reasonable
+ wait_time = 0
+ while wait_time < 60:
+ cmd_output = json.loads(self.openstack(
+ 'server show -f json ' +
+ name
+ ))
+ if floating_ip not in cmd_output['addresses']:
+ # Hang out for a bit and try again
+ print('retrying floating IP check')
+ wait_time += 10
+ time.sleep(10)
+ else:
+ break
+
self.assertIn(
floating_ip,
cmd_output['addresses'],
@@ -272,6 +287,23 @@ class ServerTests(common.ComputeTestCase):
)
self.assertEqual("", raw_output)
+ # Loop a few times since this is timing-sensitive
+ # Just hard-code it for now, since there is no pause and it is
+ # racy we shouldn't have to wait too long, a minute seems reasonable
+ wait_time = 0
+ while wait_time < 60:
+ cmd_output = json.loads(self.openstack(
+ 'server show -f json ' +
+ name
+ ))
+ if floating_ip in cmd_output['addresses']:
+ # Hang out for a bit and try again
+ print('retrying floating IP check')
+ wait_time += 10
+ time.sleep(10)
+ else:
+ break
+
cmd_output = json.loads(self.openstack(
'server show -f json ' +
name
@@ -586,7 +618,9 @@ class ServerTests(common.ComputeTestCase):
server_name
)
except exceptions.CommandFailed as e:
- self.assertIn('nics are required after microversion 2.36',
- e.stderr)
- else:
- self.fail('CommandFailed should be raised.')
+ # If we got here, it shouldn't be because a nics value wasn't
+ # provided to the server; it is likely due to something else in
+ # the functional tests like there being multiple available
+ # networks and the test didn't specify a specific network.
+ self.assertNotIn('nics are required after microversion 2.36',
+ e.stderr)
diff --git a/openstackclient/tests/functional/identity/v2/test_project.py b/openstackclient/tests/functional/identity/v2/test_project.py
index b6222a1b..38777c36 100644
--- a/openstackclient/tests/functional/identity/v2/test_project.py
+++ b/openstackclient/tests/functional/identity/v2/test_project.py
@@ -72,7 +72,7 @@ class ProjectTests(common.IdentityTests):
self.assert_show_fields(items, fields)
project = self.parse_show_as_object(raw_output)
self.assertEqual(new_project_name, project['name'])
- self.assertEqual('False', project['enabled'])
+ self.assertFalse(project['enabled'])
self.assertEqual("k0='v0'", project['properties'])
def test_project_show(self):
diff --git a/openstackclient/tests/functional/identity/v3/common.py b/openstackclient/tests/functional/identity/v3/common.py
index 33cb5d86..43b416aa 100644
--- a/openstackclient/tests/functional/identity/v3/common.py
+++ b/openstackclient/tests/functional/identity/v3/common.py
@@ -19,6 +19,7 @@ from openstackclient.tests.functional import base
BASIC_LIST_HEADERS = ['ID', 'Name']
+SYSTEM_CLOUD = os.environ.get('OS_SYSTEM_CLOUD', 'devstack-system-admin')
class IdentityTests(base.TestCase):
@@ -52,6 +53,17 @@ class IdentityTests(base.TestCase):
'id', 'relay_state_prefix', 'sp_url']
SERVICE_PROVIDER_LIST_HEADERS = ['ID', 'Enabled', 'Description',
'Auth URL']
+ IMPLIED_ROLE_LIST_HEADERS = ['Prior Role ID', 'Prior Role Name',
+ 'Implied Role ID', 'Implied Role Name']
+ REGISTERED_LIMIT_FIELDS = ['id', 'service_id', 'resource_name',
+ 'default_limit', 'description', 'region_id']
+ REGISTERED_LIMIT_LIST_HEADERS = ['ID', 'Service ID', 'Resource Name',
+ 'Default Limit', 'Description',
+ 'Region ID']
+ LIMIT_FIELDS = ['id', 'project_id', 'service_id', 'resource_name',
+ 'resource_limit', 'description', 'region_id']
+ LIMIT_LIST_HEADERS = ['ID', 'Project ID', 'Service ID', 'Resource Name',
+ 'Resource Limit', 'Description', 'Region ID']
@classmethod
def setUpClass(cls):
@@ -149,6 +161,17 @@ class IdentityTests(base.TestCase):
self.assertEqual(role_name, role['name'])
return role_name
+ def _create_dummy_implied_role(self, add_clean_up=True):
+ role_name = self._create_dummy_role(add_clean_up)
+ implied_role_name = self._create_dummy_role(add_clean_up)
+ self.openstack(
+ 'implied role create '
+ '--implied-role %(implied_role)s '
+ '%(role)s' % {'implied_role': implied_role_name,
+ 'role': role_name})
+
+ return implied_role_name, role_name
+
def _create_dummy_group(self, add_clean_up=True):
group_name = data_utils.rand_name('TestGroup')
description = data_utils.rand_name('description')
@@ -306,3 +329,81 @@ class IdentityTests(base.TestCase):
items = self.parse_show(raw_output)
self.assert_show_fields(items, self.SERVICE_PROVIDER_FIELDS)
return service_provider
+
+ def _create_dummy_registered_limit(self, add_clean_up=True):
+ service_name = self._create_dummy_service()
+ resource_name = data_utils.rand_name('resource_name')
+ params = {
+ 'service_name': service_name,
+ 'default_limit': 10,
+ 'resource_name': resource_name
+ }
+ raw_output = self.openstack(
+ 'registered limit create'
+ ' --service %(service_name)s'
+ ' --default-limit %(default_limit)s'
+ ' %(resource_name)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ registered_limit_id = self._extract_value_from_items('id', items)
+
+ if add_clean_up:
+ self.addCleanup(
+ self.openstack,
+ 'registered limit delete %s' % registered_limit_id,
+ cloud=SYSTEM_CLOUD
+ )
+
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+ return registered_limit_id
+
+ def _extract_value_from_items(self, key, items):
+ for d in items:
+ for k, v in d.items():
+ if k == key:
+ return v
+
+ def _create_dummy_limit(self, add_clean_up=True):
+ registered_limit_id = self._create_dummy_registered_limit()
+
+ raw_output = self.openstack(
+ 'registered limit show %s' % registered_limit_id,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ resource_name = self._extract_value_from_items('resource_name', items)
+ service_id = self._extract_value_from_items('service_id', items)
+ resource_limit = 15
+
+ project_name = self._create_dummy_project()
+ raw_output = self.openstack('project show %s' % project_name)
+ items = self.parse_show(raw_output)
+ project_id = self._extract_value_from_items('id', items)
+
+ params = {
+ 'project_id': project_id,
+ 'service_id': service_id,
+ 'resource_name': resource_name,
+ 'resource_limit': resource_limit
+ }
+
+ raw_output = self.openstack(
+ 'limit create'
+ ' --project %(project_id)s'
+ ' --service %(service_id)s'
+ ' --resource-limit %(resource_limit)s'
+ ' %(resource_name)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ limit_id = self._extract_value_from_items('id', items)
+
+ if add_clean_up:
+ self.addCleanup(
+ self.openstack, 'limit delete %s' % limit_id,
+ cloud=SYSTEM_CLOUD
+ )
+
+ self.assert_show_fields(items, self.LIMIT_FIELDS)
+ return limit_id
diff --git a/openstackclient/tests/functional/identity/v3/test_limit.py b/openstackclient/tests/functional/identity/v3/test_limit.py
new file mode 100644
index 00000000..b03f0f28
--- /dev/null
+++ b/openstackclient/tests/functional/identity/v3/test_limit.py
@@ -0,0 +1,221 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from tempest.lib.common.utils import data_utils
+
+from openstackclient.tests.functional.identity.v3 import common
+
+SYSTEM_CLOUD = os.environ.get('OS_SYSTEM_CLOUD', 'devstack-system-admin')
+
+
+class LimitTestCase(common.IdentityTests):
+
+ def test_limit_create_with_service_name(self):
+ registered_limit_id = self._create_dummy_registered_limit()
+ raw_output = self.openstack(
+ 'registered limit show %s' % registered_limit_id,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ service_id = self._extract_value_from_items('service_id', items)
+ resource_name = self._extract_value_from_items('resource_name', items)
+
+ raw_output = self.openstack('service show %s' % service_id)
+ items = self.parse_show(raw_output)
+ service_name = self._extract_value_from_items('name', items)
+
+ project_name = self._create_dummy_project()
+ raw_output = self.openstack('project show %s' % project_name)
+ items = self.parse_show(raw_output)
+ project_id = self._extract_value_from_items('id', items)
+
+ params = {
+ 'project_id': project_id,
+ 'service_name': service_name,
+ 'resource_name': resource_name,
+ 'resource_limit': 15
+ }
+ raw_output = self.openstack(
+ 'limit create'
+ ' --project %(project_id)s'
+ ' --service %(service_name)s'
+ ' --resource-limit %(resource_limit)s'
+ ' %(resource_name)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ limit_id = self._extract_value_from_items('id', items)
+ self.addCleanup(
+ self.openstack,
+ 'limit delete %s' % limit_id,
+ cloud=SYSTEM_CLOUD
+ )
+
+ self.assert_show_fields(items, self.LIMIT_FIELDS)
+
+    def test_limit_create_with_project_name(self):
+        registered_limit_id = self._create_dummy_registered_limit()
+        raw_output = self.openstack(
+            'registered limit show %s' % registered_limit_id,
+            cloud=SYSTEM_CLOUD
+        )
+        items = self.parse_show(raw_output)
+        service_id = self._extract_value_from_items('service_id', items)
+        resource_name = self._extract_value_from_items('resource_name', items)
+
+        raw_output = self.openstack('service show %s' % service_id)
+        items = self.parse_show(raw_output)
+        service_name = self._extract_value_from_items('name', items)
+
+        project_name = self._create_dummy_project()
+
+        params = {
+            'project_name': project_name,
+            'service_name': service_name,
+            'resource_name': resource_name,
+            'resource_limit': 15
+        }
+        raw_output = self.openstack(
+            'limit create'
+            ' --project %(project_name)s'
+            ' --service %(service_name)s'
+            ' --resource-limit %(resource_limit)s'
+            ' %(resource_name)s' % params,
+            cloud=SYSTEM_CLOUD
+        )
+        items = self.parse_show(raw_output)
+        limit_id = self._extract_value_from_items('id', items)
+        self.addCleanup(
+            self.openstack,
+            'limit delete %s' % limit_id,
+            cloud=SYSTEM_CLOUD
+        )
+
+        self.assert_show_fields(items, self.LIMIT_FIELDS)
+
+
+ def test_limit_create_with_service_id(self):
+ self._create_dummy_limit()
+
+ def test_limit_create_with_project_id(self):
+ self._create_dummy_limit()
+
+ def test_limit_create_with_options(self):
+ registered_limit_id = self._create_dummy_registered_limit()
+ region_id = self._create_dummy_region()
+
+ params = {
+ 'region_id': region_id,
+ 'registered_limit_id': registered_limit_id
+ }
+
+ raw_output = self.openstack(
+ 'registered limit set'
+ ' %(registered_limit_id)s'
+ ' --region %(region_id)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ service_id = self._extract_value_from_items('service_id', items)
+ resource_name = self._extract_value_from_items('resource_name', items)
+
+ project_name = self._create_dummy_project()
+ raw_output = self.openstack('project show %s' % project_name)
+ items = self.parse_show(raw_output)
+ project_id = self._extract_value_from_items('id', items)
+ description = data_utils.arbitrary_string()
+
+ params = {
+ 'project_id': project_id,
+ 'service_id': service_id,
+ 'resource_name': resource_name,
+ 'resource_limit': 15,
+ 'region_id': region_id,
+ 'description': description
+ }
+ raw_output = self.openstack(
+ 'limit create'
+ ' --project %(project_id)s'
+ ' --service %(service_id)s'
+ ' --resource-limit %(resource_limit)s'
+ ' --region %(region_id)s'
+ ' --description %(description)s'
+ ' %(resource_name)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ limit_id = self._extract_value_from_items('id', items)
+ self.addCleanup(
+ self.openstack,
+ 'limit delete %s' % limit_id,
+ cloud=SYSTEM_CLOUD
+ )
+
+ self.assert_show_fields(items, self.LIMIT_FIELDS)
+
+ def test_limit_show(self):
+ limit_id = self._create_dummy_limit()
+ raw_output = self.openstack(
+ 'limit show %s' % limit_id,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.LIMIT_FIELDS)
+
+ def test_limit_set_description(self):
+ limit_id = self._create_dummy_limit()
+
+ params = {
+ 'description': data_utils.arbitrary_string(),
+ 'limit_id': limit_id
+ }
+
+ raw_output = self.openstack(
+ 'limit set'
+ ' --description %(description)s'
+ ' %(limit_id)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.LIMIT_FIELDS)
+
+ def test_limit_set_resource_limit(self):
+ limit_id = self._create_dummy_limit()
+
+ params = {
+ 'resource_limit': 5,
+ 'limit_id': limit_id
+ }
+
+ raw_output = self.openstack(
+ 'limit set'
+ ' --resource-limit %(resource_limit)s'
+ ' %(limit_id)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.LIMIT_FIELDS)
+
+ def test_limit_list(self):
+ self._create_dummy_limit()
+ raw_output = self.openstack('limit list', cloud=SYSTEM_CLOUD)
+ items = self.parse_listing(raw_output)
+ self.assert_table_structure(items, self.LIMIT_LIST_HEADERS)
+
+ def test_limit_delete(self):
+ limit_id = self._create_dummy_limit(add_clean_up=False)
+ raw_output = self.openstack(
+ 'limit delete %s' % limit_id,
+ cloud=SYSTEM_CLOUD)
+ self.assertEqual(0, len(raw_output))
diff --git a/openstackclient/tests/functional/identity/v3/test_registered_limit.py b/openstackclient/tests/functional/identity/v3/test_registered_limit.py
new file mode 100644
index 00000000..80f51ad9
--- /dev/null
+++ b/openstackclient/tests/functional/identity/v3/test_registered_limit.py
@@ -0,0 +1,198 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from tempest.lib.common.utils import data_utils
+
+from openstackclient.tests.functional.identity.v3 import common
+
+SYSTEM_CLOUD = os.environ.get('OS_SYSTEM_CLOUD', 'devstack-system-admin')
+
+
+class RegisteredLimitTestCase(common.IdentityTests):
+
+ def test_registered_limit_create_with_service_name(self):
+ self._create_dummy_registered_limit()
+
+ def test_registered_limit_create_with_service_id(self):
+ service_name = self._create_dummy_service()
+ raw_output = self.openstack(
+ 'service show'
+ ' %(service_name)s' % {'service_name': service_name}
+ )
+ service_items = self.parse_show(raw_output)
+ service_id = self._extract_value_from_items('id', service_items)
+
+ raw_output = self.openstack(
+ 'registered limit create'
+ ' --service %(service_id)s'
+ ' --default-limit %(default_limit)s'
+ ' %(resource_name)s' % {
+ 'service_id': service_id,
+ 'default_limit': 10,
+ 'resource_name': 'cores'
+ },
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ registered_limit_id = self._extract_value_from_items('id', items)
+ self.addCleanup(
+ self.openstack,
+ 'registered limit delete'
+ ' %(registered_limit_id)s' % {
+ 'registered_limit_id': registered_limit_id
+ },
+ cloud=SYSTEM_CLOUD
+ )
+
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+
+ def test_registered_limit_create_with_options(self):
+ service_name = self._create_dummy_service()
+ region_id = self._create_dummy_region()
+ params = {
+ 'service_name': service_name,
+ 'resource_name': 'cores',
+ 'default_limit': 10,
+ 'description': 'default limit for cores',
+ 'region_id': region_id
+ }
+
+ raw_output = self.openstack(
+ 'registered limit create'
+ ' --description \'%(description)s\''
+ ' --region %(region_id)s'
+ ' --service %(service_name)s'
+ ' --default-limit %(default_limit)s'
+ ' %(resource_name)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ registered_limit_id = self._extract_value_from_items('id', items)
+ self.addCleanup(
+ self.openstack,
+ 'registered limit delete %(registered_limit_id)s' % {
+ 'registered_limit_id': registered_limit_id
+ },
+ cloud=SYSTEM_CLOUD
+ )
+
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+
+ def test_registered_limit_show(self):
+ registered_limit_id = self._create_dummy_registered_limit()
+ raw_output = self.openstack(
+ 'registered limit show %(registered_limit_id)s' % {
+ 'registered_limit_id': registered_limit_id
+ }
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+
+ def test_registered_limit_set_region_id(self):
+ region_id = self._create_dummy_region()
+ registered_limit_id = self._create_dummy_registered_limit()
+
+ params = {
+ 'registered_limit_id': registered_limit_id,
+ 'region_id': region_id
+ }
+ raw_output = self.openstack(
+ 'registered limit set'
+ ' %(registered_limit_id)s'
+ ' --region %(region_id)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+
+ def test_registered_limit_set_description(self):
+ registered_limit_id = self._create_dummy_registered_limit()
+ params = {
+ 'registered_limit_id': registered_limit_id,
+ 'description': 'updated description'
+ }
+ raw_output = self.openstack(
+ 'registered limit set'
+ ' %(registered_limit_id)s'
+ ' --description \'%(description)s\'' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+
+ def test_registered_limit_set_service(self):
+ registered_limit_id = self._create_dummy_registered_limit()
+ service_name = self._create_dummy_service()
+ params = {
+ 'registered_limit_id': registered_limit_id,
+ 'service': service_name
+ }
+ raw_output = self.openstack(
+ 'registered limit set'
+ ' %(registered_limit_id)s'
+ ' --service %(service)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+
+ def test_registered_limit_set_default_limit(self):
+ registered_limit_id = self._create_dummy_registered_limit()
+ params = {
+ 'registered_limit_id': registered_limit_id,
+ 'default_limit': 20
+ }
+ raw_output = self.openstack(
+ 'registered limit set'
+ ' %(registered_limit_id)s'
+ ' --default-limit %(default_limit)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+
+ def test_registered_limit_set_resource_name(self):
+ registered_limit_id = self._create_dummy_registered_limit()
+ resource_name = data_utils.rand_name('resource_name')
+ params = {
+ 'registered_limit_id': registered_limit_id,
+ 'resource_name': resource_name
+ }
+ raw_output = self.openstack(
+ 'registered limit set'
+ ' %(registered_limit_id)s'
+ ' --resource-name %(resource_name)s' % params,
+ cloud=SYSTEM_CLOUD
+ )
+ items = self.parse_show(raw_output)
+ self.assert_show_fields(items, self.REGISTERED_LIMIT_FIELDS)
+
+ def test_registered_limit_list(self):
+ self._create_dummy_registered_limit()
+ raw_output = self.openstack('registered limit list')
+ items = self.parse_listing(raw_output)
+ self.assert_table_structure(items, self.REGISTERED_LIMIT_LIST_HEADERS)
+
+ def test_registered_limit_delete(self):
+ registered_limit_id = self._create_dummy_registered_limit(
+ add_clean_up=False
+ )
+ raw_output = self.openstack(
+ 'registered limit delete'
+ ' %(registered_limit_id)s' % {
+ 'registered_limit_id': registered_limit_id
+ },
+ cloud=SYSTEM_CLOUD
+ )
+ self.assertEqual(0, len(raw_output))
diff --git a/openstackclient/tests/functional/identity/v3/test_role.py b/openstackclient/tests/functional/identity/v3/test_role.py
index ab8af9c0..fb9e0614 100644
--- a/openstackclient/tests/functional/identity/v3/test_role.py
+++ b/openstackclient/tests/functional/identity/v3/test_role.py
@@ -143,3 +143,28 @@ class RoleTests(common.IdentityTests):
'role': role_name})
self.assertEqual(0, len(add_raw_output))
self.assertEqual(0, len(remove_raw_output))
+
+ def test_implied_role_list(self):
+ self._create_dummy_implied_role()
+ raw_output = self.openstack('implied role list')
+ items = self.parse_listing(raw_output)
+ self.assert_table_structure(items, self.IMPLIED_ROLE_LIST_HEADERS)
+ self.assertEqual(3, len(items))
+
+ def test_implied_role_create(self):
+ role_name = self._create_dummy_role()
+ implied_role_name = self._create_dummy_role()
+ self.openstack(
+ 'implied role create '
+ '--implied-role %(implied_role)s '
+ '%(role)s' % {'implied_role': implied_role_name,
+ 'role': role_name})
+
+ def test_implied_role_delete(self):
+ implied_role_name, role_name = self._create_dummy_implied_role()
+ raw_output = self.openstack(
+ 'implied role delete '
+ '--implied-role %(implied_role)s '
+ '%(role)s' % {'implied_role': implied_role_name,
+ 'role': role_name})
+ self.assertEqual(0, len(raw_output))
diff --git a/openstackclient/tests/functional/image/base.py b/openstackclient/tests/functional/image/base.py
new file mode 100644
index 00000000..4b2ab64b
--- /dev/null
+++ b/openstackclient/tests/functional/image/base.py
@@ -0,0 +1,24 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from openstackclient.tests.functional import base
+
+
+class BaseImageTests(base.TestCase):
+ """Functional tests for Image commands"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(BaseImageTests, cls).setUpClass()
+ # TODO(dtroyer): maybe do image API discovery here to determine
+ # what is available, it isn't in the service catalog
+ cls.haz_v1_api = False
diff --git a/openstackclient/tests/functional/image/v1/test_image.py b/openstackclient/tests/functional/image/v1/test_image.py
index fa073f99..30490bf5 100644
--- a/openstackclient/tests/functional/image/v1/test_image.py
+++ b/openstackclient/tests/functional/image/v1/test_image.py
@@ -15,50 +15,47 @@ import uuid
import fixtures
-from openstackclient.tests.functional import base
+from openstackclient.tests.functional.image import base
-class ImageTests(base.TestCase):
- """Functional tests for image. """
+class ImageTests(base.BaseImageTests):
+ """Functional tests for Image commands"""
- NAME = uuid.uuid4().hex
- OTHER_NAME = uuid.uuid4().hex
+ def setUp(self):
+ super(ImageTests, self).setUp()
+ if not self.haz_v1_api:
+ self.skipTest('No Image v1 API present')
- @classmethod
- def setUpClass(cls):
- super(ImageTests, cls).setUpClass()
- json_output = json.loads(cls.openstack(
+ self.name = uuid.uuid4().hex
+ json_output = json.loads(self.openstack(
'--os-image-api-version 1 '
'image create -f json ' +
- cls.NAME
+ self.name
))
- cls.image_id = json_output["id"]
- cls.assertOutput(cls.NAME, json_output['name'])
+ self.image_id = json_output["id"]
+ self.assertOutput(self.name, json_output['name'])
+
+ ver_fixture = fixtures.EnvironmentVariable(
+ 'OS_IMAGE_API_VERSION', '1'
+ )
+ self.useFixture(ver_fixture)
- @classmethod
- def tearDownClass(cls):
+ def tearDown(self):
try:
- cls.openstack(
+ self.openstack(
'--os-image-api-version 1 '
'image delete ' +
- cls.image_id
+ self.image_id
)
finally:
- super(ImageTests, cls).tearDownClass()
-
- def setUp(self):
- super(ImageTests, self).setUp()
- ver_fixture = fixtures.EnvironmentVariable(
- 'OS_IMAGE_API_VERSION', '1'
- )
- self.useFixture(ver_fixture)
+ super(ImageTests, self).tearDown()
def test_image_list(self):
json_output = json.loads(self.openstack(
'image list -f json '
))
self.assertIn(
- self.NAME,
+ self.name,
[img['Name'] for img in json_output]
)
@@ -72,11 +69,11 @@ class ImageTests(base.TestCase):
'--min-ram 5 ' +
'--disk-format qcow2 ' +
'--public ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image show -f json ' +
- self.NAME
+ self.name
))
self.assertEqual(
4,
@@ -100,11 +97,11 @@ class ImageTests(base.TestCase):
'--property a=b ' +
'--property c=d ' +
'--public ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image show -f json ' +
- self.NAME
+ self.name
))
self.assertEqual(
"a='b', c='d'",
diff --git a/openstackclient/tests/functional/image/v2/test_image.py b/openstackclient/tests/functional/image/v2/test_image.py
index 278ba5b9..3185c3bd 100644
--- a/openstackclient/tests/functional/image/v2/test_image.py
+++ b/openstackclient/tests/functional/image/v2/test_image.py
@@ -16,59 +16,55 @@ import uuid
import fixtures
# from glanceclient import exc as image_exceptions
-from openstackclient.tests.functional import base
+from openstackclient.tests.functional.image import base
-class ImageTests(base.TestCase):
- """Functional tests for image. """
+class ImageTests(base.BaseImageTests):
+ """Functional tests for Image commands"""
- NAME = uuid.uuid4().hex
- OTHER_NAME = uuid.uuid4().hex
+ def setUp(self):
+ super(ImageTests, self).setUp()
- @classmethod
- def setUpClass(cls):
- super(ImageTests, cls).setUpClass()
- json_output = json.loads(cls.openstack(
+ self.name = uuid.uuid4().hex
+ self.image_tag = 'my_tag'
+ json_output = json.loads(self.openstack(
'--os-image-api-version 2 '
- 'image create -f json ' +
- cls.NAME
+ 'image create -f json --tag {tag} {name}'.format(
+ tag=self.image_tag, name=self.name)
))
- cls.image_id = json_output["id"]
- cls.assertOutput(cls.NAME, json_output['name'])
+ self.image_id = json_output["id"]
+ self.assertOutput(self.name, json_output['name'])
+
+ ver_fixture = fixtures.EnvironmentVariable(
+ 'OS_IMAGE_API_VERSION', '2'
+ )
+ self.useFixture(ver_fixture)
- @classmethod
- def tearDownClass(cls):
+ def tearDown(self):
try:
- cls.openstack(
+ self.openstack(
'--os-image-api-version 2 '
'image delete ' +
- cls.image_id
+ self.image_id
)
finally:
- super(ImageTests, cls).tearDownClass()
-
- def setUp(self):
- super(ImageTests, self).setUp()
- ver_fixture = fixtures.EnvironmentVariable(
- 'OS_IMAGE_API_VERSION', '2'
- )
- self.useFixture(ver_fixture)
+ super(ImageTests, self).tearDown()
def test_image_list(self):
json_output = json.loads(self.openstack(
'image list -f json '
))
self.assertIn(
- self.NAME,
+ self.name,
[img['Name'] for img in json_output]
)
def test_image_list_with_name_filter(self):
json_output = json.loads(self.openstack(
- 'image list --name ' + self.NAME + ' -f json'
+ 'image list --name ' + self.name + ' -f json'
))
self.assertIn(
- self.NAME,
+ self.name,
[img['Name'] for img in json_output]
)
@@ -81,6 +77,16 @@ class ImageTests(base.TestCase):
[img['Status'] for img in json_output]
)
+ def test_image_list_with_tag_filter(self):
+ json_output = json.loads(self.openstack(
+ 'image list --tag ' + self.image_tag + ' --long -f json'
+ ))
+ for taglist in [img['Tags'].split(', ') for img in json_output]:
+ self.assertIn(
+ self.image_tag,
+ taglist
+ )
+
def test_image_attributes(self):
"""Test set, unset, show on attributes, tags and properties"""
@@ -90,11 +96,11 @@ class ImageTests(base.TestCase):
'--min-disk 4 ' +
'--min-ram 5 ' +
'--public ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image show -f json ' +
- self.NAME
+ self.name
))
self.assertEqual(
4,
@@ -115,59 +121,63 @@ class ImageTests(base.TestCase):
'--property a=b ' +
'--property c=d ' +
'--public ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image show -f json ' +
- self.NAME
+ self.name
))
- self.assertEqual(
- "a='b', c='d'",
- json_output["properties"],
- )
+ # NOTE(dtroyer): Don't do a full-string compare so we are tolerant of
+ # new artributes in the returned data
+ self.assertIn("a='b'", json_output["properties"])
+ self.assertIn("c='d'", json_output["properties"])
self.openstack(
'image unset ' +
'--property a ' +
'--property c ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image show -f json ' +
- self.NAME
+ self.name
))
- self.assertNotIn(
- 'properties',
- json_output,
- )
+ # NOTE(dtroyer): Don't do a full-string compare so we are tolerant of
+ # new artributes in the returned data
+ self.assertNotIn("a='b'", json_output["properties"])
+ self.assertNotIn("c='d'", json_output["properties"])
# Test tags
+ self.assertNotIn(
+ '01',
+ json_output["tags"].split(', ')
+ )
self.openstack(
'image set ' +
'--tag 01 ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image show -f json ' +
- self.NAME
+ self.name
))
- self.assertEqual(
+ self.assertIn(
'01',
- json_output["tags"],
+ json_output["tags"].split(', ')
)
self.openstack(
'image unset ' +
'--tag 01 ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image show -f json ' +
- self.NAME
+ self.name
))
- self.assertEqual(
- '',
- json_output["tags"],
+ self.assertNotIn(
+ '01',
+ json_output["tags"].split(', ')
)
def test_image_set_rename(self):
@@ -207,7 +217,7 @@ class ImageTests(base.TestCase):
json_output = json.loads(self.openstack(
'image show -f json ' +
- self.NAME
+ self.name
))
# NOTE(dtroyer): Until OSC supports --shared flags in create and set
# we can not properly test membership. Sometimes the
@@ -215,47 +225,47 @@ class ImageTests(base.TestCase):
if json_output["visibility"] == 'shared':
self.openstack(
'image add project ' +
- self.NAME + ' ' +
+ self.name + ' ' +
my_project_id
)
# self.addCleanup(
# self.openstack,
# 'image remove project ' +
- # self.NAME + ' ' +
+ # self.name + ' ' +
# my_project_id
# )
self.openstack(
'image set ' +
'--accept ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image list -f json ' +
'--shared'
))
self.assertIn(
- self.NAME,
+ self.name,
[img['Name'] for img in json_output]
)
self.openstack(
'image set ' +
'--reject ' +
- self.NAME
+ self.name
)
json_output = json.loads(self.openstack(
'image list -f json ' +
'--shared'
))
# self.assertNotIn(
- # self.NAME,
+ # self.name,
# [img['Name'] for img in json_output]
# )
self.openstack(
'image remove project ' +
- self.NAME + ' ' +
+ self.name + ' ' +
my_project_id
)
@@ -265,11 +275,11 @@ class ImageTests(base.TestCase):
# image_exceptions.HTTPForbidden,
# self.openstack,
# 'image add project ' +
- # self.NAME + ' ' +
+ # self.name + ' ' +
# my_project_id
# )
# self.openstack(
# 'image set ' +
# '--share ' +
- # self.NAME
+ # self.name
# )
diff --git a/openstackclient/tests/functional/network/v2/test_address_scope.py b/openstackclient/tests/functional/network/v2/test_address_scope.py
index ebd2ba86..8a99ec5e 100644
--- a/openstackclient/tests/functional/network/v2/test_address_scope.py
+++ b/openstackclient/tests/functional/network/v2/test_address_scope.py
@@ -42,10 +42,7 @@ class AddressScopeTests(common.NetworkTests):
cmd_output['name'],
)
# Check the default values
- self.assertEqual(
- False,
- cmd_output['shared'],
- )
+ self.assertFalse(cmd_output['shared'])
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
@@ -80,10 +77,7 @@ class AddressScopeTests(common.NetworkTests):
4,
cmd_output['ip_version'],
)
- self.assertEqual(
- True,
- cmd_output['shared'],
- )
+ self.assertTrue(cmd_output['shared'])
name2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
@@ -101,10 +95,7 @@ class AddressScopeTests(common.NetworkTests):
6,
cmd_output['ip_version'],
)
- self.assertEqual(
- False,
- cmd_output['shared'],
- )
+ self.assertFalse(cmd_output['shared'])
# Test list
cmd_output = json.loads(self.openstack(
@@ -149,10 +140,7 @@ class AddressScopeTests(common.NetworkTests):
4,
cmd_output['ip_version'],
)
- self.assertEqual(
- False,
- cmd_output['shared'],
- )
+ self.assertFalse(cmd_output['shared'])
raw_output = self.openstack(
'address scope set ' +
@@ -174,7 +162,4 @@ class AddressScopeTests(common.NetworkTests):
4,
cmd_output['ip_version'],
)
- self.assertEqual(
- True,
- cmd_output['shared'],
- )
+ self.assertTrue(cmd_output['shared'])
diff --git a/openstackclient/tests/functional/network/v2/test_network.py b/openstackclient/tests/functional/network/v2/test_network.py
index 9cef135f..1a744969 100644
--- a/openstackclient/tests/functional/network/v2/test_network.py
+++ b/openstackclient/tests/functional/network/v2/test_network.py
@@ -70,8 +70,7 @@ class NetworkTests(common.NetworkTagTests):
'1.2.4.0/28',
cmd_output["cidr"],
)
- self.assertEqual(
- True,
+ self.assertTrue(
cmd_output["share_address"],
)
@@ -124,8 +123,7 @@ class NetworkTests(common.NetworkTagTests):
'UP',
cmd_output["admin_state_up"],
)
- self.assertEqual(
- False,
+ self.assertFalse(
cmd_output["shared"],
)
self.assertEqual(
@@ -236,21 +234,14 @@ class NetworkTests(common.NetworkTagTests):
'UP',
cmd_output["admin_state_up"],
)
- self.assertEqual(
- False,
- cmd_output["shared"],
- )
+ self.assertFalse(cmd_output["shared"])
self.assertEqual(
'Internal',
cmd_output["router:external"],
)
- self.assertEqual(
- False,
- cmd_output["is_default"],
- )
- self.assertEqual(
- True,
- cmd_output["port_security_enabled"],
+ self.assertFalse(cmd_output["is_default"])
+ self.assertTrue(
+ cmd_output["port_security_enabled"]
)
else:
self.assertEqual(
@@ -278,27 +269,15 @@ class NetworkTests(common.NetworkTagTests):
'DOWN',
cmd_output["admin_state_up"],
)
- self.assertEqual(
- True,
- cmd_output["shared"],
- )
- self.assertEqual(
- False,
- cmd_output["is_default"],
- )
- self.assertEqual(
- True,
- cmd_output["port_security_enabled"],
- )
+ self.assertTrue(cmd_output["shared"])
+ self.assertFalse(cmd_output["is_default"])
+ self.assertTrue(cmd_output["port_security_enabled"])
else:
self.assertEqual(
'4.5.6.0/28',
cmd_output["cidr"],
)
- self.assertEqual(
- True,
- cmd_output["share_address"],
- )
+ self.assertTrue(cmd_output["share_address"])
# Test list
cmd_output = json.loads(self.openstack(
@@ -422,22 +401,15 @@ class NetworkTests(common.NetworkTagTests):
'UP',
cmd_output["admin_state_up"],
)
- self.assertEqual(
- False,
- cmd_output["shared"],
- )
+ self.assertFalse(cmd_output["shared"])
self.assertEqual(
'Internal',
cmd_output["router:external"],
)
- self.assertEqual(
- False,
- cmd_output["is_default"],
- )
- self.assertEqual(
- True,
- cmd_output["port_security_enabled"],
+ self.assertFalse(cmd_output["is_default"])
+ self.assertTrue(
+ cmd_output["port_security_enabled"]
)
raw_output = self.openstack(
@@ -463,19 +435,12 @@ class NetworkTests(common.NetworkTagTests):
'DOWN',
cmd_output["admin_state_up"],
)
- self.assertEqual(
- True,
- cmd_output["shared"],
- )
+ self.assertTrue(cmd_output["shared"])
self.assertEqual(
'External',
cmd_output["router:external"],
)
- self.assertEqual(
- False,
- cmd_output["is_default"],
- )
- self.assertEqual(
- False,
- cmd_output["port_security_enabled"],
+ self.assertFalse(cmd_output["is_default"])
+ self.assertFalse(
+ cmd_output["port_security_enabled"]
)
diff --git a/openstackclient/tests/functional/network/v2/test_network_flavor.py b/openstackclient/tests/functional/network/v2/test_network_flavor.py
index ba3de2cd..cf68a096 100644
--- a/openstackclient/tests/functional/network/v2/test_network_flavor.py
+++ b/openstackclient/tests/functional/network/v2/test_network_flavor.py
@@ -87,10 +87,7 @@ class NetworkFlavorTests(common.NetworkTests):
name1,
cmd_output['name'],
)
- self.assertEqual(
- True,
- cmd_output['enabled'],
- )
+ self.assertTrue(cmd_output['enabled'])
self.assertEqual(
'testdescription',
cmd_output['description'],
@@ -105,10 +102,7 @@ class NetworkFlavorTests(common.NetworkTests):
name2,
cmd_output['name'],
)
- self.assertEqual(
- False,
- cmd_output['enabled'],
- )
+ self.assertFalse(cmd_output['enabled'])
self.assertEqual(
'testdescription1',
cmd_output['description'],
diff --git a/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py b/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py
index 2207c847..5b5ec926 100644
--- a/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py
+++ b/openstackclient/tests/functional/network/v2/test_network_flavor_profile.py
@@ -35,10 +35,7 @@ class NetworkFlavorProfileTests(common.NetworkTests):
))
ID = json_output.get('id')
self.assertIsNotNone(ID)
- self.assertEqual(
- True,
- json_output.get('enabled'),
- )
+ self.assertTrue(json_output.get('enabled'))
self.assertEqual(
'fakedescription',
json_output.get('description'),
@@ -61,10 +58,7 @@ class NetworkFlavorProfileTests(common.NetworkTests):
))
ID1 = json_output.get('id')
self.assertIsNotNone(ID1)
- self.assertEqual(
- True,
- json_output.get('enabled'),
- )
+ self.assertTrue(json_output.get('enabled'))
self.assertEqual(
'fakedescription',
json_output.get('description'),
@@ -82,10 +76,7 @@ class NetworkFlavorProfileTests(common.NetworkTests):
))
ID2 = json_output.get('id')
self.assertIsNotNone(ID2)
- self.assertEqual(
- False,
- json_output.get('enabled'),
- )
+ self.assertFalse(json_output.get('enabled'))
self.assertEqual(
'fakedescription',
json_output.get('description'),
@@ -120,10 +111,7 @@ class NetworkFlavorProfileTests(common.NetworkTests):
))
ID = json_output_1.get('id')
self.assertIsNotNone(ID)
- self.assertEqual(
- True,
- json_output_1.get('enabled'),
- )
+ self.assertTrue(json_output_1.get('enabled'))
self.assertEqual(
'fakedescription',
json_output_1.get('description'),
@@ -138,10 +126,7 @@ class NetworkFlavorProfileTests(common.NetworkTests):
json_output = json.loads(self.openstack(
'network flavor profile show -f json ' + ID
))
- self.assertEqual(
- False,
- json_output.get('enabled'),
- )
+ self.assertFalse(json_output.get('enabled'))
self.assertEqual(
'fakedescription',
json_output.get('description'),
@@ -171,10 +156,7 @@ class NetworkFlavorProfileTests(common.NetworkTests):
ID,
json_output["id"],
)
- self.assertEqual(
- True,
- json_output["enabled"],
- )
+ self.assertTrue(json_output["enabled"])
self.assertEqual(
'fakedescription',
json_output["description"],
diff --git a/openstackclient/tests/functional/network/v2/test_network_meter.py b/openstackclient/tests/functional/network/v2/test_network_meter.py
index 7f6da28d..0a8b89ca 100644
--- a/openstackclient/tests/functional/network/v2/test_network_meter.py
+++ b/openstackclient/tests/functional/network/v2/test_network_meter.py
@@ -48,10 +48,7 @@ class TestMeter(common.NetworkTests):
json_output.get('name'),
)
# Check if default shared values
- self.assertEqual(
- False,
- json_output.get('shared'),
- )
+ self.assertFalse(json_output.get('shared'))
self.assertEqual(
'fakedescription',
json_output.get('description'),
@@ -67,10 +64,7 @@ class TestMeter(common.NetworkTests):
json_output_2.get('name'),
)
# Check if default shared values
- self.assertEqual(
- False,
- json_output_2.get('shared'),
- )
+ self.assertFalse(json_output_2.get('shared'))
self.assertEqual(
'fakedescription',
json_output_2.get('description'),
@@ -99,10 +93,7 @@ class TestMeter(common.NetworkTests):
'Test1',
json_output.get('description'),
)
- self.assertEqual(
- True,
- json_output.get('shared'),
- )
+ self.assertTrue(json_output.get('shared'))
name2 = uuid.uuid4().hex
json_output_2 = json.loads(self.openstack(
@@ -117,8 +108,7 @@ class TestMeter(common.NetworkTests):
'Test2',
json_output_2.get('description'),
)
- self.assertEqual(
- False,
+ self.assertFalse(
json_output_2.get('shared'),
)
@@ -143,10 +133,7 @@ class TestMeter(common.NetworkTests):
json_output = json.loads(self.openstack(
'network meter show -f json ' + meter_id
))
- self.assertEqual(
- False,
- json_output.get('shared'),
- )
+ self.assertFalse(json_output.get('shared'))
self.assertEqual(
'fakedescription',
json_output.get('description'),
@@ -164,10 +151,7 @@ class TestMeter(common.NetworkTests):
meter_id,
json_output.get('id'),
)
- self.assertEqual(
- False,
- json_output.get('shared'),
- )
+ self.assertFalse(json_output.get('shared'))
self.assertEqual(
'fakedescription',
json_output.get('description'),
diff --git a/openstackclient/tests/functional/network/v2/test_router.py b/openstackclient/tests/functional/network/v2/test_router.py
index 95c5a96f..9d5beff0 100644
--- a/openstackclient/tests/functional/network/v2/test_router.py
+++ b/openstackclient/tests/functional/network/v2/test_router.py
@@ -247,10 +247,7 @@ class RouterTests(common.NetworkTagTests):
'router show -f json ' +
new_name
))
- self.assertEqual(
- True,
- cmd_output["distributed"],
- )
+ self.assertTrue(cmd_output["distributed"])
self.assertIsNotNone(cmd_output["external_gateway_info"])
# Test unset
diff --git a/openstackclient/tests/functional/run_stestr.sh b/openstackclient/tests/functional/run_stestr.sh
deleted file mode 100755
index 229b42b6..00000000
--- a/openstackclient/tests/functional/run_stestr.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-# This is a script that runs ostestr with the openrc OS_ variables sourced.
-# Do not run this script unless you know what you're doing.
-# For more information refer to:
-# https://docs.openstack.org/python-openstackclient/latest/
-
-# Source environment variables to kick things off
-if [ -f ~stack/devstack/openrc ] ; then
- source ~stack/devstack/openrc admin admin
-fi
-
-echo 'Running tests with:'
-env | grep OS
-
-stestr run $*
diff --git a/openstackclient/tests/functional/volume/v1/test_snapshot.py b/openstackclient/tests/functional/volume/v1/test_snapshot.py
index c60472c5..083cd1b0 100644
--- a/openstackclient/tests/functional/volume/v1/test_snapshot.py
+++ b/openstackclient/tests/functional/volume/v1/test_snapshot.py
@@ -42,7 +42,7 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
finally:
super(VolumeSnapshotTests, cls).tearDownClass()
- def test_volume_snapshot__delete(self):
+ def test_volume_snapshot_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
diff --git a/openstackclient/tests/functional/volume/v1/test_transfer_request.py b/openstackclient/tests/functional/volume/v1/test_transfer_request.py
index 73191fc9..0399e6cc 100644
--- a/openstackclient/tests/functional/volume/v1/test_transfer_request.py
+++ b/openstackclient/tests/functional/volume/v1/test_transfer_request.py
@@ -29,20 +29,13 @@ class TransferRequestTests(common.BaseVolumeTests):
'volume create -f json --size 1 ' + cls.VOLUME_NAME))
cls.assertOutput(cls.VOLUME_NAME, cmd_output['name'])
- cmd_output = json.loads(cls.openstack(
- 'volume transfer request create -f json ' +
- cls.VOLUME_NAME +
- ' --name ' + cls.NAME))
- cls.assertOutput(cls.NAME, cmd_output['name'])
+ cls.wait_for_status("volume", cls.VOLUME_NAME, "available")
@classmethod
def tearDownClass(cls):
try:
- raw_output_transfer = cls.openstack(
- 'volume transfer request delete ' + cls.NAME)
raw_output_volume = cls.openstack(
'volume delete ' + cls.VOLUME_NAME)
- cls.assertOutput('', raw_output_transfer)
cls.assertOutput('', raw_output_volume)
finally:
super(TransferRequestTests, cls).tearDownClass()
@@ -79,12 +72,28 @@ class TransferRequestTests(common.BaseVolumeTests):
'volume delete ' + volume_name)
self.assertEqual('', raw_output)
- def test_volume_transfer_request_list(self):
+ def test_volume_transfer_request_list_show(self):
+ name = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
- 'volume transfer request list -f json'))
- self.assertIn(self.NAME, [req['Name'] for req in cmd_output])
+ 'volume transfer request create -f json ' +
+ ' --name ' + name + ' ' +
+ self.VOLUME_NAME
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume transfer request delete ' + name
+ )
+ self.assertOutput(name, cmd_output['name'])
+ auth_key = cmd_output['auth_key']
+ self.assertTrue(auth_key)
- def test_volume_transfer_request_show(self):
cmd_output = json.loads(self.openstack(
- 'volume transfer request show -f json ' + self.NAME))
- self.assertEqual(self.NAME, cmd_output['name'])
+ 'volume transfer request list -f json'
+ ))
+ self.assertIn(name, [req['Name'] for req in cmd_output])
+
+ cmd_output = json.loads(self.openstack(
+ 'volume transfer request show -f json ' +
+ name
+ ))
+ self.assertEqual(name, cmd_output['name'])
diff --git a/openstackclient/tests/functional/volume/v1/test_volume_type.py b/openstackclient/tests/functional/volume/v1/test_volume_type.py
index c5886a69..eb9d7f64 100644
--- a/openstackclient/tests/functional/volume/v1/test_volume_type.py
+++ b/openstackclient/tests/functional/volume/v1/test_volume_type.py
@@ -20,62 +20,92 @@ from openstackclient.tests.functional.volume.v1 import common
class VolumeTypeTests(common.BaseVolumeTests):
"""Functional tests for volume type. """
- NAME = uuid.uuid4().hex
-
- @classmethod
- def setUpClass(cls):
- super(VolumeTypeTests, cls).setUpClass()
- cmd_output = json.loads(cls.openstack(
- 'volume type create -f json %s' % cls.NAME))
- cls.assertOutput(cls.NAME, cmd_output['name'])
-
- @classmethod
- def tearDownClass(cls):
- try:
- raw_output = cls.openstack('volume type delete %s' % cls.NAME)
- cls.assertOutput('', raw_output)
- finally:
- super(VolumeTypeTests, cls).tearDownClass()
-
- def test_volume_type_list(self):
+ def test_volume_type_create_list(self):
+ name = uuid.uuid4().hex
+ cmd_output = json.loads(self.openstack(
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' +
+ name,
+ )
+ self.assertEqual(name, cmd_output['name'])
+
+ cmd_output = json.loads(self.openstack(
+ 'volume type show -f json %s' % name
+ ))
+        self.assertEqual(name, cmd_output['name'])
+
cmd_output = json.loads(self.openstack('volume type list -f json'))
self.assertIn(self.NAME, [t['Name'] for t in cmd_output])
- def test_volume_type_show(self):
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
- self.assertEqual(self.NAME, cmd_output['name'])
+ 'volume type list -f json --default'
+ ))
+ self.assertEqual(1, len(cmd_output))
+ self.assertEqual('lvmdriver-1', cmd_output[0]['Name'])
def test_volume_type_set_unset_properties(self):
+ name = uuid.uuid4().hex
+ cmd_output = json.loads(self.openstack(
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' + name
+ )
+ self.assertEqual(name, cmd_output['name'])
+
raw_output = self.openstack(
- 'volume type set --property a=b --property c=d %s' % self.NAME)
+ 'volume type set --property a=b --property c=d %s' % name
+ )
self.assertEqual("", raw_output)
-
cmd_output = json.loads(self.openstack(
- 'volume type show -f json ' + self.NAME))
+ 'volume type show -f json %s' % name
+ ))
+ # TODO(amotoki): properties output should be machine-readable
self.assertEqual("a='b', c='d'", cmd_output['properties'])
- raw_output = self.openstack('volume type unset --property a %s' %
- self.NAME)
+ raw_output = self.openstack(
+ 'volume type unset --property a %s' % name
+ )
self.assertEqual("", raw_output)
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
+ 'volume type show -f json %s' % name
+ ))
self.assertEqual("c='d'", cmd_output['properties'])
def test_volume_type_set_unset_multiple_properties(self):
+ name = uuid.uuid4().hex
+ cmd_output = json.loads(self.openstack(
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' + name
+ )
+ self.assertEqual(name, cmd_output['name'])
+
raw_output = self.openstack(
- 'volume type set --property a=b --property c=d %s' % self.NAME)
+ 'volume type set --property a=b --property c=d %s' % name
+ )
self.assertEqual("", raw_output)
-
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
+ 'volume type show -f json %s' % name
+ ))
self.assertEqual("a='b', c='d'", cmd_output['properties'])
raw_output = self.openstack(
- 'volume type unset --property a --property c %s' % self.NAME)
+ 'volume type unset --property a --property c %s' % name
+ )
self.assertEqual("", raw_output)
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
+ 'volume type show -f json %s' % name
+ ))
self.assertEqual("", cmd_output['properties'])
def test_multi_delete(self):
@@ -140,8 +170,21 @@ class VolumeTypeTests(common.BaseVolumeTests):
'--encryption-control-location front-end ' +
self.NAME)
self.assertEqual('', raw_output)
+
+ name = uuid.uuid4().hex
+ cmd_output = json.loads(self.openstack(
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' + name,
+ )
+ self.assertEqual(name, cmd_output['name'])
+
cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + self.NAME))
+ 'volume type show -f json --encryption-type ' + name
+ ))
expected = ["provider='LuksEncryptor'",
"cipher='aes-xts-plain64'",
"key_size='128'",
@@ -150,10 +193,12 @@ class VolumeTypeTests(common.BaseVolumeTests):
self.assertIn(attr, cmd_output['encryption'])
# test unset encryption type
raw_output = self.openstack(
- 'volume type unset --encryption-type ' + self.NAME)
+ 'volume type unset --encryption-type ' + name
+ )
self.assertEqual('', raw_output)
cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + self.NAME))
+ 'volume type show -f json --encryption-type ' + name
+ ))
self.assertEqual('', cmd_output['encryption'])
# test delete encryption type
raw_output = self.openstack('volume type delete ' + encryption_type)
diff --git a/openstackclient/tests/functional/volume/v2/test_backup.py b/openstackclient/tests/functional/volume/v2/test_backup.py
new file mode 100644
index 00000000..e4890b00
--- /dev/null
+++ b/openstackclient/tests/functional/volume/v2/test_backup.py
@@ -0,0 +1,58 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import uuid
+
+from openstackclient.tests.functional.volume.v2 import common
+
+
+class VolumeBackupTests(common.BaseVolumeTests):
+ """Functional tests for volume backups. """
+
+ def setUp(self):
+ super(VolumeBackupTests, self).setUp()
+ self.backup_enabled = False
+ serv_list = json.loads(self.openstack('volume service list -f json'))
+ for service in serv_list:
+ if service['Binary'] == 'cinder-backup':
+ if service['Status'] == 'enabled':
+ self.backup_enabled = True
+
+ def test_volume_backup_restore(self):
+ """Test restore backup"""
+ if not self.backup_enabled:
+ self.skipTest('Backup service is not enabled')
+ vol_id = uuid.uuid4().hex
+ # create a volume
+ json.loads(self.openstack(
+ 'volume create -f json ' +
+ '--size 1 ' +
+ vol_id
+ ))
+ # create a backup
+ backup = json.loads(self.openstack(
+ 'volume backup create -f json ' +
+ vol_id
+ ))
+
+ self.wait_for_status("volume", vol_id, "available")
+ self.wait_for_status("backup", backup['id'], "available")
+ # restore the backup
+ backup_restored = json.loads(self.openstack(
+ 'volume backup restore -f json %s %s'
+ % (backup['id'], vol_id)))
+ self.assertEqual(backup_restored['backup_id'], backup['id'])
+ self.wait_for_status("backup", backup['id'], "available")
+ self.wait_for_status("volume", backup_restored['volume_id'],
+ "available")
+ self.addCleanup(self.openstack, 'volume delete %s' % vol_id)
diff --git a/openstackclient/tests/functional/volume/v2/test_qos.py b/openstackclient/tests/functional/volume/v2/test_qos.py
index 888f12b1..646becc1 100644
--- a/openstackclient/tests/functional/volume/v2/test_qos.py
+++ b/openstackclient/tests/functional/volume/v2/test_qos.py
@@ -125,7 +125,6 @@ class QosTests(common.BaseVolumeTests):
def test_volume_qos_asso_disasso(self):
"""Tests associate and disassociate qos with volume type"""
vol_type1 = uuid.uuid4().hex
- vol_type2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'volume type create -f json ' +
vol_type1
@@ -134,6 +133,9 @@ class QosTests(common.BaseVolumeTests):
vol_type1,
cmd_output['name']
)
+ self.addCleanup(self.openstack, 'volume type delete ' + vol_type1)
+
+ vol_type2 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
'volume type create -f json ' +
vol_type2
@@ -142,7 +144,6 @@ class QosTests(common.BaseVolumeTests):
vol_type2,
cmd_output['name']
)
- self.addCleanup(self.openstack, 'volume type delete ' + vol_type1)
self.addCleanup(self.openstack, 'volume type delete ' + vol_type2)
name = uuid.uuid4().hex
diff --git a/openstackclient/tests/functional/volume/v2/test_snapshot.py b/openstackclient/tests/functional/volume/v2/test_snapshot.py
index ba6b2c28..264f4adb 100644
--- a/openstackclient/tests/functional/volume/v2/test_snapshot.py
+++ b/openstackclient/tests/functional/volume/v2/test_snapshot.py
@@ -43,7 +43,7 @@ class VolumeSnapshotTests(common.BaseVolumeTests):
finally:
super(VolumeSnapshotTests, cls).tearDownClass()
- def test_volume_snapshot__delete(self):
+ def test_volume_snapshot_delete(self):
"""Test create, delete multiple"""
name1 = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
diff --git a/openstackclient/tests/functional/volume/v2/test_transfer_request.py b/openstackclient/tests/functional/volume/v2/test_transfer_request.py
index 33495af6..00d0865c 100644
--- a/openstackclient/tests/functional/volume/v2/test_transfer_request.py
+++ b/openstackclient/tests/functional/volume/v2/test_transfer_request.py
@@ -19,73 +19,103 @@ from openstackclient.tests.functional.volume.v2 import common
class TransferRequestTests(common.BaseVolumeTests):
"""Functional tests for transfer request. """
- NAME = uuid.uuid4().hex
- VOLUME_NAME = uuid.uuid4().hex
-
- @classmethod
- def setUpClass(cls):
- super(TransferRequestTests, cls).setUpClass()
-
- cmd_output = json.loads(cls.openstack(
- 'volume create -f json --size 1 ' + cls.VOLUME_NAME))
- cls.assertOutput(cls.VOLUME_NAME, cmd_output['name'])
-
- cmd_output = json.loads(cls.openstack(
- 'volume transfer request create -f json ' +
- cls.VOLUME_NAME +
- ' --name ' + cls.NAME))
- cls.assertOutput(cls.NAME, cmd_output['name'])
-
- @classmethod
- def tearDownClass(cls):
- try:
- raw_output_transfer = cls.openstack(
- 'volume transfer request delete ' + cls.NAME)
- raw_output_volume = cls.openstack(
- 'volume delete ' + cls.VOLUME_NAME)
- cls.assertOutput('', raw_output_transfer)
- cls.assertOutput('', raw_output_volume)
- finally:
- super(TransferRequestTests, cls).tearDownClass()
+ API_VERSION = '2'
def test_volume_transfer_request_accept(self):
volume_name = uuid.uuid4().hex
- name = uuid.uuid4().hex
+ xfer_name = uuid.uuid4().hex
# create a volume
cmd_output = json.loads(self.openstack(
- 'volume create -f json --size 1 ' + volume_name))
+ 'volume create -f json ' +
+ '--size 1 ' +
+ volume_name
+ ))
self.assertEqual(volume_name, cmd_output['name'])
+ self.addCleanup(
+ self.openstack,
+ '--os-volume-api-version ' + self.API_VERSION + ' ' +
+ 'volume delete ' +
+ volume_name
+ )
+ self.wait_for_status("volume", volume_name, "available")
# create volume transfer request for the volume
# and get the auth_key of the new transfer request
cmd_output = json.loads(self.openstack(
+ '--os-volume-api-version ' + self.API_VERSION + ' ' +
'volume transfer request create -f json ' +
- volume_name +
- ' --name ' + name))
+ ' --name ' + xfer_name + ' ' +
+ volume_name
+ ))
+ self.assertEqual(xfer_name, cmd_output['name'])
+ xfer_id = cmd_output['id']
auth_key = cmd_output['auth_key']
self.assertTrue(auth_key)
+ self.wait_for_status("volume", volume_name, "awaiting-transfer")
# accept the volume transfer request
cmd_output = json.loads(self.openstack(
+ '--os-volume-api-version ' + self.API_VERSION + ' ' +
'volume transfer request accept -f json ' +
- name + ' ' +
- '--auth-key ' + auth_key
+ '--auth-key ' + auth_key + ' ' +
+ xfer_id
))
- self.assertEqual(name, cmd_output['name'])
+ self.assertEqual(xfer_name, cmd_output['name'])
+ self.wait_for_status("volume", volume_name, "available")
- # the volume transfer will be removed by default after accepted
- # so just need to delete the volume here
- raw_output = self.openstack(
- 'volume delete ' + volume_name)
- self.assertEqual('', raw_output)
+ def test_volume_transfer_request_list_show(self):
+ volume_name = uuid.uuid4().hex
+ xfer_name = uuid.uuid4().hex
- def test_volume_transfer_request_list(self):
+ # create a volume
cmd_output = json.loads(self.openstack(
- 'volume transfer request list -f json'))
- self.assertIn(self.NAME, [req['Name'] for req in cmd_output])
+ 'volume create -f json ' +
+ '--size 1 ' +
+ volume_name
+ ))
+ self.assertEqual(volume_name, cmd_output['name'])
+ self.addCleanup(
+ self.openstack,
+ '--os-volume-api-version ' + self.API_VERSION + ' ' +
+ 'volume delete ' +
+ volume_name
+ )
+ self.wait_for_status("volume", volume_name, "available")
- def test_volume_transfer_request_show(self):
cmd_output = json.loads(self.openstack(
- 'volume transfer request show -f json ' + self.NAME))
- self.assertEqual(self.NAME, cmd_output['name'])
+ '--os-volume-api-version ' + self.API_VERSION + ' ' +
+ 'volume transfer request create -f json ' +
+ ' --name ' + xfer_name + ' ' +
+ volume_name
+ ))
+ self.assertEqual(xfer_name, cmd_output['name'])
+ xfer_id = cmd_output['id']
+ auth_key = cmd_output['auth_key']
+ self.assertTrue(auth_key)
+ self.wait_for_status("volume", volume_name, "awaiting-transfer")
+
+ cmd_output = json.loads(self.openstack(
+ '--os-volume-api-version ' + self.API_VERSION + ' ' +
+ 'volume transfer request list -f json'
+ ))
+ self.assertIn(xfer_name, [req['Name'] for req in cmd_output])
+
+ cmd_output = json.loads(self.openstack(
+ '--os-volume-api-version ' + self.API_VERSION + ' ' +
+ 'volume transfer request show -f json ' +
+ xfer_id
+ ))
+ self.assertEqual(xfer_name, cmd_output['name'])
+
+ # NOTE(dtroyer): We need to delete the transfer request to allow the
+ # volume to be deleted. The addCleanup() route does
+ # not have a mechanism to wait for the volume status
+ # to become 'available' before attempting to delete
+ # the volume.
+ cmd_output = self.openstack(
+ '--os-volume-api-version ' + self.API_VERSION + ' ' +
+ 'volume transfer request delete ' +
+ xfer_id
+ )
+ self.wait_for_status("volume", volume_name, "available")
diff --git a/openstackclient/tests/functional/volume/v2/test_volume_type.py b/openstackclient/tests/functional/volume/v2/test_volume_type.py
index 5c551ca9..d8dd5bd6 100644
--- a/openstackclient/tests/functional/volume/v2/test_volume_type.py
+++ b/openstackclient/tests/functional/volume/v2/test_volume_type.py
@@ -20,76 +20,113 @@ from openstackclient.tests.functional.volume.v2 import common
class VolumeTypeTests(common.BaseVolumeTests):
"""Functional tests for volume type. """
- NAME = uuid.uuid4().hex
-
- @classmethod
- def setUpClass(cls):
- super(VolumeTypeTests, cls).setUpClass()
- cmd_output = json.loads(cls.openstack(
- 'volume type create -f json --private %s' % cls.NAME))
- cls.assertOutput(cls.NAME, cmd_output['name'])
-
- @classmethod
- def tearDownClass(cls):
- try:
- raw_output = cls.openstack('volume type delete %s' % cls.NAME)
- cls.assertOutput('', raw_output)
- finally:
- super(VolumeTypeTests, cls).tearDownClass()
-
- def test_volume_type_list(self):
+ def test_volume_type_create_list(self):
+ name = uuid.uuid4().hex
+ cmd_output = json.loads(self.openstack(
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' + name,
+ )
+ self.assertEqual(name, cmd_output['name'])
+
+ cmd_output = json.loads(self.openstack(
+ 'volume type show -f json %s' % name
+ ))
+ self.assertEqual(name, cmd_output['name'])
+
cmd_output = json.loads(self.openstack('volume type list -f json'))
- self.assertIn(self.NAME, [t['Name'] for t in cmd_output])
+ self.assertIn(name, [t['Name'] for t in cmd_output])
- def test_volume_type_list_default(self):
cmd_output = json.loads(self.openstack(
- 'volume type list -f json --default'))
+ 'volume type list -f json --default'
+ ))
self.assertEqual(1, len(cmd_output))
self.assertEqual('lvmdriver-1', cmd_output[0]['Name'])
- def test_volume_type_show(self):
+ def test_volume_type_set_unset_properties(self):
+ name = uuid.uuid4().hex
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
- self.assertEqual(self.NAME, cmd_output['name'])
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' + name
+ )
+ self.assertEqual(name, cmd_output['name'])
- def test_volume_type_set_unset_properties(self):
raw_output = self.openstack(
- 'volume type set --property a=b --property c=d %s' % self.NAME)
+ 'volume type set --property a=b --property c=d %s' % name
+ )
self.assertEqual("", raw_output)
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
+ 'volume type show -f json %s' % name
+ ))
# TODO(amotoki): properties output should be machine-readable
self.assertEqual("a='b', c='d'", cmd_output['properties'])
- raw_output = self.openstack('volume type unset --property a %s' %
- self.NAME)
+ raw_output = self.openstack(
+ 'volume type unset --property a %s' % name
+ )
self.assertEqual("", raw_output)
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
+ 'volume type show -f json %s' % name
+ ))
self.assertEqual("c='d'", cmd_output['properties'])
def test_volume_type_set_unset_multiple_properties(self):
+ name = uuid.uuid4().hex
+ cmd_output = json.loads(self.openstack(
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' + name
+ )
+ self.assertEqual(name, cmd_output['name'])
+
raw_output = self.openstack(
- 'volume type set --property a=b --property c=d %s' % self.NAME)
+ 'volume type set --property a=b --property c=d %s' % name
+ )
self.assertEqual("", raw_output)
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
+ 'volume type show -f json %s' % name
+ ))
self.assertEqual("a='b', c='d'", cmd_output['properties'])
raw_output = self.openstack(
- 'volume type unset --property a --property c %s' % self.NAME)
+ 'volume type unset --property a --property c %s' % name
+ )
self.assertEqual("", raw_output)
cmd_output = json.loads(self.openstack(
- 'volume type show -f json %s' % self.NAME))
+ 'volume type show -f json %s' % name
+ ))
self.assertEqual("", cmd_output['properties'])
def test_volume_type_set_unset_project(self):
+ name = uuid.uuid4().hex
+ cmd_output = json.loads(self.openstack(
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' + name
+ )
+ self.assertEqual(name, cmd_output['name'])
+
raw_output = self.openstack(
- 'volume type set --project admin %s' % self.NAME)
+ 'volume type set --project admin %s' % name
+ )
self.assertEqual("", raw_output)
raw_output = self.openstack(
- 'volume type unset --project admin %s' % self.NAME)
+ 'volume type unset --project admin %s' % name
+ )
self.assertEqual("", raw_output)
def test_multi_delete(self):
@@ -108,6 +145,7 @@ class VolumeTypeTests(common.BaseVolumeTests):
# these to new test format when beef up all tests for
# volume tye commands.
def test_encryption_type(self):
+ name = uuid.uuid4().hex
encryption_type = uuid.uuid4().hex
# test create new encryption type
cmd_output = json.loads(self.openstack(
@@ -162,16 +200,28 @@ class VolumeTypeTests(common.BaseVolumeTests):
for attr in expected:
self.assertIn(attr, cmd_output['encryption'])
# test set new encryption type
+ cmd_output = json.loads(self.openstack(
+ 'volume type create -f json --private ' +
+ name,
+ ))
+ self.addCleanup(
+ self.openstack,
+ 'volume type delete ' + name,
+ )
+ self.assertEqual(name, cmd_output['name'])
+
raw_output = self.openstack(
'volume type set '
'--encryption-provider LuksEncryptor '
'--encryption-cipher aes-xts-plain64 '
'--encryption-key-size 128 '
'--encryption-control-location front-end ' +
- self.NAME)
+ name)
self.assertEqual('', raw_output)
+
cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + self.NAME))
+ 'volume type show -f json --encryption-type ' + name
+ ))
expected = ["provider='LuksEncryptor'",
"cipher='aes-xts-plain64'",
"key_size='128'",
@@ -180,10 +230,12 @@ class VolumeTypeTests(common.BaseVolumeTests):
self.assertIn(attr, cmd_output['encryption'])
# test unset encryption type
raw_output = self.openstack(
- 'volume type unset --encryption-type ' + self.NAME)
+ 'volume type unset --encryption-type ' + name
+ )
self.assertEqual('', raw_output)
cmd_output = json.loads(self.openstack(
- 'volume type show -f json --encryption-type ' + self.NAME))
+ 'volume type show -f json --encryption-type ' + name
+ ))
self.assertEqual('', cmd_output['encryption'])
# test delete encryption type
raw_output = self.openstack('volume type delete ' + encryption_type)
diff --git a/openstackclient/tests/functional/volume/v3/test_transfer_request.py b/openstackclient/tests/functional/volume/v3/test_transfer_request.py
index b3253237..f16dfafa 100644
--- a/openstackclient/tests/functional/volume/v3/test_transfer_request.py
+++ b/openstackclient/tests/functional/volume/v3/test_transfer_request.py
@@ -17,3 +17,5 @@ from openstackclient.tests.functional.volume.v3 import common
class TransferRequestTests(common.BaseVolumeTests, v2.TransferRequestTests):
"""Functional tests for transfer request. """
+
+ API_VERSION = '3'
diff --git a/openstackclient/tests/unit/common/test_project_purge.py b/openstackclient/tests/unit/common/test_project_purge.py
index 2385eae8..6e8ce188 100644
--- a/openstackclient/tests/unit/common/test_project_purge.py
+++ b/openstackclient/tests/unit/common/test_project_purge.py
@@ -117,10 +117,11 @@ class TestProjectPurge(TestProjectPurgeInit):
self.projects_mock.get.assert_called_once_with(self.project.id)
self.projects_mock.delete.assert_called_once_with(self.project.id)
self.servers_mock.list.assert_called_once_with(
- search_opts={'tenant_id': self.project.id})
+ search_opts={'tenant_id': self.project.id, 'all_tenants': True})
kwargs = {'filters': {'owner': self.project.id}}
self.images_mock.list.assert_called_once_with(**kwargs)
- volume_search_opts = {'project_id': self.project.id}
+ volume_search_opts = {'project_id': self.project.id,
+ 'all_tenants': True}
self.volumes_mock.list.assert_called_once_with(
search_opts=volume_search_opts)
self.snapshots_mock.list.assert_called_once_with(
@@ -152,10 +153,11 @@ class TestProjectPurge(TestProjectPurgeInit):
self.projects_mock.get.assert_called_once_with(self.project.id)
self.projects_mock.delete.assert_not_called()
self.servers_mock.list.assert_called_once_with(
- search_opts={'tenant_id': self.project.id})
+ search_opts={'tenant_id': self.project.id, 'all_tenants': True})
kwargs = {'filters': {'owner': self.project.id}}
self.images_mock.list.assert_called_once_with(**kwargs)
- volume_search_opts = {'project_id': self.project.id}
+ volume_search_opts = {'project_id': self.project.id,
+ 'all_tenants': True}
self.volumes_mock.list.assert_called_once_with(
search_opts=volume_search_opts)
self.snapshots_mock.list.assert_called_once_with(
@@ -187,10 +189,11 @@ class TestProjectPurge(TestProjectPurgeInit):
self.projects_mock.get.assert_called_once_with(self.project.id)
self.projects_mock.delete.assert_not_called()
self.servers_mock.list.assert_called_once_with(
- search_opts={'tenant_id': self.project.id})
+ search_opts={'tenant_id': self.project.id, 'all_tenants': True})
kwargs = {'filters': {'owner': self.project.id}}
self.images_mock.list.assert_called_once_with(**kwargs)
- volume_search_opts = {'project_id': self.project.id}
+ volume_search_opts = {'project_id': self.project.id,
+ 'all_tenants': True}
self.volumes_mock.list.assert_called_once_with(
search_opts=volume_search_opts)
self.snapshots_mock.list.assert_called_once_with(
@@ -223,10 +226,11 @@ class TestProjectPurge(TestProjectPurgeInit):
self.projects_mock.get.assert_not_called()
self.projects_mock.delete.assert_called_once_with(self.project.id)
self.servers_mock.list.assert_called_once_with(
- search_opts={'tenant_id': self.project.id})
+ search_opts={'tenant_id': self.project.id, 'all_tenants': True})
kwargs = {'filters': {'owner': self.project.id}}
self.images_mock.list.assert_called_once_with(**kwargs)
- volume_search_opts = {'project_id': self.project.id}
+ volume_search_opts = {'project_id': self.project.id,
+ 'all_tenants': True}
self.volumes_mock.list.assert_called_once_with(
search_opts=volume_search_opts)
self.snapshots_mock.list.assert_called_once_with(
@@ -259,10 +263,11 @@ class TestProjectPurge(TestProjectPurgeInit):
self.projects_mock.get.assert_called_once_with(self.project.id)
self.projects_mock.delete.assert_called_once_with(self.project.id)
self.servers_mock.list.assert_called_once_with(
- search_opts={'tenant_id': self.project.id})
+ search_opts={'tenant_id': self.project.id, 'all_tenants': True})
kwargs = {'filters': {'owner': self.project.id}}
self.images_mock.list.assert_called_once_with(**kwargs)
- volume_search_opts = {'project_id': self.project.id}
+ volume_search_opts = {'project_id': self.project.id,
+ 'all_tenants': True}
self.volumes_mock.list.assert_called_once_with(
search_opts=volume_search_opts)
self.snapshots_mock.list.assert_called_once_with(
@@ -295,10 +300,11 @@ class TestProjectPurge(TestProjectPurgeInit):
self.projects_mock.get.assert_called_once_with(self.project.id)
self.projects_mock.delete.assert_called_once_with(self.project.id)
self.servers_mock.list.assert_called_once_with(
- search_opts={'tenant_id': self.project.id})
+ search_opts={'tenant_id': self.project.id, 'all_tenants': True})
kwargs = {'filters': {'owner': self.project.id}}
self.images_mock.list.assert_called_once_with(**kwargs)
- volume_search_opts = {'project_id': self.project.id}
+ volume_search_opts = {'project_id': self.project.id,
+ 'all_tenants': True}
self.volumes_mock.list.assert_called_once_with(
search_opts=volume_search_opts)
self.snapshots_mock.list.assert_called_once_with(
diff --git a/openstackclient/tests/unit/common/test_quota.py b/openstackclient/tests/unit/common/test_quota.py
index 1a3da31d..4f9e321b 100644
--- a/openstackclient/tests/unit/common/test_quota.py
+++ b/openstackclient/tests/unit/common/test_quota.py
@@ -197,6 +197,85 @@ class TestQuotaList(TestQuota):
self.cmd = quota.ListQuota(self.app, None)
+ @staticmethod
+ def _get_detailed_reference_data(quota):
+ reference_data = []
+ for name, values in quota.to_dict().items():
+ if type(values) is dict:
+ if 'used' in values:
+ # For network quota it's "used" key instead of "in_use"
+ in_use = values['used']
+ else:
+ in_use = values['in_use']
+ resource_values = [
+ in_use,
+ values['reserved'],
+ values['limit']]
+ reference_data.append(tuple([name] + resource_values))
+ return reference_data
+
+ def test_quota_list_details_compute(self):
+ detailed_quota = (
+ compute_fakes.FakeQuota.create_one_comp_detailed_quota())
+
+ detailed_column_header = (
+ 'Resource',
+ 'In Use',
+ 'Reserved',
+ 'Limit',
+ )
+ detailed_reference_data = (
+ self._get_detailed_reference_data(detailed_quota))
+
+ self.compute.quotas.get = mock.Mock(return_value=detailed_quota)
+
+ arglist = [
+ '--detail', '--compute',
+ ]
+ verifylist = [
+ ('detail', True),
+ ('compute', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+ ret_quotas = list(data)
+
+ self.assertEqual(detailed_column_header, columns)
+ self.assertEqual(
+ sorted(detailed_reference_data), sorted(ret_quotas))
+
+ def test_quota_list_details_network(self):
+ detailed_quota = (
+ network_fakes.FakeQuota.create_one_net_detailed_quota())
+
+ detailed_column_header = (
+ 'Resource',
+ 'In Use',
+ 'Reserved',
+ 'Limit',
+ )
+ detailed_reference_data = (
+ self._get_detailed_reference_data(detailed_quota))
+
+ self.network.get_quota = mock.Mock(return_value=detailed_quota)
+
+ arglist = [
+ '--detail', '--network',
+ ]
+ verifylist = [
+ ('detail', True),
+ ('network', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+ ret_quotas = list(data)
+
+ self.assertEqual(detailed_column_header, columns)
+ self.assertEqual(
+ sorted(detailed_reference_data), sorted(ret_quotas))
+
def test_quota_list_compute(self):
# Two projects with non-default quotas
self.compute.quotas.get = mock.Mock(
@@ -827,13 +906,13 @@ class TestQuotaShow(TestQuota):
self.cmd.take_action(parsed_args)
self.compute_quotas_mock.get.assert_called_once_with(
- self.projects[0].id,
+ self.projects[0].id, detail=False
)
self.volume_quotas_mock.get.assert_called_once_with(
self.projects[0].id,
)
self.network.get_quota.assert_called_once_with(
- self.projects[0].id,
+ self.projects[0].id, details=False
)
self.assertNotCalled(self.network.get_quota_default)
@@ -889,12 +968,12 @@ class TestQuotaShow(TestQuota):
self.cmd.take_action(parsed_args)
self.compute_quotas_mock.get.assert_called_once_with(
- identity_fakes.project_id,
+ identity_fakes.project_id, detail=False
)
self.volume_quotas_mock.get.assert_called_once_with(
identity_fakes.project_id,
)
self.network.get_quota.assert_called_once_with(
- identity_fakes.project_id,
+ identity_fakes.project_id, details=False
)
self.assertNotCalled(self.network.get_quota_default)
diff --git a/openstackclient/tests/unit/compute/v2/fakes.py b/openstackclient/tests/unit/compute/v2/fakes.py
index 46fa5992..ee7d4983 100644
--- a/openstackclient/tests/unit/compute/v2/fakes.py
+++ b/openstackclient/tests/unit/compute/v2/fakes.py
@@ -17,6 +17,7 @@ import copy
import uuid
import mock
+from novaclient import api_versions
from openstackclient.api import compute_v2
from openstackclient.tests.unit import fakes
@@ -201,6 +202,8 @@ class FakeComputev2Client(object):
self.management_url = kwargs['endpoint']
+ self.api_version = api_versions.APIVersion('2.1')
+
class TestComputev2(utils.TestCommand):
@@ -765,6 +768,7 @@ class FakeFlavor(object):
'rxtx_factor': 1.0,
'OS-FLV-DISABLED:disabled': False,
'os-flavor-access:is_public': True,
+ 'description': 'description',
'OS-FLV-EXT-DATA:ephemeral': 0,
'properties': {'property': 'value'},
}
@@ -1300,6 +1304,7 @@ class FakeUsage(object):
'local_gb': 1,
'memory_mb': 512,
'name': 'usage-name-' + uuid.uuid4().hex,
+ 'instance_id': uuid.uuid4().hex,
'state': 'active',
'uptime': 3600,
'vcpus': 1
@@ -1396,6 +1401,38 @@ class FakeQuota(object):
return quota
+ @staticmethod
+ def create_one_comp_detailed_quota(attrs=None):
+ """Create one quota"""
+
+ attrs = attrs or {}
+
+ quota_attrs = {
+ 'id': 'project-id-' + uuid.uuid4().hex,
+ 'cores': {'reserved': 0, 'in_use': 0, 'limit': 20},
+ 'fixed_ips': {'reserved': 0, 'in_use': 0, 'limit': 30},
+ 'injected_files': {'reserved': 0, 'in_use': 0, 'limit': 100},
+ 'injected_file_content_bytes': {
+ 'reserved': 0, 'in_use': 0, 'limit': 10240},
+ 'injected_file_path_bytes': {
+ 'reserved': 0, 'in_use': 0, 'limit': 255},
+ 'instances': {'reserved': 0, 'in_use': 0, 'limit': 50},
+ 'key_pairs': {'reserved': 0, 'in_use': 0, 'limit': 20},
+ 'metadata_items': {'reserved': 0, 'in_use': 0, 'limit': 10},
+ 'ram': {'reserved': 0, 'in_use': 0, 'limit': 51200},
+ 'server_groups': {'reserved': 0, 'in_use': 0, 'limit': 10},
+ 'server_group_members': {'reserved': 0, 'in_use': 0, 'limit': 10}
+ }
+
+ quota_attrs.update(attrs)
+ quota = fakes.FakeResource(
+ info=copy.deepcopy(quota_attrs),
+ loaded=True)
+
+ quota.project_id = quota_attrs['id']
+
+ return quota
+
class FakeLimits(object):
"""Fake limits"""
diff --git a/openstackclient/tests/unit/compute/v2/test_flavor.py b/openstackclient/tests/unit/compute/v2/test_flavor.py
index 4cdbb25b..a112fc1f 100644
--- a/openstackclient/tests/unit/compute/v2/test_flavor.py
+++ b/openstackclient/tests/unit/compute/v2/test_flavor.py
@@ -16,6 +16,7 @@
import mock
from mock import call
+import novaclient
from osc_lib import exceptions
from osc_lib import utils
@@ -50,6 +51,7 @@ class TestFlavorCreate(TestFlavor):
columns = (
'OS-FLV-DISABLED:disabled',
'OS-FLV-EXT-DATA:ephemeral',
+ 'description',
'disk',
'id',
'name',
@@ -63,6 +65,7 @@ class TestFlavorCreate(TestFlavor):
data = (
flavor.disabled,
flavor.ephemeral,
+ flavor.description,
flavor.disk,
flavor.id,
flavor.name,
@@ -101,7 +104,8 @@ class TestFlavorCreate(TestFlavor):
0,
0,
1.0,
- True
+ True,
+ None,
)
columns, data = self.cmd.take_action(parsed_args)
self.flavors_mock.create.assert_called_once_with(*default_args)
@@ -120,6 +124,7 @@ class TestFlavorCreate(TestFlavor):
'--vcpus', str(self.flavor.vcpus),
'--rxtx-factor', str(self.flavor.rxtx_factor),
'--public',
+ '--description', str(self.flavor.description),
'--property', 'property=value',
self.flavor.name,
]
@@ -132,6 +137,7 @@ class TestFlavorCreate(TestFlavor):
('vcpus', self.flavor.vcpus),
('rxtx_factor', self.flavor.rxtx_factor),
('public', True),
+ ('description', self.flavor.description),
('property', {'property': 'value'}),
('name', self.flavor.name),
]
@@ -147,8 +153,13 @@ class TestFlavorCreate(TestFlavor):
self.flavor.swap,
self.flavor.rxtx_factor,
self.flavor.is_public,
+ self.flavor.description,
)
- columns, data = self.cmd.take_action(parsed_args)
+ self.app.client_manager.compute.api_version = 2.55
+ with mock.patch.object(novaclient.api_versions,
+ 'APIVersion',
+ return_value=2.55):
+ columns, data = self.cmd.take_action(parsed_args)
self.flavors_mock.create.assert_called_once_with(*args)
self.flavor.set_keys.assert_called_once_with({'property': 'value'})
self.flavor.get_keys.assert_called_once_with()
@@ -168,6 +179,7 @@ class TestFlavorCreate(TestFlavor):
'--vcpus', str(self.flavor.vcpus),
'--rxtx-factor', str(self.flavor.rxtx_factor),
'--private',
+ '--description', str(self.flavor.description),
'--project', self.project.id,
'--property', 'key1=value1',
'--property', 'key2=value2',
@@ -181,6 +193,7 @@ class TestFlavorCreate(TestFlavor):
('vcpus', self.flavor.vcpus),
('rxtx_factor', self.flavor.rxtx_factor),
('public', False),
+ ('description', 'description'),
('project', self.project.id),
('property', {'key1': 'value1', 'key2': 'value2'}),
('name', self.flavor.name),
@@ -197,8 +210,13 @@ class TestFlavorCreate(TestFlavor):
self.flavor.swap,
self.flavor.rxtx_factor,
self.flavor.is_public,
+ self.flavor.description,
)
- columns, data = self.cmd.take_action(parsed_args)
+ self.app.client_manager.compute.api_version = 2.55
+ with mock.patch.object(novaclient.api_versions,
+ 'APIVersion',
+ return_value=2.55):
+ columns, data = self.cmd.take_action(parsed_args)
self.flavors_mock.create.assert_called_once_with(*args)
self.flavor_access_mock.add_tenant_access.assert_called_with(
self.flavor.id,
@@ -234,6 +252,79 @@ class TestFlavorCreate(TestFlavor):
arglist,
verifylist)
+ def test_flavor_create_with_description_api_newer(self):
+ arglist = [
+ '--id', self.flavor.id,
+ '--ram', str(self.flavor.ram),
+ '--disk', str(self.flavor.disk),
+ '--ephemeral', str(self.flavor.ephemeral),
+ '--swap', str(self.flavor.swap),
+ '--vcpus', str(self.flavor.vcpus),
+ '--rxtx-factor', str(self.flavor.rxtx_factor),
+ '--private',
+ '--description', 'fake description',
+ self.flavor.name,
+ ]
+ verifylist = [
+ ('id', self.flavor.id),
+ ('ram', self.flavor.ram),
+ ('disk', self.flavor.disk),
+ ('ephemeral', self.flavor.ephemeral),
+ ('swap', self.flavor.swap),
+ ('vcpus', self.flavor.vcpus),
+ ('rxtx_factor', self.flavor.rxtx_factor),
+ ('public', False),
+ ('description', 'fake description'),
+ ('name', self.flavor.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = 2.55
+ with mock.patch.object(novaclient.api_versions,
+ 'APIVersion',
+ return_value=2.55):
+ columns, data = self.cmd.take_action(parsed_args)
+
+ args = (
+ self.flavor.name,
+ self.flavor.ram,
+ self.flavor.vcpus,
+ self.flavor.disk,
+ self.flavor.id,
+ self.flavor.ephemeral,
+ self.flavor.swap,
+ self.flavor.rxtx_factor,
+ False,
+ 'fake description',
+ )
+
+ self.flavors_mock.create.assert_called_once_with(*args)
+
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+ def test_flavor_create_with_description_api_older(self):
+ arglist = [
+ '--id', self.flavor.id,
+ '--ram', str(self.flavor.ram),
+ '--vcpus', str(self.flavor.vcpus),
+ '--description', 'description',
+ self.flavor.name,
+ ]
+ verifylist = [
+ ('ram', self.flavor.ram),
+ ('vcpus', self.flavor.vcpus),
+ ('description', 'description'),
+ ('name', self.flavor.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.app.client_manager.compute.api_version = 2.54
+ with mock.patch.object(novaclient.api_versions,
+ 'APIVersion',
+ return_value=2.55):
+ self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+
class TestFlavorDelete(TestFlavor):
@@ -622,6 +713,42 @@ class TestFlavorSet(TestFlavor):
self.flavor_access_mock.add_tenant_access.assert_not_called()
self.assertIsNone(result)
+ def test_flavor_set_description_api_newer(self):
+ arglist = [
+ '--description', 'description',
+ self.flavor.id,
+ ]
+ verifylist = [
+ ('description', 'description'),
+ ('flavor', self.flavor.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = 2.55
+ with mock.patch.object(novaclient.api_versions,
+ 'APIVersion',
+ return_value=2.55):
+ result = self.cmd.take_action(parsed_args)
+ self.flavors_mock.update.assert_called_with(
+ flavor=self.flavor.id, description='description')
+ self.assertIsNone(result)
+
+ def test_flavor_set_description_api_older(self):
+ arglist = [
+ '--description', 'description',
+ self.flavor.id,
+ ]
+ verifylist = [
+ ('description', 'description'),
+ ('flavor', self.flavor.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = 2.54
+ with mock.patch.object(novaclient.api_versions,
+ 'APIVersion',
+ return_value=2.55):
+ self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+
class TestFlavorShow(TestFlavor):
@@ -633,6 +760,7 @@ class TestFlavorShow(TestFlavor):
'OS-FLV-DISABLED:disabled',
'OS-FLV-EXT-DATA:ephemeral',
'access_project_ids',
+ 'description',
'disk',
'id',
'name',
@@ -648,6 +776,7 @@ class TestFlavorShow(TestFlavor):
flavor.disabled,
flavor.ephemeral,
None,
+ flavor.description,
flavor.disk,
flavor.id,
flavor.name,
@@ -710,6 +839,7 @@ class TestFlavorShow(TestFlavor):
private_flavor.disabled,
private_flavor.ephemeral,
self.flavor_access.tenant_id,
+ private_flavor.description,
private_flavor.disk,
private_flavor.id,
private_flavor.name,
diff --git a/openstackclient/tests/unit/compute/v2/test_host.py b/openstackclient/tests/unit/compute/v2/test_host.py
index 329095de..244da413 100644
--- a/openstackclient/tests/unit/compute/v2/test_host.py
+++ b/openstackclient/tests/unit/compute/v2/test_host.py
@@ -111,8 +111,7 @@ class TestHostSet(TestHost):
result = self.cmd.take_action(parsed_args)
self.assertIsNone(result)
- body = {}
- h_mock.assert_called_with(self.host['host'], body)
+ h_mock.assert_called_with(self.host['host'])
def test_host_set(self, h_mock):
h_mock.return_value = self.host
@@ -133,8 +132,8 @@ class TestHostSet(TestHost):
result = self.cmd.take_action(parsed_args)
self.assertIsNone(result)
- body = {'status': 'enable', 'maintenance_mode': 'disable'}
- h_mock.assert_called_with(self.host['host'], body)
+ h_mock.assert_called_with(self.host['host'], status='enable',
+ maintenance_mode='disable')
@mock.patch(
diff --git a/openstackclient/tests/unit/compute/v2/test_server.py b/openstackclient/tests/unit/compute/v2/test_server.py
index d242dc26..c30af8fb 100644
--- a/openstackclient/tests/unit/compute/v2/test_server.py
+++ b/openstackclient/tests/unit/compute/v2/test_server.py
@@ -18,6 +18,8 @@ import getpass
import mock
from mock import call
+from novaclient import api_versions
+from openstack import exceptions as sdk_exceptions
from osc_lib import exceptions
from osc_lib import utils as common_utils
from oslo_utils import timeutils
@@ -221,11 +223,11 @@ class TestServerAddFloatingIPNetwork(
self.network.ports = mock.Mock(return_value=[_port])
arglist = [
_server.id,
- _floating_ip['ip'],
+ _floating_ip['floating_ip_address'],
]
verifylist = [
('server', _server.id),
- ('ip_address', _floating_ip['ip']),
+ ('ip_address', _floating_ip['floating_ip_address']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -236,7 +238,7 @@ class TestServerAddFloatingIPNetwork(
}
self.network.find_ip.assert_called_once_with(
- _floating_ip['ip'],
+ _floating_ip['floating_ip_address'],
ignore_missing=False,
)
self.network.ports.assert_called_once_with(
@@ -247,6 +249,64 @@ class TestServerAddFloatingIPNetwork(
**attrs
)
+ def test_server_add_floating_ip_default_no_external_gateway(self,
+ success=False):
+ _server = compute_fakes.FakeServer.create_one_server()
+ self.servers_mock.get.return_value = _server
+ _port = network_fakes.FakePort.create_one_port()
+ _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
+ self.network.find_ip = mock.Mock(return_value=_floating_ip)
+ return_value = [_port]
+ # In the success case, we'll have two ports, where the first port is
+ # not attached to an external gateway but the second port is.
+ if success:
+ return_value.append(_port)
+ self.network.ports = mock.Mock(return_value=return_value)
+ side_effect = [sdk_exceptions.NotFoundException()]
+ if success:
+ side_effect.append(None)
+ self.network.update_ip = mock.Mock(side_effect=side_effect)
+ arglist = [
+ _server.id,
+ _floating_ip['floating_ip_address'],
+ ]
+ verifylist = [
+ ('server', _server.id),
+ ('ip_address', _floating_ip['floating_ip_address']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ if success:
+ self.cmd.take_action(parsed_args)
+ else:
+ self.assertRaises(sdk_exceptions.NotFoundException,
+ self.cmd.take_action, parsed_args)
+
+ attrs = {
+ 'port_id': _port.id,
+ }
+
+ self.network.find_ip.assert_called_once_with(
+ _floating_ip['floating_ip_address'],
+ ignore_missing=False,
+ )
+ self.network.ports.assert_called_once_with(
+ device_id=_server.id,
+ )
+ if success:
+ self.assertEqual(2, self.network.update_ip.call_count)
+ calls = [mock.call(_floating_ip, **attrs)] * 2
+ self.network.update_ip.assert_has_calls(calls)
+ else:
+ self.network.update_ip.assert_called_once_with(
+ _floating_ip,
+ **attrs
+ )
+
+ def test_server_add_floating_ip_default_one_external_gateway(self):
+ self.test_server_add_floating_ip_default_no_external_gateway(
+ success=True)
+
def test_server_add_floating_ip_fixed(self):
_server = compute_fakes.FakeServer.create_one_server()
self.servers_mock.get.return_value = _server
@@ -254,26 +314,31 @@ class TestServerAddFloatingIPNetwork(
_floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
self.network.find_ip = mock.Mock(return_value=_floating_ip)
self.network.ports = mock.Mock(return_value=[_port])
+ # The user has specified a fixed ip that matches one of the ports
+ # already attached to the instance.
arglist = [
- '--fixed-ip-address', _floating_ip['fixed_ip'],
+ '--fixed-ip-address', _port.fixed_ips[0]['ip_address'],
_server.id,
- _floating_ip['ip'],
+ _floating_ip['floating_ip_address'],
]
verifylist = [
- ('fixed_ip_address', _floating_ip['fixed_ip']),
+ ('fixed_ip_address', _port.fixed_ips[0]['ip_address']),
('server', _server.id),
- ('ip_address', _floating_ip['ip']),
+ ('ip_address', _floating_ip['floating_ip_address']),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
+ # We expect the update_ip call to specify a new fixed_ip_address which
+ # will overwrite the floating ip's existing fixed_ip_address.
attrs = {
'port_id': _port.id,
+ 'fixed_ip_address': _port.fixed_ips[0]['ip_address'],
}
self.network.find_ip.assert_called_once_with(
- _floating_ip['ip'],
+ _floating_ip['floating_ip_address'],
ignore_missing=False,
)
self.network.ports.assert_called_once_with(
@@ -284,6 +349,40 @@ class TestServerAddFloatingIPNetwork(
**attrs
)
+ def test_server_add_floating_ip_fixed_no_port_found(self):
+ _server = compute_fakes.FakeServer.create_one_server()
+ self.servers_mock.get.return_value = _server
+ _port = network_fakes.FakePort.create_one_port()
+ _floating_ip = network_fakes.FakeFloatingIP.create_one_floating_ip()
+ self.network.find_ip = mock.Mock(return_value=_floating_ip)
+ self.network.ports = mock.Mock(return_value=[_port])
+ # The user has specified a fixed ip that does not match any of the
+ # ports already attached to the instance.
+ nonexistent_ip = '10.0.0.9'
+ arglist = [
+ '--fixed-ip-address', nonexistent_ip,
+ _server.id,
+ _floating_ip['floating_ip_address'],
+ ]
+ verifylist = [
+ ('fixed_ip_address', nonexistent_ip),
+ ('server', _server.id),
+ ('ip_address', _floating_ip['floating_ip_address']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+
+ self.network.find_ip.assert_called_once_with(
+ _floating_ip['floating_ip_address'],
+ ignore_missing=False,
+ )
+ self.network.ports.assert_called_once_with(
+ device_id=_server.id,
+ )
+ self.network.update_ip.assert_not_called()
+
class TestServerAddPort(TestServer):
@@ -844,6 +943,55 @@ class TestServerCreate(TestServer):
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist(), data)
+ def test_server_create_with_auto_network_default_v2_37(self):
+ """Tests creating a server without specifying --nic using 2.37."""
+ arglist = [
+ '--image', 'image1',
+ '--flavor', 'flavor1',
+ self.new_server.name,
+ ]
+ verifylist = [
+ ('image', 'image1'),
+ ('flavor', 'flavor1'),
+ ('config_drive', False),
+ ('server_name', self.new_server.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # Since check_parser doesn't handle compute global options like
+ # --os-compute-api-version, we have to mock the construction of
+ # the novaclient client object with our own APIVersion.
+ with mock.patch.object(self.app.client_manager.compute, 'api_version',
+ api_versions.APIVersion('2.37')):
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Set expected values
+ kwargs = dict(
+ meta=None,
+ files={},
+ reservation_id=None,
+ min_count=1,
+ max_count=1,
+ security_groups=[],
+ userdata=None,
+ key_name=None,
+ availability_zone=None,
+ block_device_mapping_v2=[],
+ nics='auto',
+ scheduler_hints={},
+ config_drive=None,
+ )
+ # ServerManager.create(name, image, flavor, **kwargs)
+ self.servers_mock.create.assert_called_with(
+ self.new_server.name,
+ self.image,
+ self.flavor,
+ **kwargs
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.datalist(), data)
+
def test_server_create_with_none_network(self):
arglist = [
'--image', 'image1',
@@ -1938,12 +2086,37 @@ class TestServerList(TestServer):
('all_projects', False),
('long', False),
('deleted', False),
+ ('name_lookup_one_by_one', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.images_mock.list.assert_called()
+ self.flavors_mock.list.assert_called()
+ # we did not pass image or flavor, so gets on those must be absent
+ self.assertFalse(self.flavors_mock.get.call_count)
+ self.assertFalse(self.images_mock.get.call_count)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(tuple(self.data), tuple(data))
+
+ def test_server_list_no_servers(self):
+ arglist = []
+ verifylist = [
+ ('all_projects', False),
+ ('long', False),
+ ('deleted', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.servers_mock.list.return_value = []
+ self.data = ()
columns, data = self.cmd.take_action(parsed_args)
self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.assertEqual(0, self.images_mock.list.call_count)
+ self.assertEqual(0, self.flavors_mock.list.call_count)
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
@@ -1995,6 +2168,28 @@ class TestServerList(TestServer):
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data_no_name_lookup), tuple(data))
+ def test_server_list_name_lookup_one_by_one(self):
+ arglist = [
+ '--name-lookup-one-by-one'
+ ]
+ verifylist = [
+ ('all_projects', False),
+ ('no_name_lookup', False),
+ ('name_lookup_one_by_one', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.servers_mock.list.assert_called_with(**self.kwargs)
+ self.assertFalse(self.images_mock.list.call_count)
+ self.assertFalse(self.flavors_mock.list.call_count)
+ self.images_mock.get.assert_called()
+ self.flavors_mock.get.assert_called()
+
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(tuple(self.data), tuple(data))
+
def test_server_list_with_image(self):
arglist = [
@@ -2027,7 +2222,7 @@ class TestServerList(TestServer):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.flavors_mock.get.assert_called_with(self.flavor.id)
+ self.flavors_mock.get.assert_any_call(self.flavor.id)
self.search_opts['flavor'] = self.flavor.id
self.servers_mock.list.assert_called_with(**self.kwargs)
@@ -2077,6 +2272,50 @@ class TestServerList(TestServer):
'Invalid time value'
)
+ def test_server_list_v269_with_partial_constructs(self):
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.69')
+ arglist = []
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ # include "partial results" from non-responsive part of
+ # infrastructure.
+ server_dict = {
+ "id": "server-id-95a56bfc4xxxxxx28d7e418bfd97813a",
+ "status": "UNKNOWN",
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "created": "2018-12-03T21:06:18Z",
+ "links": [
+ {
+ "href": "http://fake/v2.1/",
+ "rel": "self"
+ },
+ {
+ "href": "http://fake",
+ "rel": "bookmark"
+ }
+ ],
+ # We need to pass networks as {} because its defined as a property
+ # of the novaclient Server class which gives {} by default. If not
+ # it will fail at formatting the networks info later on.
+ "networks": {}
+ }
+ server = compute_fakes.fakes.FakeResource(
+ info=server_dict,
+ )
+ self.servers.append(server)
+ columns, data = self.cmd.take_action(parsed_args)
+ # get the first three servers out since our interest is in the partial
+ # server.
+ next(data)
+ next(data)
+ next(data)
+ partial_server = next(data)
+ expected_row = (
+ 'server-id-95a56bfc4xxxxxx28d7e418bfd97813a', '',
+ 'UNKNOWN', '', '', '')
+ self.assertEqual(expected_row, partial_server)
+
class TestServerLock(TestServer):
@@ -2188,6 +2427,9 @@ class TestServerMigrate(TestServer):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.24')
+
result = self.cmd.take_action(parsed_args)
self.servers_mock.get.assert_called_with(self.server.id)
@@ -2209,6 +2451,9 @@ class TestServerMigrate(TestServer):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.24')
+
result = self.cmd.take_action(parsed_args)
self.servers_mock.get.assert_called_with(self.server.id)
@@ -2230,6 +2475,9 @@ class TestServerMigrate(TestServer):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.24')
+
result = self.cmd.take_action(parsed_args)
self.servers_mock.get.assert_called_with(self.server.id)
@@ -2252,6 +2500,9 @@ class TestServerMigrate(TestServer):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.24')
+
result = self.cmd.take_action(parsed_args)
self.servers_mock.get.assert_called_with(self.server.id)
@@ -2261,6 +2512,28 @@ class TestServerMigrate(TestServer):
self.assertNotCalled(self.servers_mock.migrate)
self.assertIsNone(result)
+ def test_server_live_migrate_225(self):
+ arglist = [
+ '--live', 'fakehost', self.server.id,
+ ]
+ verifylist = [
+ ('live', 'fakehost'),
+ ('block_migration', False),
+ ('wait', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.app.client_manager.compute.api_version = \
+ api_versions.APIVersion('2.25')
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(self.server.id)
+ self.server.live_migrate.assert_called_with(block_migration=False,
+ host='fakehost')
+ self.assertNotCalled(self.servers_mock.migrate)
+ self.assertIsNone(result)
+
@mock.patch.object(common_utils, 'wait_for_status', return_value=True)
def test_server_migrate_with_wait(self, mock_wait_for_status):
arglist = [
@@ -2331,17 +2604,17 @@ class TestServerRebuild(TestServer):
self.images_mock.get.return_value = self.image
# Fake the rebuilt new server.
- new_server = compute_fakes.FakeServer.create_one_server()
-
- # Fake the server to be rebuilt. The IDs of them should be the same.
attrs = {
- 'id': new_server.id,
'image': {
'id': self.image.id
},
'networks': {},
'adminPass': 'passw0rd',
}
+ new_server = compute_fakes.FakeServer.create_one_server(attrs=attrs)
+
+ # Fake the server to be rebuilt. The IDs of them should be the same.
+ attrs['id'] = new_server.id
methods = {
'rebuild': new_server,
}
@@ -2442,6 +2715,117 @@ class TestServerRebuild(TestServer):
self.images_mock.get.assert_called_with(self.image.id)
self.server.rebuild.assert_called_with(self.image, None)
+ def test_rebuild_with_property(self):
+ arglist = [
+ self.server.id,
+ '--property', 'key1=value1',
+ '--property', 'key2=value2'
+ ]
+ expected_property = {'key1': 'value1', 'key2': 'value2'}
+ verifylist = [
+ ('server', self.server.id),
+ ('property', expected_property)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # Get the command object to test
+ self.cmd.take_action(parsed_args)
+
+ self.servers_mock.get.assert_called_with(self.server.id)
+ self.images_mock.get.assert_called_with(self.image.id)
+ self.server.rebuild.assert_called_with(
+ self.image, None, meta=expected_property)
+
+ def test_rebuild_with_keypair_name(self):
+ self.server.key_name = 'mykey'
+ arglist = [
+ self.server.id,
+ '--key-name', self.server.key_name,
+ ]
+ verifylist = [
+ ('server', self.server.id),
+ ('key_name', self.server.key_name)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.app.client_manager.compute.api_version = 2.54
+ with mock.patch.object(api_versions,
+ 'APIVersion',
+ return_value=2.54):
+ self.cmd.take_action(parsed_args)
+ args = (
+ self.image,
+ None,
+ )
+ kwargs = dict(
+ key_name=self.server.key_name,
+ )
+ self.servers_mock.get.assert_called_with(self.server.id)
+ self.images_mock.get.assert_called_with(self.image.id)
+ self.server.rebuild.assert_called_with(*args, **kwargs)
+
+ def test_rebuild_with_keypair_name_older_version(self):
+ self.server.key_name = 'mykey'
+ arglist = [
+ self.server.id,
+ '--key-name', self.server.key_name,
+ ]
+ verifylist = [
+ ('server', self.server.id),
+ ('key_name', self.server.key_name)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.app.client_manager.compute.api_version = 2.53
+ with mock.patch.object(api_versions,
+ 'APIVersion',
+ return_value=2.54):
+ self.assertRaises(exceptions.CommandError,
+ self.cmd.take_action,
+ parsed_args)
+
+ def test_rebuild_with_keypair_unset(self):
+ self.server.key_name = 'mykey'
+ arglist = [
+ self.server.id,
+ '--key-unset',
+ ]
+ verifylist = [
+ ('server', self.server.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.app.client_manager.compute.api_version = 2.54
+ with mock.patch.object(api_versions,
+ 'APIVersion',
+ return_value=2.54):
+ self.cmd.take_action(parsed_args)
+ args = (
+ self.image,
+ None,
+ )
+ kwargs = dict(
+ key_name=None,
+ )
+ self.servers_mock.get.assert_called_with(self.server.id)
+ self.images_mock.get.assert_called_with(self.image.id)
+ self.server.rebuild.assert_called_with(*args, **kwargs)
+
+ def test_rebuild_with_key_name_and_unset(self):
+ self.server.key_name = 'mykey'
+ arglist = [
+ self.server.id,
+ '--key-name', self.server.key_name,
+ '--key-unset',
+ ]
+ verifylist = [
+ ('server', self.server.id),
+ ('key_name', self.server.key_name)
+ ]
+ self.assertRaises(utils.ParserException,
+ self.check_parser,
+ self.cmd, arglist, verifylist)
+
class TestServerRemoveFixedIP(TestServer):
@@ -3217,6 +3601,33 @@ class TestServerShow(TestServer):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
+ def test_show_embedded_flavor(self):
+ # Tests using --os-compute-api-version >= 2.47 where the flavor
+ # details are embedded in the server response body excluding the id.
+ arglist = [
+ self.server.name,
+ ]
+ verifylist = [
+ ('diagnostics', False),
+ ('server', self.server.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.server.info['flavor'] = {
+ 'ephemeral': 0,
+ 'ram': 512,
+ 'original_name': 'm1.tiny',
+ 'vcpus': 1,
+ 'extra_specs': {},
+ 'swap': 0,
+ 'disk': 1
+ }
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertEqual(self.columns, columns)
+ # Since the flavor details are in a dict we can't be sure of the
+ # ordering so just assert that one of the keys is in the output.
+ self.assertIn('original_name', data[2])
+
def test_show_diagnostics(self):
arglist = [
'--diagnostics',
diff --git a/openstackclient/tests/unit/compute/v2/test_service.py b/openstackclient/tests/unit/compute/v2/test_service.py
index 8403efc9..bd299123 100644
--- a/openstackclient/tests/unit/compute/v2/test_service.py
+++ b/openstackclient/tests/unit/compute/v2/test_service.py
@@ -15,7 +15,7 @@
import mock
from mock import call
-
+from novaclient import api_versions
from osc_lib import exceptions
from openstackclient.compute.v2 import service
@@ -340,6 +340,8 @@ class TestServiceSet(TestService):
('service', self.service.binary),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.11')
result = self.cmd.take_action(parsed_args)
self.service_mock.force_down.assert_called_once_with(
self.service.host, self.service.binary, force_down=False)
@@ -359,6 +361,8 @@ class TestServiceSet(TestService):
('service', self.service.binary),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.11')
result = self.cmd.take_action(parsed_args)
self.service_mock.force_down.assert_called_once_with(
self.service.host, self.service.binary, force_down=True)
@@ -380,6 +384,8 @@ class TestServiceSet(TestService):
('service', self.service.binary),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.11')
result = self.cmd.take_action(parsed_args)
self.service_mock.enable.assert_called_once_with(
self.service.host, self.service.binary)
@@ -402,6 +408,8 @@ class TestServiceSet(TestService):
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.11')
with mock.patch.object(self.service_mock, 'enable',
side_effect=Exception()):
self.assertRaises(exceptions.CommandError,
diff --git a/openstackclient/tests/unit/compute/v2/test_usage.py b/openstackclient/tests/unit/compute/v2/test_usage.py
index a7aa1374..76dcc963 100644
--- a/openstackclient/tests/unit/compute/v2/test_usage.py
+++ b/openstackclient/tests/unit/compute/v2/test_usage.py
@@ -14,6 +14,7 @@
import datetime
import mock
+from novaclient import api_versions
from openstackclient.compute.v2 import usage
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
@@ -104,6 +105,31 @@ class TestUsageList(TestUsage):
self.assertEqual(self.columns, columns)
self.assertEqual(tuple(self.data), tuple(data))
+ def test_usage_list_with_pagination(self):
+ arglist = []
+ verifylist = [
+ ('start', None),
+ ('end', None),
+ ]
+
+ self.app.client_manager.compute.api_version = api_versions.APIVersion(
+ '2.40')
+ self.usage_mock.list.reset_mock()
+ self.usage_mock.list.side_effect = [self.usages, []]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.projects_mock.list.assert_called_with()
+ self.usage_mock.list.assert_has_calls([
+ mock.call(mock.ANY, mock.ANY, detailed=True),
+ mock.call(mock.ANY, mock.ANY, detailed=True,
+ marker=self.usages[0]['server_usages'][0]['instance_id'])
+ ])
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(tuple(self.data), tuple(data))
+
class TestUsageShow(TestUsage):
diff --git a/openstackclient/tests/unit/fakes.py b/openstackclient/tests/unit/fakes.py
index 954973ef..bca457e4 100644
--- a/openstackclient/tests/unit/fakes.py
+++ b/openstackclient/tests/unit/fakes.py
@@ -106,6 +106,7 @@ class FakeApp(object):
def __init__(self, _stdout, _log):
self.stdout = _stdout
self.client_manager = None
+ self.api_version = {}
self.stdin = sys.stdin
self.stdout = _stdout or sys.stdout
self.stderr = sys.stderr
diff --git a/openstackclient/tests/unit/identity/v3/fakes.py b/openstackclient/tests/unit/identity/v3/fakes.py
index 77928792..27ee9fd0 100644
--- a/openstackclient/tests/unit/identity/v3/fakes.py
+++ b/openstackclient/tests/unit/identity/v3/fakes.py
@@ -486,6 +486,50 @@ APP_CRED_OPTIONS = {
'secret': app_cred_secret
}
+registered_limit_id = 'registered-limit-id'
+registered_limit_default_limit = 10
+registered_limit_description = 'default limit of foobars'
+registered_limit_resource_name = 'foobars'
+REGISTERED_LIMIT = {
+ 'id': registered_limit_id,
+ 'default_limit': registered_limit_default_limit,
+ 'resource_name': registered_limit_resource_name,
+ 'service_id': service_id,
+ 'description': None,
+ 'region_id': None
+}
+REGISTERED_LIMIT_OPTIONS = {
+ 'id': registered_limit_id,
+ 'default_limit': registered_limit_default_limit,
+ 'resource_name': registered_limit_resource_name,
+ 'service_id': service_id,
+ 'description': registered_limit_description,
+ 'region_id': region_id
+}
+
+limit_id = 'limit-id'
+limit_resource_limit = 15
+limit_description = 'limit of foobars'
+limit_resource_name = 'foobars'
+LIMIT = {
+ 'id': limit_id,
+ 'project_id': project_id,
+ 'resource_limit': limit_resource_limit,
+ 'resource_name': limit_resource_name,
+ 'service_id': service_id,
+ 'description': None,
+ 'region_id': None
+}
+LIMIT_OPTIONS = {
+ 'id': limit_id,
+ 'project_id': project_id,
+ 'resource_limit': limit_resource_limit,
+ 'resource_name': limit_resource_name,
+ 'service_id': service_id,
+ 'description': limit_description,
+ 'region_id': region_id
+}
+
def fake_auth_ref(fake_token, fake_service=None):
"""Create an auth_ref using keystoneauth's fixtures"""
@@ -576,6 +620,12 @@ class FakeIdentityv3Client(object):
self.application_credentials = mock.Mock()
self.application_credentials.resource_class = fakes.FakeResource(None,
{})
+ self.inference_rules = mock.Mock()
+ self.inference_rules.resource_class = fakes.FakeResource(None, {})
+ self.registered_limits = mock.Mock()
+ self.registered_limits.resource_class = fakes.FakeResource(None, {})
+ self.limits = mock.Mock()
+ self.limits.resource_class = fakes.FakeResource(None, {})
class FakeFederationManager(object):
diff --git a/openstackclient/tests/unit/identity/v3/test_endpoint.py b/openstackclient/tests/unit/identity/v3/test_endpoint.py
index bfe930d6..62dcf58d 100644
--- a/openstackclient/tests/unit/identity/v3/test_endpoint.py
+++ b/openstackclient/tests/unit/identity/v3/test_endpoint.py
@@ -439,6 +439,47 @@ class TestEndpointList(TestEndpoint):
)
self.assertEqual(datalist, tuple(data))
+ def test_endpoint_list_project_with_project_domain(self):
+ project = identity_fakes.FakeProject.create_one_project()
+ domain = identity_fakes.FakeDomain.create_one_domain()
+
+ self.ep_filter_mock.list_endpoints_for_project.return_value = [
+ self.endpoint
+ ]
+ self.projects_mock.get.return_value = project
+
+ arglist = [
+ '--project', project.name,
+ '--project-domain', domain.name
+ ]
+ verifylist = [
+ ('project', project.name),
+ ('project_domain', domain.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+ self.ep_filter_mock.list_endpoints_for_project.assert_called_with(
+ project=project.id
+ )
+
+ self.assertEqual(self.columns, columns)
+ datalist = (
+ (
+ self.endpoint.id,
+ self.endpoint.region,
+ self.service.name,
+ self.service.type,
+ True,
+ self.endpoint.interface,
+ self.endpoint.url,
+ ),
+ )
+ self.assertEqual(datalist, tuple(data))
+
class TestEndpointSet(TestEndpoint):
diff --git a/openstackclient/tests/unit/identity/v3/test_implied_role.py b/openstackclient/tests/unit/identity/v3/test_implied_role.py
index 08273f73..74968129 100644
--- a/openstackclient/tests/unit/identity/v3/test_implied_role.py
+++ b/openstackclient/tests/unit/identity/v3/test_implied_role.py
@@ -25,26 +25,32 @@ class TestRole(identity_fakes.TestIdentityv3):
def setUp(self):
super(TestRole, self).setUp()
+ identity_client = self.app.client_manager.identity
+
# Get a shortcut to the UserManager Mock
- self.users_mock = self.app.client_manager.identity.users
+ self.users_mock = identity_client.users
self.users_mock.reset_mock()
# Get a shortcut to the UserManager Mock
- self.groups_mock = self.app.client_manager.identity.groups
+ self.groups_mock = identity_client.groups
self.groups_mock.reset_mock()
# Get a shortcut to the DomainManager Mock
- self.domains_mock = self.app.client_manager.identity.domains
+ self.domains_mock = identity_client.domains
self.domains_mock.reset_mock()
# Get a shortcut to the ProjectManager Mock
- self.projects_mock = self.app.client_manager.identity.projects
+ self.projects_mock = identity_client.projects
self.projects_mock.reset_mock()
# Get a shortcut to the RoleManager Mock
- self.roles_mock = self.app.client_manager.identity.roles
+ self.roles_mock = identity_client.roles
self.roles_mock.reset_mock()
+ # Get a shortcut to the InferenceRuleManager Mock
+ self.inference_rules_mock = identity_client.inference_rules
+ self.inference_rules_mock.reset_mock()
+
def _is_inheritance_testcase(self):
return False
@@ -67,12 +73,13 @@ class TestImpliedRoleCreate(TestRole):
),
]
- self.roles_mock.create_implied.return_value = fakes.FakeResource(
+ fake_resource = fakes.FakeResource(
None,
{'prior_role': copy.deepcopy(identity_fakes.ROLES[0]),
'implied': copy.deepcopy(identity_fakes.ROLES[1]), },
loaded=True,
)
+ self.inference_rules_mock.create.return_value = fake_resource
self.cmd = implied_role.CreateImpliedRole(self.app, None)
@@ -93,8 +100,8 @@ class TestImpliedRoleCreate(TestRole):
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
- # RoleManager.create_implied(prior, implied)
- self.roles_mock.create_implied.assert_called_with(
+ # InferenceRuleManager.create(prior, implied)
+ self.inference_rules_mock.create.assert_called_with(
identity_fakes.ROLES[0]['id'],
identity_fakes.ROLES[1]['id']
)
@@ -126,12 +133,13 @@ class TestImpliedRoleDelete(TestRole):
),
]
- self.roles_mock.delete_implied.return_value = fakes.FakeResource(
+ fake_resource = fakes.FakeResource(
None,
{'prior-role': copy.deepcopy(identity_fakes.ROLES[0]),
'implied': copy.deepcopy(identity_fakes.ROLES[1]), },
loaded=True,
)
+ self.inference_rules_mock.delete.return_value = fake_resource
self.cmd = implied_role.DeleteImpliedRole(self.app, None)
@@ -147,7 +155,7 @@ class TestImpliedRoleDelete(TestRole):
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
- self.roles_mock.delete_implied.assert_called_with(
+ self.inference_rules_mock.delete.assert_called_with(
identity_fakes.ROLES[0]['id'],
identity_fakes.ROLES[1]['id']
)
@@ -158,7 +166,7 @@ class TestImpliedRoleList(TestRole):
def setUp(self):
super(TestImpliedRoleList, self).setUp()
- self.roles_mock.list_inference_roles.return_value = (
+ self.inference_rules_mock.list_inference_roles.return_value = (
identity_fakes.FakeImpliedRoleResponse.create_list())
self.cmd = implied_role.ListImpliedRole(self.app, None)
@@ -168,7 +176,7 @@ class TestImpliedRoleList(TestRole):
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
- self.roles_mock.list_inference_roles.assert_called_with()
+ self.inference_rules_mock.list_inference_roles.assert_called_with()
collist = ['Prior Role ID', 'Prior Role Name',
'Implied Role ID', 'Implied Role Name']
diff --git a/openstackclient/tests/unit/identity/v3/test_limit.py b/openstackclient/tests/unit/identity/v3/test_limit.py
new file mode 100644
index 00000000..e5cd87b8
--- /dev/null
+++ b/openstackclient/tests/unit/identity/v3/test_limit.py
@@ -0,0 +1,383 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from keystoneauth1.exceptions import http as ksa_exceptions
+from osc_lib import exceptions
+
+from openstackclient.identity.v3 import limit
+from openstackclient.tests.unit import fakes
+from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
+
+
+class TestLimit(identity_fakes.TestIdentityv3):
+
+ def setUp(self):
+ super(TestLimit, self).setUp()
+
+ identity_manager = self.app.client_manager.identity
+
+ self.limit_mock = identity_manager.limits
+
+ self.services_mock = identity_manager.services
+ self.services_mock.reset_mock()
+
+ self.projects_mock = identity_manager.projects
+ self.projects_mock.reset_mock()
+
+ self.regions_mock = identity_manager.regions
+ self.regions_mock.reset_mock()
+
+
+class TestLimitCreate(TestLimit):
+
+ def setUp(self):
+ super(TestLimitCreate, self).setUp()
+
+ self.service = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.SERVICE),
+ loaded=True
+ )
+ self.services_mock.get.return_value = self.service
+
+ self.project = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.PROJECT),
+ loaded=True
+ )
+ self.projects_mock.get.return_value = self.project
+
+ self.region = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.REGION),
+ loaded=True
+ )
+ self.regions_mock.get.return_value = self.region
+
+ self.cmd = limit.CreateLimit(self.app, None)
+
+ def test_limit_create_without_options(self):
+ self.limit_mock.create.return_value = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.LIMIT),
+ loaded=True
+ )
+
+ resource_limit = 15
+ arglist = [
+ '--project', identity_fakes.project_id,
+ '--service', identity_fakes.service_id,
+ '--resource-limit', str(resource_limit),
+ identity_fakes.limit_resource_name
+ ]
+ verifylist = [
+ ('project', identity_fakes.project_id),
+ ('service', identity_fakes.service_id),
+ ('resource_name', identity_fakes.limit_resource_name),
+ ('resource_limit', resource_limit)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ kwargs = {'description': None, 'region': None}
+ self.limit_mock.create.assert_called_with(
+ self.project,
+ self.service,
+ identity_fakes.limit_resource_name,
+ resource_limit,
+ **kwargs
+ )
+
+ collist = ('description', 'id', 'project_id', 'region_id',
+ 'resource_limit', 'resource_name', 'service_id')
+ self.assertEqual(collist, columns)
+ datalist = (
+ None,
+ identity_fakes.limit_id,
+ identity_fakes.project_id,
+ None,
+ resource_limit,
+ identity_fakes.limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+ def test_limit_create_with_options(self):
+ self.limit_mock.create.return_value = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.LIMIT_OPTIONS),
+ loaded=True
+ )
+
+ resource_limit = 15
+ arglist = [
+ '--project', identity_fakes.project_id,
+ '--service', identity_fakes.service_id,
+ '--resource-limit', str(resource_limit),
+ '--region', identity_fakes.region_id,
+ '--description', identity_fakes.limit_description,
+ identity_fakes.limit_resource_name
+ ]
+ verifylist = [
+ ('project', identity_fakes.project_id),
+ ('service', identity_fakes.service_id),
+ ('resource_name', identity_fakes.limit_resource_name),
+ ('resource_limit', resource_limit),
+ ('region', identity_fakes.region_id),
+ ('description', identity_fakes.limit_description)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ kwargs = {
+ 'description': identity_fakes.limit_description,
+ 'region': self.region
+ }
+ self.limit_mock.create.assert_called_with(
+ self.project,
+ self.service,
+ identity_fakes.limit_resource_name,
+ resource_limit,
+ **kwargs
+ )
+
+ collist = ('description', 'id', 'project_id', 'region_id',
+ 'resource_limit', 'resource_name', 'service_id')
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.limit_description,
+ identity_fakes.limit_id,
+ identity_fakes.project_id,
+ identity_fakes.region_id,
+ resource_limit,
+ identity_fakes.limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+
+class TestLimitDelete(TestLimit):
+
+ def setUp(self):
+ super(TestLimitDelete, self).setUp()
+ self.cmd = limit.DeleteLimit(self.app, None)
+
+ def test_limit_delete(self):
+ self.limit_mock.delete.return_value = None
+
+ arglist = [identity_fakes.limit_id]
+ verifylist = [
+ ('limit_id', [identity_fakes.limit_id])
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.limit_mock.delete.assert_called_with(
+ identity_fakes.limit_id
+ )
+ self.assertIsNone(result)
+
+ def test_limit_delete_with_exception(self):
+ return_value = ksa_exceptions.NotFound()
+ self.limit_mock.delete.side_effect = return_value
+
+ arglist = ['fake-limit-id']
+ verifylist = [
+ ('limit_id', ['fake-limit-id'])
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ try:
+ self.cmd.take_action(parsed_args)
+ self.fail('CommandError should be raised.')
+ except exceptions.CommandError as e:
+ self.assertEqual(
+ '1 of 1 limits failed to delete.', str(e)
+ )
+
+
+class TestLimitShow(TestLimit):
+
+ def setUp(self):
+ super(TestLimitShow, self).setUp()
+
+ self.limit_mock.get.return_value = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.LIMIT),
+ loaded=True
+ )
+
+ self.cmd = limit.ShowLimit(self.app, None)
+
+ def test_limit_show(self):
+ arglist = [identity_fakes.limit_id]
+ verifylist = [('limit_id', identity_fakes.limit_id)]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.limit_mock.get.assert_called_with(identity_fakes.limit_id)
+
+ collist = (
+ 'description', 'id', 'project_id', 'region_id', 'resource_limit',
+ 'resource_name', 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ None,
+ identity_fakes.limit_id,
+ identity_fakes.project_id,
+ None,
+ identity_fakes.limit_resource_limit,
+ identity_fakes.limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+
+class TestLimitSet(TestLimit):
+
+ def setUp(self):
+ super(TestLimitSet, self).setUp()
+ self.cmd = limit.SetLimit(self.app, None)
+
+ def test_limit_set_description(self):
+        limit_data = copy.deepcopy(identity_fakes.LIMIT)
+        limit_data['description'] = identity_fakes.limit_description
+        self.limit_mock.update.return_value = fakes.FakeResource(
+            None, limit_data, loaded=True
+        )
+
+ arglist = [
+ '--description', identity_fakes.limit_description,
+ identity_fakes.limit_id
+ ]
+ verifylist = [
+ ('description', identity_fakes.limit_description),
+ ('limit_id', identity_fakes.limit_id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.limit_mock.update.assert_called_with(
+ identity_fakes.limit_id,
+ description=identity_fakes.limit_description,
+ resource_limit=None
+ )
+
+ collist = (
+ 'description', 'id', 'project_id', 'region_id', 'resource_limit',
+ 'resource_name', 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.limit_description,
+ identity_fakes.limit_id,
+ identity_fakes.project_id,
+ None,
+ identity_fakes.limit_resource_limit,
+ identity_fakes.limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+ def test_limit_set_resource_limit(self):
+ resource_limit = 20
+        limit_data = copy.deepcopy(identity_fakes.LIMIT)
+        limit_data['resource_limit'] = resource_limit
+        self.limit_mock.update.return_value = fakes.FakeResource(
+            None, limit_data, loaded=True
+        )
+
+ arglist = [
+ '--resource-limit', str(resource_limit),
+ identity_fakes.limit_id
+ ]
+ verifylist = [
+ ('resource_limit', resource_limit),
+ ('limit_id', identity_fakes.limit_id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.limit_mock.update.assert_called_with(
+ identity_fakes.limit_id,
+ description=None,
+ resource_limit=resource_limit
+ )
+
+ collist = (
+ 'description', 'id', 'project_id', 'region_id', 'resource_limit',
+ 'resource_name', 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ None,
+ identity_fakes.limit_id,
+ identity_fakes.project_id,
+ None,
+ resource_limit,
+ identity_fakes.limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+
+class TestLimitList(TestLimit):
+
+ def setUp(self):
+ super(TestLimitList, self).setUp()
+
+ self.limit_mock.list.return_value = [
+ fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.LIMIT),
+ loaded=True
+ )
+ ]
+
+ self.cmd = limit.ListLimit(self.app, None)
+
+ def test_limit_list(self):
+ arglist = []
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.limit_mock.list.assert_called_with(
+ service=None, resource_name=None, region=None,
+ project=None
+ )
+
+ collist = (
+ 'ID', 'Project ID', 'Service ID', 'Resource Name',
+ 'Resource Limit', 'Description', 'Region ID'
+ )
+ self.assertEqual(collist, columns)
+ datalist = ((
+ identity_fakes.limit_id,
+ identity_fakes.project_id,
+ identity_fakes.service_id,
+ identity_fakes.limit_resource_name,
+ identity_fakes.limit_resource_limit,
+ None,
+ None
+ ), )
+ self.assertEqual(datalist, tuple(data))
diff --git a/openstackclient/tests/unit/identity/v3/test_registered_limit.py b/openstackclient/tests/unit/identity/v3/test_registered_limit.py
new file mode 100644
index 00000000..262ca4f9
--- /dev/null
+++ b/openstackclient/tests/unit/identity/v3/test_registered_limit.py
@@ -0,0 +1,510 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from keystoneauth1.exceptions import http as ksa_exceptions
+from osc_lib import exceptions
+
+from openstackclient.identity.v3 import registered_limit
+from openstackclient.tests.unit import fakes
+from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
+
+
+class TestRegisteredLimit(identity_fakes.TestIdentityv3):
+
+ def setUp(self):
+ super(TestRegisteredLimit, self).setUp()
+
+ identity_manager = self.app.client_manager.identity
+ self.registered_limit_mock = identity_manager.registered_limits
+
+ self.services_mock = identity_manager.services
+ self.services_mock.reset_mock()
+
+ self.regions_mock = identity_manager.regions
+ self.regions_mock.reset_mock()
+
+
+class TestRegisteredLimitCreate(TestRegisteredLimit):
+
+ def setUp(self):
+ super(TestRegisteredLimitCreate, self).setUp()
+
+ self.service = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.SERVICE),
+ loaded=True
+ )
+ self.services_mock.get.return_value = self.service
+
+ self.region = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.REGION),
+ loaded=True
+ )
+ self.regions_mock.get.return_value = self.region
+
+ self.cmd = registered_limit.CreateRegisteredLimit(self.app, None)
+
+ def test_registered_limit_create_without_options(self):
+ self.registered_limit_mock.create.return_value = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.REGISTERED_LIMIT),
+ loaded=True
+ )
+
+ resource_name = identity_fakes.registered_limit_resource_name
+ default_limit = identity_fakes.registered_limit_default_limit
+ arglist = [
+ '--service', identity_fakes.service_id,
+ '--default-limit', '10',
+ resource_name,
+ ]
+
+ verifylist = [
+ ('service', identity_fakes.service_id),
+ ('default_limit', default_limit),
+ ('resource_name', resource_name)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ kwargs = {'description': None, 'region': None}
+ self.registered_limit_mock.create.assert_called_with(
+ self.service, resource_name, default_limit, **kwargs
+ )
+
+ collist = ('default_limit', 'description', 'id', 'region_id',
+ 'resource_name', 'service_id')
+
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.registered_limit_default_limit,
+ None,
+ identity_fakes.registered_limit_id,
+ None,
+ identity_fakes.registered_limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+ def test_registered_limit_create_with_options(self):
+ self.registered_limit_mock.create.return_value = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.REGISTERED_LIMIT_OPTIONS),
+ loaded=True
+ )
+
+ resource_name = identity_fakes.registered_limit_resource_name
+ default_limit = identity_fakes.registered_limit_default_limit
+ description = identity_fakes.registered_limit_description
+ arglist = [
+ '--region', identity_fakes.region_id,
+ '--description', description,
+ '--service', identity_fakes.service_id,
+ '--default-limit', '10',
+ resource_name
+ ]
+
+ verifylist = [
+ ('region', identity_fakes.region_id),
+ ('description', description),
+ ('service', identity_fakes.service_id),
+ ('default_limit', default_limit),
+ ('resource_name', resource_name)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ kwargs = {'description': description, 'region': self.region}
+ self.registered_limit_mock.create.assert_called_with(
+ self.service, resource_name, default_limit, **kwargs
+ )
+
+ collist = ('default_limit', 'description', 'id', 'region_id',
+ 'resource_name', 'service_id')
+
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.registered_limit_default_limit,
+ description,
+ identity_fakes.registered_limit_id,
+ identity_fakes.region_id,
+ identity_fakes.registered_limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+
+class TestRegisteredLimitDelete(TestRegisteredLimit):
+
+ def setUp(self):
+ super(TestRegisteredLimitDelete, self).setUp()
+
+ self.cmd = registered_limit.DeleteRegisteredLimit(self.app, None)
+
+ def test_registered_limit_delete(self):
+ self.registered_limit_mock.delete.return_value = None
+
+ arglist = [identity_fakes.registered_limit_id]
+ verifylist = [
+ ('registered_limit_id', [identity_fakes.registered_limit_id])
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+
+ self.registered_limit_mock.delete.assert_called_with(
+ identity_fakes.registered_limit_id
+ )
+ self.assertIsNone(result)
+
+ def test_registered_limit_delete_with_exception(self):
+ return_value = ksa_exceptions.NotFound()
+ self.registered_limit_mock.delete.side_effect = return_value
+
+ arglist = ['fake-registered-limit-id']
+ verifylist = [
+ ('registered_limit_id', ['fake-registered-limit-id'])
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ try:
+ self.cmd.take_action(parsed_args)
+ self.fail('CommandError should be raised.')
+ except exceptions.CommandError as e:
+ self.assertEqual(
+ '1 of 1 registered limits failed to delete.', str(e)
+ )
+
+
+class TestRegisteredLimitShow(TestRegisteredLimit):
+
+ def setUp(self):
+ super(TestRegisteredLimitShow, self).setUp()
+
+ self.registered_limit_mock.get.return_value = fakes.FakeResource(
+ None,
+ copy.deepcopy(identity_fakes.REGISTERED_LIMIT),
+ loaded=True
+ )
+
+ self.cmd = registered_limit.ShowRegisteredLimit(self.app, None)
+
+ def test_registered_limit_show(self):
+ arglist = [identity_fakes.registered_limit_id]
+ verifylist = [
+ ('registered_limit_id', identity_fakes.registered_limit_id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.registered_limit_mock.get.assert_called_with(
+ identity_fakes.registered_limit_id
+ )
+
+ collist = (
+ 'default_limit', 'description', 'id', 'region_id', 'resource_name',
+ 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.registered_limit_default_limit,
+ None,
+ identity_fakes.registered_limit_id,
+ None,
+ identity_fakes.registered_limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+
+class TestRegisteredLimitSet(TestRegisteredLimit):
+
+ def setUp(self):
+ super(TestRegisteredLimitSet, self).setUp()
+ self.cmd = registered_limit.SetRegisteredLimit(self.app, None)
+
+ def test_registered_limit_set_description(self):
+ registered_limit = copy.deepcopy(identity_fakes.REGISTERED_LIMIT)
+ registered_limit['description'] = (
+ identity_fakes.registered_limit_description
+ )
+ self.registered_limit_mock.update.return_value = fakes.FakeResource(
+ None, registered_limit, loaded=True
+ )
+
+ arglist = [
+ '--description', identity_fakes.registered_limit_description,
+ identity_fakes.registered_limit_id
+ ]
+ verifylist = [
+ ('description', identity_fakes.registered_limit_description),
+ ('registered_limit_id', identity_fakes.registered_limit_id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.registered_limit_mock.update.assert_called_with(
+ identity_fakes.registered_limit_id,
+ service=None,
+ resource_name=None,
+ default_limit=None,
+ description=identity_fakes.registered_limit_description,
+ region=None
+ )
+
+ collist = (
+ 'default_limit', 'description', 'id', 'region_id', 'resource_name',
+ 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.registered_limit_default_limit,
+ identity_fakes.registered_limit_description,
+ identity_fakes.registered_limit_id,
+ None,
+ identity_fakes.registered_limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+ def test_registered_limit_set_default_limit(self):
+ registered_limit = copy.deepcopy(identity_fakes.REGISTERED_LIMIT)
+ default_limit = 20
+ registered_limit['default_limit'] = default_limit
+ self.registered_limit_mock.update.return_value = fakes.FakeResource(
+ None, registered_limit, loaded=True
+ )
+
+ arglist = [
+ '--default-limit', str(default_limit),
+ identity_fakes.registered_limit_id
+ ]
+ verifylist = [
+ ('default_limit', default_limit),
+ ('registered_limit_id', identity_fakes.registered_limit_id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.registered_limit_mock.update.assert_called_with(
+ identity_fakes.registered_limit_id,
+ service=None,
+ resource_name=None,
+ default_limit=default_limit,
+ description=None,
+ region=None
+ )
+
+ collist = (
+ 'default_limit', 'description', 'id', 'region_id', 'resource_name',
+ 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ default_limit,
+ None,
+ identity_fakes.registered_limit_id,
+ None,
+ identity_fakes.registered_limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+ def test_registered_limit_set_resource_name(self):
+ registered_limit = copy.deepcopy(identity_fakes.REGISTERED_LIMIT)
+ resource_name = 'volumes'
+ registered_limit['resource_name'] = resource_name
+ self.registered_limit_mock.update.return_value = fakes.FakeResource(
+ None, registered_limit, loaded=True
+ )
+
+ arglist = [
+ '--resource-name', resource_name,
+ identity_fakes.registered_limit_id
+ ]
+ verifylist = [
+ ('resource_name', resource_name),
+ ('registered_limit_id', identity_fakes.registered_limit_id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.registered_limit_mock.update.assert_called_with(
+ identity_fakes.registered_limit_id,
+ service=None,
+ resource_name=resource_name,
+ default_limit=None,
+ description=None,
+ region=None
+ )
+
+ collist = (
+ 'default_limit', 'description', 'id', 'region_id', 'resource_name',
+ 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.registered_limit_default_limit,
+ None,
+ identity_fakes.registered_limit_id,
+ None,
+ resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+ def test_registered_limit_set_service(self):
+ registered_limit = copy.deepcopy(identity_fakes.REGISTERED_LIMIT)
+ service = identity_fakes.FakeService.create_one_service()
+ registered_limit['service_id'] = service.id
+ self.registered_limit_mock.update.return_value = fakes.FakeResource(
+ None, registered_limit, loaded=True
+ )
+ self.services_mock.get.return_value = service
+
+ arglist = [
+ '--service', service.id,
+ identity_fakes.registered_limit_id
+ ]
+ verifylist = [
+ ('service', service.id),
+ ('registered_limit_id', identity_fakes.registered_limit_id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.registered_limit_mock.update.assert_called_with(
+ identity_fakes.registered_limit_id,
+ service=service,
+ resource_name=None,
+ default_limit=None,
+ description=None,
+ region=None
+ )
+
+ collist = (
+ 'default_limit', 'description', 'id', 'region_id', 'resource_name',
+ 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.registered_limit_default_limit,
+ None,
+ identity_fakes.registered_limit_id,
+ None,
+ identity_fakes.registered_limit_resource_name,
+ service.id
+ )
+ self.assertEqual(datalist, data)
+
+ def test_registered_limit_set_region(self):
+ registered_limit = copy.deepcopy(identity_fakes.REGISTERED_LIMIT)
+        region = copy.deepcopy(identity_fakes.REGION)
+ region['id'] = 'RegionTwo'
+ region = fakes.FakeResource(
+ None,
+ copy.deepcopy(region),
+ loaded=True
+ )
+ registered_limit['region_id'] = region.id
+ self.registered_limit_mock.update.return_value = fakes.FakeResource(
+ None, registered_limit, loaded=True
+ )
+ self.regions_mock.get.return_value = region
+
+ arglist = [
+ '--region', region.id,
+ identity_fakes.registered_limit_id
+ ]
+ verifylist = [
+ ('region', region.id),
+ ('registered_limit_id', identity_fakes.registered_limit_id)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.registered_limit_mock.update.assert_called_with(
+ identity_fakes.registered_limit_id,
+ service=None,
+ resource_name=None,
+ default_limit=None,
+ description=None,
+ region=region
+ )
+
+ collist = (
+ 'default_limit', 'description', 'id', 'region_id', 'resource_name',
+ 'service_id'
+ )
+ self.assertEqual(collist, columns)
+ datalist = (
+ identity_fakes.registered_limit_default_limit,
+ None,
+ identity_fakes.registered_limit_id,
+ region.id,
+ identity_fakes.registered_limit_resource_name,
+ identity_fakes.service_id
+ )
+ self.assertEqual(datalist, data)
+
+
+class TestRegisteredLimitList(TestRegisteredLimit):
+
+    def setUp(self):
+        super(TestRegisteredLimitList, self).setUp()
+
+        self.registered_limit_mock.list.return_value = [
+            fakes.FakeResource(
+                None,
+                copy.deepcopy(identity_fakes.REGISTERED_LIMIT),
+                loaded=True
+            )
+        ]
+
+        self.cmd = registered_limit.ListRegisteredLimit(self.app, None)
+
+    def test_registered_limit_list(self):
+        arglist = []
+        verifylist = []
+        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+        columns, data = self.cmd.take_action(parsed_args)
+
+        self.registered_limit_mock.list.assert_called_with(
+            service=None, resource_name=None, region=None
+        )
+
+        collist = (
+            'ID', 'Service ID', 'Resource Name', 'Default Limit',
+            'Description', 'Region ID'
+        )
+        self.assertEqual(collist, columns)
+        datalist = ((
+            identity_fakes.registered_limit_id,
+            identity_fakes.service_id,
+            identity_fakes.registered_limit_resource_name,
+            identity_fakes.registered_limit_default_limit,
+            None,
+            None
+        ), )
+        self.assertEqual(datalist, tuple(data))
diff --git a/openstackclient/tests/unit/identity/v3/test_role_assignment.py b/openstackclient/tests/unit/identity/v3/test_role_assignment.py
index 835837e6..bff6c56d 100644
--- a/openstackclient/tests/unit/identity/v3/test_role_assignment.py
+++ b/openstackclient/tests/unit/identity/v3/test_role_assignment.py
@@ -34,6 +34,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
'Group',
'Project',
'Domain',
+ 'System',
'Inherited',
)
@@ -95,6 +96,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
group=None,
effective=False,
role=None,
@@ -110,12 +112,14 @@ class TestRoleAssignmentList(TestRoleAssignment):
'',
identity_fakes.project_id,
'',
+ '',
False
), (identity_fakes.role_id,
'',
identity_fakes.group_id,
identity_fakes.project_id,
'',
+ '',
False
),)
self.assertEqual(datalist, tuple(data))
@@ -143,6 +147,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', identity_fakes.user_name),
('group', None),
+ ('system', None),
('domain', None),
('project', None),
('role', None),
@@ -159,6 +164,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
user=self.users_mock.get(),
group=None,
project=None,
@@ -174,12 +180,14 @@ class TestRoleAssignmentList(TestRoleAssignment):
'',
'',
identity_fakes.domain_id,
+ '',
False
), (identity_fakes.role_id,
identity_fakes.user_id,
'',
identity_fakes.project_id,
'',
+ '',
False
),)
self.assertEqual(datalist, tuple(data))
@@ -207,6 +215,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', None),
('group', identity_fakes.group_name),
+ ('system', None),
('domain', None),
('project', None),
('role', None),
@@ -223,6 +232,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
group=self.groups_mock.get(),
effective=False,
project=None,
@@ -238,12 +248,14 @@ class TestRoleAssignmentList(TestRoleAssignment):
identity_fakes.group_id,
'',
identity_fakes.domain_id,
+ '',
False
), (identity_fakes.role_id,
'',
identity_fakes.group_id,
identity_fakes.project_id,
'',
+ '',
False
),)
self.assertEqual(datalist, tuple(data))
@@ -271,6 +283,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', None),
('group', None),
+ ('system', None),
('domain', identity_fakes.domain_name),
('project', None),
('role', None),
@@ -287,6 +300,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=self.domains_mock.get(),
+ system=None,
group=None,
effective=False,
project=None,
@@ -302,12 +316,14 @@ class TestRoleAssignmentList(TestRoleAssignment):
'',
'',
identity_fakes.domain_id,
+ '',
False
), (identity_fakes.role_id,
'',
identity_fakes.group_id,
'',
identity_fakes.domain_id,
+ '',
False
),)
self.assertEqual(datalist, tuple(data))
@@ -335,6 +351,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', None),
('group', None),
+ ('system', None),
('domain', None),
('project', identity_fakes.project_name),
('role', None),
@@ -351,6 +368,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
group=None,
effective=False,
project=self.projects_mock.get(),
@@ -366,12 +384,14 @@ class TestRoleAssignmentList(TestRoleAssignment):
'',
identity_fakes.project_id,
'',
+ '',
False
), (identity_fakes.role_id,
'',
identity_fakes.group_id,
identity_fakes.project_id,
'',
+ '',
False
),)
self.assertEqual(datalist, tuple(data))
@@ -398,6 +418,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', None),
('group', None),
+ ('system', None),
('domain', None),
('project', None),
('role', None),
@@ -416,6 +437,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
user=self.users_mock.get(),
group=None,
project=self.projects_mock.get(),
@@ -431,6 +453,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
'',
identity_fakes.project_id,
'',
+ '',
False
),)
self.assertEqual(datalist, tuple(data))
@@ -456,6 +479,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', None),
('group', None),
+ ('system', None),
('domain', None),
('project', None),
('role', None),
@@ -472,6 +496,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
group=None,
effective=True,
project=None,
@@ -487,12 +512,14 @@ class TestRoleAssignmentList(TestRoleAssignment):
'',
identity_fakes.project_id,
'',
+ '',
False
), (identity_fakes.role_id,
identity_fakes.user_id,
'',
'',
identity_fakes.domain_id,
+ '',
False
),)
self.assertEqual(tuple(data), datalist)
@@ -520,6 +547,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', None),
('group', None),
+ ('system', None),
('domain', None),
('project', None),
('role', None),
@@ -536,6 +564,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
group=None,
effective=False,
project=None,
@@ -551,12 +580,14 @@ class TestRoleAssignmentList(TestRoleAssignment):
'',
identity_fakes.project_id,
'',
+ '',
True
), (identity_fakes.role_id,
identity_fakes.user_id,
'',
'',
identity_fakes.domain_id,
+ '',
True
),)
self.assertEqual(datalist, tuple(data))
@@ -584,6 +615,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', None),
('group', None),
+ ('system', None),
('domain', None),
('project', None),
('role', None),
@@ -602,6 +634,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
group=None,
effective=False,
project=None,
@@ -610,7 +643,9 @@ class TestRoleAssignmentList(TestRoleAssignment):
os_inherit_extension_inherited_to=None,
include_names=True)
- collist = ('Role', 'User', 'Group', 'Project', 'Domain', 'Inherited')
+ collist = (
+ 'Role', 'User', 'Group', 'Project', 'Domain', 'System', 'Inherited'
+ )
self.assertEqual(columns, collist)
datalist1 = ((
@@ -620,12 +655,14 @@ class TestRoleAssignmentList(TestRoleAssignment):
'@'.join([identity_fakes.project_name,
identity_fakes.domain_name]),
'',
+ '',
False
), (identity_fakes.role_name,
'@'.join([identity_fakes.user_name, identity_fakes.domain_name]),
'',
'',
identity_fakes.domain_name,
+ '',
False
),)
self.assertEqual(tuple(data), datalist1)
@@ -648,6 +685,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
verifylist = [
('user', None),
('group', None),
+ ('system', None),
('domain', None),
('project', None),
('role', identity_fakes.ROLE_2['name']),
@@ -664,6 +702,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
self.role_assignments_mock.list.assert_called_with(
domain=None,
+ system=None,
user=None,
group=None,
project=None,
@@ -679,6 +718,7 @@ class TestRoleAssignmentList(TestRoleAssignment):
'',
'',
identity_fakes.domain_id,
+ '',
False
),)
self.assertEqual(datalist, tuple(data))
diff --git a/openstackclient/tests/unit/image/v2/test_image.py b/openstackclient/tests/unit/image/v2/test_image.py
index 301cd037..087d8751 100644
--- a/openstackclient/tests/unit/image/v2/test_image.py
+++ b/openstackclient/tests/unit/image/v2/test_image.py
@@ -527,6 +527,7 @@ class TestImageList(TestImage):
verifylist = [
('public', False),
('private', False),
+ ('community', False),
('shared', False),
('long', False),
]
@@ -550,6 +551,7 @@ class TestImageList(TestImage):
verifylist = [
('public', True),
('private', False),
+ ('community', False),
('shared', False),
('long', False),
]
@@ -574,6 +576,7 @@ class TestImageList(TestImage):
verifylist = [
('public', False),
('private', True),
+ ('community', False),
('shared', False),
('long', False),
]
@@ -591,6 +594,31 @@ class TestImageList(TestImage):
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, tuple(data))
+ def test_image_list_community_option(self):
+ arglist = [
+ '--community',
+ ]
+ verifylist = [
+ ('public', False),
+ ('private', False),
+ ('community', True),
+ ('shared', False),
+ ('long', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+ self.api_mock.image_list.assert_called_with(
+ community=True,
+ marker=self._image.id,
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.datalist, tuple(data))
+
def test_image_list_shared_option(self):
arglist = [
'--shared',
@@ -598,8 +626,36 @@ class TestImageList(TestImage):
verifylist = [
('public', False),
('private', False),
+ ('community', False),
+ ('shared', True),
+ ('long', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+ self.api_mock.image_list.assert_called_with(
+ shared=True,
+ marker=self._image.id,
+ )
+
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.datalist, tuple(data))
+
+ def test_image_list_shared_member_status_option(self):
+ arglist = [
+ '--shared',
+ '--member-status', 'all'
+ ]
+ verifylist = [
+ ('public', False),
+ ('private', False),
+ ('community', False),
('shared', True),
('long', False),
+ ('member_status', 'all')
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -609,12 +665,28 @@ class TestImageList(TestImage):
columns, data = self.cmd.take_action(parsed_args)
self.api_mock.image_list.assert_called_with(
shared=True,
+ member_status='all',
marker=self._image.id,
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, tuple(data))
+ def test_image_list_shared_member_status_lower(self):
+ arglist = [
+ '--shared',
+ '--member-status', 'ALl'
+ ]
+ verifylist = [
+ ('public', False),
+ ('private', False),
+ ('community', False),
+ ('shared', True),
+ ('long', False),
+ ('member_status', 'all')
+ ]
+ self.check_parser(self.cmd, arglist, verifylist)
+
def test_image_list_long_option(self):
arglist = [
'--long',
@@ -708,7 +780,8 @@ class TestImageList(TestImage):
)
si_mock.assert_called_with(
[self._image],
- 'name:asc'
+ 'name:asc',
+ str,
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, tuple(data))
@@ -779,6 +852,20 @@ class TestImageList(TestImage):
status='active', marker=self._image.id
)
+ def test_image_list_tag_option(self):
+ arglist = [
+ '--tag', 'abc',
+ ]
+ verifylist = [
+ ('tag', 'abc'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+ self.api_mock.image_list.assert_called_with(
+ tag='abc', marker=self._image.id
+ )
+
class TestListImageProjects(TestImage):
diff --git a/openstackclient/tests/unit/network/v2/fakes.py b/openstackclient/tests/unit/network/v2/fakes.py
index 0e21e2f8..ee0919fd 100644
--- a/openstackclient/tests/unit/network/v2/fakes.py
+++ b/openstackclient/tests/unit/network/v2/fakes.py
@@ -335,6 +335,7 @@ class FakeNetwork(object):
'name': 'network-name-' + uuid.uuid4().hex,
'status': 'ACTIVE',
'description': 'network-description-' + uuid.uuid4().hex,
+ 'dns_domain': 'example.org.',
'mtu': '1350',
'tenant_id': 'project-id-' + uuid.uuid4().hex,
'admin_state_up': True,
@@ -580,6 +581,7 @@ class FakePort(object):
'tenant_id': 'project-id-' + uuid.uuid4().hex,
'qos_policy_id': 'qos-policy-id-' + uuid.uuid4().hex,
'tags': [],
+ 'uplink_status_propagation': False,
}
# Overwrite default attributes.
@@ -599,6 +601,8 @@ class FakePort(object):
port.project_id = port_attrs['tenant_id']
port.security_group_ids = port_attrs['security_group_ids']
port.qos_policy_id = port_attrs['qos_policy_id']
+ port.uplink_status_propagation = port_attrs[
+ 'uplink_status_propagation']
return port
@@ -1132,6 +1136,7 @@ class FakeSecurityGroup(object):
'description': 'security-group-description-' + uuid.uuid4().hex,
'project_id': 'project-id-' + uuid.uuid4().hex,
'security_group_rules': [],
+ 'tags': []
}
# Overwrite default attributes.
@@ -1695,3 +1700,26 @@ class FakeQuota(object):
info=copy.deepcopy(quota_attrs),
loaded=True)
return quota
+
+ @staticmethod
+ def create_one_net_detailed_quota(attrs=None):
+ """Create one quota"""
+ attrs = attrs or {}
+
+ quota_attrs = {
+ 'floating_ips': {'used': 0, 'reserved': 0, 'limit': 20},
+ 'networks': {'used': 0, 'reserved': 0, 'limit': 25},
+ 'ports': {'used': 0, 'reserved': 0, 'limit': 11},
+ 'rbac_policies': {'used': 0, 'reserved': 0, 'limit': 15},
+ 'routers': {'used': 0, 'reserved': 0, 'limit': 40},
+ 'security_groups': {'used': 0, 'reserved': 0, 'limit': 10},
+ 'security_group_rules': {'used': 0, 'reserved': 0, 'limit': 100},
+ 'subnets': {'used': 0, 'reserved': 0, 'limit': 20},
+ 'subnet_pools': {'used': 0, 'reserved': 0, 'limit': 30}}
+
+ quota_attrs.update(attrs)
+
+ quota = fakes.FakeResource(
+ info=copy.deepcopy(quota_attrs),
+ loaded=True)
+ return quota
diff --git a/openstackclient/tests/unit/network/v2/test_floating_ip_network.py b/openstackclient/tests/unit/network/v2/test_floating_ip_network.py
index 65d87377..cbd4da38 100644
--- a/openstackclient/tests/unit/network/v2/test_floating_ip_network.py
+++ b/openstackclient/tests/unit/network/v2/test_floating_ip_network.py
@@ -49,6 +49,8 @@ class TestCreateFloatingIPNetwork(TestFloatingIPNetwork):
attrs={
'floating_network_id': floating_network.id,
'port_id': port.id,
+ 'dns_domain': 'example.org.',
+ 'dns_name': 'fip1',
}
)
@@ -129,6 +131,8 @@ class TestCreateFloatingIPNetwork(TestFloatingIPNetwork):
'--floating-ip-address', self.floating_ip.floating_ip_address,
'--fixed-ip-address', self.floating_ip.fixed_ip_address,
'--description', self.floating_ip.description,
+ '--dns-domain', self.floating_ip.dns_domain,
+ '--dns-name', self.floating_ip.dns_name,
self.floating_ip.floating_network_id,
]
verifylist = [
@@ -137,6 +141,8 @@ class TestCreateFloatingIPNetwork(TestFloatingIPNetwork):
('fixed_ip_address', self.floating_ip.fixed_ip_address),
('network', self.floating_ip.floating_network_id),
('description', self.floating_ip.description),
+ ('dns_domain', self.floating_ip.dns_domain),
+ ('dns_name', self.floating_ip.dns_name),
('floating_ip_address', self.floating_ip.floating_ip_address),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -150,6 +156,8 @@ class TestCreateFloatingIPNetwork(TestFloatingIPNetwork):
'fixed_ip_address': self.floating_ip.fixed_ip_address,
'floating_network_id': self.floating_ip.floating_network_id,
'description': self.floating_ip.description,
+ 'dns_domain': self.floating_ip.dns_domain,
+ 'dns_name': self.floating_ip.dns_name,
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
@@ -393,6 +401,8 @@ class TestListFloatingIPNetwork(TestFloatingIPNetwork):
'Status',
'Description',
'Tags',
+ 'DNS Name',
+ 'DNS Domain',
)
data = []
@@ -417,6 +427,8 @@ class TestListFloatingIPNetwork(TestFloatingIPNetwork):
ip.status,
ip.description,
ip.tags,
+ ip.dns_domain,
+ ip.dns_name,
))
def setUp(self):
@@ -492,6 +504,23 @@ class TestListFloatingIPNetwork(TestFloatingIPNetwork):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
+ def test_floating_ip_list_floating_ip_address(self):
+ arglist = [
+ '--floating-ip-address', self.floating_ips[0].floating_ip_address,
+ ]
+ verifylist = [
+ ('floating_ip_address', self.floating_ips[0].floating_ip_address),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.ips.assert_called_once_with(**{
+ 'floating_ip_address': self.floating_ips[0].floating_ip_address,
+ })
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
def test_floating_ip_list_long(self):
arglist = ['--long', ]
verifylist = [('long', True), ]
@@ -747,6 +776,31 @@ class TestSetFloatingIP(TestFloatingIPNetwork):
self.network.update_ip.assert_called_once_with(
self.floating_ip, **attrs)
+ def test_qos_policy_option(self):
+ qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
+ self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
+ arglist = [
+ "--qos-policy", qos_policy.id,
+ self.floating_ip.id,
+ ]
+ verifylist = [
+ ('qos_policy', qos_policy.id),
+ ('floating_ip', self.floating_ip.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'qos_policy_id': qos_policy.id,
+ }
+ self.network.find_ip.assert_called_once_with(
+ self.floating_ip.id,
+ ignore_missing=False,
+ )
+ self.network.update_ip.assert_called_once_with(
+ self.floating_ip, **attrs)
+
def test_port_and_qos_policy_option(self):
qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
@@ -775,6 +829,29 @@ class TestSetFloatingIP(TestFloatingIPNetwork):
self.network.update_ip.assert_called_once_with(
self.floating_ip, **attrs)
+ def test_no_qos_policy_option(self):
+ arglist = [
+ "--no-qos-policy",
+ self.floating_ip.id,
+ ]
+ verifylist = [
+ ('no_qos_policy', True),
+ ('floating_ip', self.floating_ip.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ attrs = {
+ 'qos_policy_id': None,
+ }
+ self.network.find_ip.assert_called_once_with(
+ self.floating_ip.id,
+ ignore_missing=False,
+ )
+ self.network.update_ip.assert_called_once_with(
+ self.floating_ip, **attrs)
+
def test_port_and_no_qos_policy_option(self):
arglist = [
"--no-qos-policy",
@@ -810,16 +887,13 @@ class TestSetFloatingIP(TestFloatingIPNetwork):
arglist = ['--no-tag']
verifylist = [('no_tag', True)]
expected_args = []
- arglist.extend(['--port', self.floating_ip.port_id,
- self.floating_ip.id])
- verifylist.extend([
- ('port', self.floating_ip.port_id),
- ('floating_ip', self.floating_ip.id)])
+ arglist.extend([self.floating_ip.id])
+ verifylist.extend([('floating_ip', self.floating_ip.id)])
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
- self.assertTrue(self.network.update_ip.called)
+ self.assertFalse(self.network.update_ip.called)
self.network.set_tags.assert_called_once_with(
self.floating_ip,
tests_utils.CompareBySet(expected_args))
diff --git a/openstackclient/tests/unit/network/v2/test_network.py b/openstackclient/tests/unit/network/v2/test_network.py
index 9f4a6acc..0f57f0ee 100644
--- a/openstackclient/tests/unit/network/v2/test_network.py
+++ b/openstackclient/tests/unit/network/v2/test_network.py
@@ -60,6 +60,7 @@ class TestCreateNetworkIdentityV3(TestNetwork):
'availability_zone_hints',
'availability_zones',
'description',
+ 'dns_domain',
'id',
'ipv4_address_scope',
'ipv6_address_scope',
@@ -84,6 +85,7 @@ class TestCreateNetworkIdentityV3(TestNetwork):
utils.format_list(_network.availability_zone_hints),
utils.format_list(_network.availability_zones),
_network.description,
+ _network.dns_domain,
_network.id,
_network.ipv4_address_scope_id,
_network.ipv6_address_scope_id,
@@ -162,6 +164,7 @@ class TestCreateNetworkIdentityV3(TestNetwork):
"--qos-policy", self.qos_policy.id,
"--transparent-vlan",
"--enable-port-security",
+ "--dns-domain", "example.org.",
self._network.name,
]
verifylist = [
@@ -181,6 +184,7 @@ class TestCreateNetworkIdentityV3(TestNetwork):
('transparent_vlan', True),
('enable_port_security', True),
('name', self._network.name),
+ ('dns_domain', 'example.org.'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -204,6 +208,7 @@ class TestCreateNetworkIdentityV3(TestNetwork):
'qos_policy_id': self.qos_policy.id,
'vlan_transparent': True,
'port_security_enabled': True,
+ 'dns_domain': 'example.org.',
})
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
@@ -287,6 +292,7 @@ class TestCreateNetworkIdentityV2(TestNetwork):
'availability_zone_hints',
'availability_zones',
'description',
+ 'dns_domain',
'id',
'ipv4_address_scope',
'ipv6_address_scope',
@@ -311,6 +317,7 @@ class TestCreateNetworkIdentityV2(TestNetwork):
utils.format_list(_network.availability_zone_hints),
utils.format_list(_network.availability_zones),
_network.description,
+ _network.dns_domain,
_network.id,
_network.ipv4_address_scope_id,
_network.ipv6_address_scope_id,
@@ -901,6 +908,7 @@ class TestSetNetwork(TestNetwork):
'--name', 'noob',
'--share',
'--description', self._network.description,
+ '--dns-domain', 'example.org.',
'--external',
'--default',
'--provider-network-type', 'vlan',
@@ -922,6 +930,7 @@ class TestSetNetwork(TestNetwork):
('segmentation_id', '400'),
('enable_port_security', True),
('qos_policy', self.qos_policy.name),
+ ('dns_domain', 'example.org.'),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -939,6 +948,7 @@ class TestSetNetwork(TestNetwork):
'provider:segmentation_id': '400',
'port_security_enabled': True,
'qos_policy_id': self.qos_policy.id,
+ 'dns_domain': 'example.org.',
}
self.network.update_network.assert_called_once_with(
self._network, **attrs)
@@ -1026,6 +1036,7 @@ class TestShowNetwork(TestNetwork):
'availability_zone_hints',
'availability_zones',
'description',
+ 'dns_domain',
'id',
'ipv4_address_scope',
'ipv6_address_scope',
@@ -1050,6 +1061,7 @@ class TestShowNetwork(TestNetwork):
utils.format_list(_network.availability_zone_hints),
utils.format_list(_network.availability_zones),
_network.description,
+ _network.dns_domain,
_network.id,
_network.ipv4_address_scope_id,
_network.ipv6_address_scope_id,
diff --git a/openstackclient/tests/unit/network/v2/test_port.py b/openstackclient/tests/unit/network/v2/test_port.py
index 03e1d841..8ac3e54f 100644
--- a/openstackclient/tests/unit/network/v2/test_port.py
+++ b/openstackclient/tests/unit/network/v2/test_port.py
@@ -64,6 +64,7 @@ class TestPort(network_fakes.TestNetworkV2):
'security_group_ids',
'status',
'tags',
+ 'uplink_status_propagation',
)
data = (
@@ -93,6 +94,7 @@ class TestPort(network_fakes.TestNetworkV2):
utils.format_list(fake_port.security_group_ids),
fake_port.status,
utils.format_list(fake_port.tags),
+ fake_port.uplink_status_propagation,
)
return columns, data
@@ -571,6 +573,43 @@ class TestCreatePort(TestPort):
def test_create_with_no_tag(self):
self._test_create_with_tag(add_tags=False)
+ def _test_create_with_uplink_status_propagation(self, enable=True):
+ arglist = [
+ '--network', self._port.network_id,
+ 'test-port',
+ ]
+ if enable:
+ arglist += ['--enable-uplink-status-propagation']
+ else:
+ arglist += ['--disable-uplink-status-propagation']
+ verifylist = [
+ ('network', self._port.network_id,),
+ ('name', 'test-port'),
+ ]
+ if enable:
+ verifylist.append(('enable_uplink_status_propagation', True))
+ else:
+ verifylist.append(('disable_uplink_status_propagation', True))
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_port.assert_called_once_with(**{
+ 'admin_state_up': True,
+ 'network_id': self._port.network_id,
+ 'propagate_uplink_status': enable,
+ 'name': 'test-port',
+ })
+
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+ def test_create_with_uplink_status_propagation_enabled(self):
+ self._test_create_with_uplink_status_propagation(enable=True)
+
+ def test_create_with_uplink_status_propagation_disabled(self):
+ self._test_create_with_uplink_status_propagation(enable=False)
+
class TestDeletePort(TestPort):
@@ -867,6 +906,24 @@ class TestListPort(TestPort):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
+ def test_port_list_fixed_ip_opt_ip_address_substr(self):
+ ip_address_ss = self._ports[0].fixed_ips[0]['ip_address'][:-1]
+ arglist = [
+ '--fixed-ip', "ip-substring=%s" % ip_address_ss,
+ ]
+ verifylist = [
+ ('fixed_ip', [{'ip-substring': ip_address_ss}])
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.ports.assert_called_once_with(**{
+ 'fixed_ips': ['ip_address_substr=%s' % ip_address_ss]})
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
def test_port_list_fixed_ip_opt_subnet_id(self):
subnet_id = self._ports[0].fixed_ips[0]['subnet_id']
arglist = [
diff --git a/openstackclient/tests/unit/network/v2/test_router.py b/openstackclient/tests/unit/network/v2/test_router.py
index f383c1dd..618adf35 100644
--- a/openstackclient/tests/unit/network/v2/test_router.py
+++ b/openstackclient/tests/unit/network/v2/test_router.py
@@ -400,9 +400,9 @@ class TestListRouter(TestRouter):
'Name',
'Status',
'State',
+ 'Project',
'Distributed',
'HA',
- 'Project',
)
columns_long = columns + (
'Routes',
@@ -423,9 +423,9 @@ class TestListRouter(TestRouter):
r.name,
r.status,
router._format_admin_state(r.admin_state_up),
+ r.tenant_id,
r.distributed,
r.ha,
- r.tenant_id,
))
router_agent_data = []
@@ -496,6 +496,25 @@ class TestListRouter(TestRouter):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
+ def test_router_list_no_ha_no_distributed(self):
+ _routers = network_fakes.FakeRouter.create_routers({
+ 'ha': None,
+ 'distributed': None},
+ count=3)
+
+ arglist = []
+ verifylist = [
+ ('long', False),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ with mock.patch.object(
+ self.network, "routers", return_value=_routers):
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertNotIn("is_distributed", columns)
+ self.assertNotIn("is_ha", columns)
+
def test_router_list_long(self):
arglist = [
'--long',
@@ -1113,6 +1132,102 @@ class TestSetRouter(TestRouter):
def test_set_with_no_tag(self):
self._test_set_tags(with_tags=False)
+ def test_set_gateway_ip_qos(self):
+ qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
+ self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
+ arglist = [
+ "--external-gateway", self._network.id,
+ "--qos-policy", qos_policy.id,
+ self._router.id,
+ ]
+ verifylist = [
+ ('router', self._router.id),
+ ('external_gateway', self._network.id),
+ ('qos_policy', qos_policy.id),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.network.update_router.assert_called_with(
+ self._router, **{'external_gateway_info': {
+ 'network_id': self._network.id,
+ 'qos_policy_id': qos_policy.id, }})
+ self.assertIsNone(result)
+
+ def test_unset_gateway_ip_qos(self):
+ arglist = [
+ "--external-gateway", self._network.id,
+ "--no-qos-policy",
+ self._router.id,
+ ]
+ verifylist = [
+ ('router', self._router.id),
+ ('external_gateway', self._network.id),
+ ('no_qos_policy', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.network.update_router.assert_called_with(
+ self._router, **{'external_gateway_info': {
+ 'network_id': self._network.id,
+ 'qos_policy_id': None, }})
+ self.assertIsNone(result)
+
+ def test_set_unset_gateway_ip_qos(self):
+ qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
+ self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
+ arglist = [
+ "--external-gateway", self._network.id,
+ "--qos-policy", qos_policy.id,
+ "--no-qos-policy",
+ self._router.id,
+ ]
+ verifylist = [
+ ('router', self._router.id),
+ ('external_gateway', self._network.id),
+ ('qos_policy', qos_policy.id),
+ ('no_qos_policy', True),
+ ]
+
+ self.assertRaises(tests_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_set_gateway_ip_qos_no_gateway(self):
+ qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
+ self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
+ router = network_fakes.FakeRouter.create_one_router()
+ self.network.find_router = mock.Mock(return_value=router)
+ arglist = [
+ "--qos-policy", qos_policy.id,
+ router.id,
+ ]
+ verifylist = [
+ ('router', router.id),
+ ('qos_policy', qos_policy.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.assertRaises(exceptions.CommandError,
+ self.cmd.take_action, parsed_args)
+
+ def test_unset_gateway_ip_qos_no_gateway(self):
+ qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
+ self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
+ router = network_fakes.FakeRouter.create_one_router()
+ self.network.find_router = mock.Mock(return_value=router)
+ arglist = [
+ "--no-qos-policy",
+ router.id,
+ ]
+ verifylist = [
+ ('router', router.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.assertRaises(exceptions.CommandError,
+ self.cmd.take_action, parsed_args)
+
class TestShowRouter(TestRouter):
@@ -1196,17 +1311,44 @@ class TestShowRouter(TestRouter):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
+ def test_show_no_ha_no_distributed(self):
+ _router = network_fakes.FakeRouter.create_one_router({
+ 'ha': None,
+ 'distributed': None})
+
+ arglist = [
+ _router.name,
+ ]
+ verifylist = [
+ ('router', _router.name),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ with mock.patch.object(
+ self.network, "find_router", return_value=_router):
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.assertNotIn("is_distributed", columns)
+ self.assertNotIn("is_ha", columns)
+
class TestUnsetRouter(TestRouter):
def setUp(self):
super(TestUnsetRouter, self).setUp()
+ self.fake_network = network_fakes.FakeNetwork.create_one_network()
+ self.fake_qos_policy = (
+ network_fakes.FakeNetworkQosPolicy.create_one_qos_policy())
self._testrouter = network_fakes.FakeRouter.create_one_router(
{'routes': [{"destination": "192.168.101.1/24",
"nexthop": "172.24.4.3"},
{"destination": "192.168.101.2/24",
"nexthop": "172.24.4.3"}],
- 'tags': ['green', 'red'], })
+ 'tags': ['green', 'red'],
+ 'external_gateway_info': {
+ 'network_id': self.fake_network.id,
+ 'qos_policy_id': self.fake_qos_policy.id
+ }})
self.fake_subnet = network_fakes.FakeSubnet.create_one_subnet()
self.network.find_router = mock.Mock(return_value=self._testrouter)
self.network.update_router = mock.Mock(return_value=None)
@@ -1289,3 +1431,54 @@ class TestUnsetRouter(TestRouter):
def test_unset_with_all_tag(self):
self._test_unset_tags(with_tags=False)
+
+ def test_unset_router_qos_policy(self):
+ arglist = [
+ '--qos-policy',
+ self._testrouter.name,
+ ]
+ verifylist = [
+ ('qos_policy', True)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+ attrs = {'external_gateway_info': {"network_id": self.fake_network.id,
+ "qos_policy_id": None}}
+ self.network.update_router.assert_called_once_with(
+ self._testrouter, **attrs)
+ self.assertIsNone(result)
+
+ def test_unset_gateway_ip_qos_no_network(self):
+ qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
+ self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
+ router = network_fakes.FakeRouter.create_one_router()
+ self.network.find_router = mock.Mock(return_value=router)
+ arglist = [
+ "--qos-policy",
+ router.id,
+ ]
+ verifylist = [
+ ('router', router.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.assertRaises(exceptions.CommandError,
+ self.cmd.take_action, parsed_args)
+
+ def test_unset_gateway_ip_qos_no_qos(self):
+ qos_policy = network_fakes.FakeNetworkQosPolicy.create_one_qos_policy()
+ self.network.find_qos_policy = mock.Mock(return_value=qos_policy)
+ router = network_fakes.FakeRouter.create_one_router(
+ {"external_gateway_info": {"network_id": "fake-id"}})
+ self.network.find_router = mock.Mock(return_value=router)
+ arglist = [
+ "--qos-policy",
+ router.id,
+ ]
+ verifylist = [
+ ('router', router.id),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ self.assertRaises(exceptions.CommandError,
+ self.cmd.take_action, parsed_args)
diff --git a/openstackclient/tests/unit/network/v2/test_security_group_network.py b/openstackclient/tests/unit/network/v2/test_security_group_network.py
index 35b7e366..83208287 100644
--- a/openstackclient/tests/unit/network/v2/test_security_group_network.py
+++ b/openstackclient/tests/unit/network/v2/test_security_group_network.py
@@ -40,8 +40,8 @@ class TestCreateSecurityGroupNetwork(TestSecurityGroupNetwork):
project = identity_fakes.FakeProject.create_one_project()
domain = identity_fakes.FakeDomain.create_one_domain()
# The security group to be created.
- _security_group = \
- network_fakes.FakeSecurityGroup.create_one_security_group()
+ _security_group = (
+ network_fakes.FakeSecurityGroup.create_one_security_group())
columns = (
'description',
@@ -49,6 +49,7 @@ class TestCreateSecurityGroupNetwork(TestSecurityGroupNetwork):
'name',
'project_id',
'rules',
+ 'tags',
)
data = (
@@ -57,6 +58,7 @@ class TestCreateSecurityGroupNetwork(TestSecurityGroupNetwork):
_security_group.name,
_security_group.project_id,
'',
+ _security_group.tags,
)
def setUp(self):
@@ -67,6 +69,7 @@ class TestCreateSecurityGroupNetwork(TestSecurityGroupNetwork):
self.projects_mock.get.return_value = self.project
self.domains_mock.get.return_value = self.domain
+ self.network.set_tags = mock.Mock(return_value=None)
# Get the command object to test
self.cmd = security_group.CreateSecurityGroup(self.app, self.namespace)
@@ -118,6 +121,43 @@ class TestCreateSecurityGroupNetwork(TestSecurityGroupNetwork):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
+ def _test_create_with_tag(self, add_tags=True):
+ arglist = [self._security_group.name]
+ if add_tags:
+ arglist += ['--tag', 'red', '--tag', 'blue']
+ else:
+ arglist += ['--no-tag']
+
+ verifylist = [
+ ('name', self._security_group.name),
+ ]
+ if add_tags:
+ verifylist.append(('tags', ['red', 'blue']))
+ else:
+ verifylist.append(('no_tag', True))
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = (self.cmd.take_action(parsed_args))
+
+ self.network.create_security_group.assert_called_once_with(**{
+ 'description': self._security_group.name,
+ 'name': self._security_group.name,
+ })
+ if add_tags:
+ self.network.set_tags.assert_called_once_with(
+ self._security_group,
+ tests_utils.CompareBySet(['red', 'blue']))
+ else:
+ self.assertFalse(self.network.set_tags.called)
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, data)
+
+ def test_create_with_tags(self):
+ self._test_create_with_tag(add_tags=True)
+
+ def test_create_with_no_tag(self):
+ self._test_create_with_tag(add_tags=False)
+
class TestDeleteSecurityGroupNetwork(TestSecurityGroupNetwork):
@@ -214,6 +254,7 @@ class TestListSecurityGroupNetwork(TestSecurityGroupNetwork):
'Name',
'Description',
'Project',
+ 'Tags',
)
data = []
@@ -223,6 +264,7 @@ class TestListSecurityGroupNetwork(TestSecurityGroupNetwork):
grp.name,
grp.description,
grp.project_id,
+ grp.tags,
))
def setUp(self):
@@ -300,12 +342,38 @@ class TestListSecurityGroupNetwork(TestSecurityGroupNetwork):
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
+ def test_list_with_tag_options(self):
+ arglist = [
+ '--tags', 'red,blue',
+ '--any-tags', 'red,green',
+ '--not-tags', 'orange,yellow',
+ '--not-any-tags', 'black,white',
+ ]
+ verifylist = [
+ ('tags', ['red', 'blue']),
+ ('any_tags', ['red', 'green']),
+ ('not_tags', ['orange', 'yellow']),
+ ('not_any_tags', ['black', 'white']),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, data = self.cmd.take_action(parsed_args)
+
+ self.network.security_groups.assert_called_once_with(
+ **{'tags': 'red,blue',
+ 'any_tags': 'red,green',
+ 'not_tags': 'orange,yellow',
+ 'not_any_tags': 'black,white'}
+ )
+ self.assertEqual(self.columns, columns)
+ self.assertEqual(self.data, list(data))
+
class TestSetSecurityGroupNetwork(TestSecurityGroupNetwork):
# The security group to be set.
- _security_group = \
- network_fakes.FakeSecurityGroup.create_one_security_group()
+ _security_group = (
+ network_fakes.FakeSecurityGroup.create_one_security_group(
+ attrs={'tags': ['green', 'red']}))
def setUp(self):
super(TestSetSecurityGroupNetwork, self).setUp()
@@ -314,6 +382,7 @@ class TestSetSecurityGroupNetwork(TestSecurityGroupNetwork):
self.network.find_security_group = mock.Mock(
return_value=self._security_group)
+ self.network.set_tags = mock.Mock(return_value=None)
# Get the command object to test
self.cmd = security_group.SetSecurityGroup(self.app, self.namespace)
@@ -366,6 +435,34 @@ class TestSetSecurityGroupNetwork(TestSecurityGroupNetwork):
)
self.assertIsNone(result)
+ def _test_set_tags(self, with_tags=True):
+ if with_tags:
+ arglist = ['--tag', 'red', '--tag', 'blue']
+ verifylist = [('tags', ['red', 'blue'])]
+ expected_args = ['red', 'blue', 'green']
+ else:
+ arglist = ['--no-tag']
+ verifylist = [('no_tag', True)]
+ expected_args = []
+ arglist.append(self._security_group.name)
+ verifylist.append(
+ ('group', self._security_group.name))
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertTrue(self.network.update_security_group.called)
+ self.network.set_tags.assert_called_once_with(
+ self._security_group,
+ tests_utils.CompareBySet(expected_args))
+ self.assertIsNone(result)
+
+ def test_set_with_tags(self):
+ self._test_set_tags(with_tags=True)
+
+ def test_set_with_no_tag(self):
+ self._test_set_tags(with_tags=False)
+
class TestShowSecurityGroupNetwork(TestSecurityGroupNetwork):
@@ -385,6 +482,7 @@ class TestShowSecurityGroupNetwork(TestSecurityGroupNetwork):
'name',
'project_id',
'rules',
+ 'tags',
)
data = (
@@ -394,6 +492,7 @@ class TestShowSecurityGroupNetwork(TestSecurityGroupNetwork):
_security_group.project_id,
security_group._format_network_security_group_rules(
[_security_group_rule._info]),
+ _security_group.tags,
)
def setUp(self):
@@ -424,3 +523,70 @@ class TestShowSecurityGroupNetwork(TestSecurityGroupNetwork):
self._security_group.id, ignore_missing=False)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
+
+
+class TestUnsetSecurityGroupNetwork(TestSecurityGroupNetwork):
+
+ # The security group to be unset.
+ _security_group = (
+ network_fakes.FakeSecurityGroup.create_one_security_group(
+ attrs={'tags': ['green', 'red']}))
+
+ def setUp(self):
+ super(TestUnsetSecurityGroupNetwork, self).setUp()
+
+ self.network.update_security_group = mock.Mock(return_value=None)
+
+ self.network.find_security_group = mock.Mock(
+ return_value=self._security_group)
+ self.network.set_tags = mock.Mock(return_value=None)
+
+ # Get the command object to test
+ self.cmd = security_group.UnsetSecurityGroup(self.app, self.namespace)
+
+ def test_set_no_options(self):
+ self.assertRaises(tests_utils.ParserException,
+ self.check_parser, self.cmd, [], [])
+
+ def test_set_no_updates(self):
+ arglist = [
+ self._security_group.name,
+ ]
+ verifylist = [
+ ('group', self._security_group.name),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertFalse(self.network.update_security_group.called)
+ self.assertFalse(self.network.set_tags.called)
+ self.assertIsNone(result)
+
+ def _test_unset_tags(self, with_tags=True):
+ if with_tags:
+ arglist = ['--tag', 'red', '--tag', 'blue']
+ verifylist = [('tags', ['red', 'blue'])]
+ expected_args = ['green']
+ else:
+ arglist = ['--all-tag']
+ verifylist = [('all_tag', True)]
+ expected_args = []
+ arglist.append(self._security_group.name)
+ verifylist.append(
+ ('group', self._security_group.name))
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+
+ self.assertFalse(self.network.update_security_group.called)
+ self.network.set_tags.assert_called_once_with(
+ self._security_group,
+ tests_utils.CompareBySet(expected_args))
+ self.assertIsNone(result)
+
+ def test_unset_with_tags(self):
+ self._test_unset_tags(with_tags=True)
+
+ def test_unset_with_all_tag(self):
+ self._test_unset_tags(with_tags=False)
diff --git a/openstackclient/tests/unit/network/v2/test_subnet.py b/openstackclient/tests/unit/network/v2/test_subnet.py
index f5212c61..39cb4f53 100644
--- a/openstackclient/tests/unit/network/v2/test_subnet.py
+++ b/openstackclient/tests/unit/network/v2/test_subnet.py
@@ -1046,6 +1046,36 @@ class TestSetSubnet(TestSubnet):
_testsubnet, **attrs)
self.assertIsNone(result)
+ def test_clear_options(self):
+ _testsubnet = network_fakes.FakeSubnet.create_one_subnet(
+ {'host_routes': [{'destination': '10.20.20.0/24',
+ 'nexthop': '10.20.20.1'}],
+ 'allocation_pools': [{'start': '8.8.8.200',
+ 'end': '8.8.8.250'}],
+ 'dns_nameservers': ['10.0.0.1'], })
+ self.network.find_subnet = mock.Mock(return_value=_testsubnet)
+ arglist = [
+ '--no-host-route',
+ '--no-allocation-pool',
+ '--no-dns-nameservers',
+ _testsubnet.name,
+ ]
+ verifylist = [
+ ('no_dns_nameservers', True),
+ ('no_host_route', True),
+ ('no_allocation_pool', True),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ result = self.cmd.take_action(parsed_args)
+ attrs = {
+ 'host_routes': [],
+ 'allocation_pools': [],
+ 'dns_nameservers': [],
+ }
+ self.network.update_subnet.assert_called_once_with(
+ _testsubnet, **attrs)
+ self.assertIsNone(result)
+
def _test_set_tags(self, with_tags=True):
if with_tags:
arglist = ['--tag', 'red', '--tag', 'blue']
diff --git a/openstackclient/tests/unit/network/v2/test_subnet_pool.py b/openstackclient/tests/unit/network/v2/test_subnet_pool.py
index 81f0278f..3d8f1028 100644
--- a/openstackclient/tests/unit/network/v2/test_subnet_pool.py
+++ b/openstackclient/tests/unit/network/v2/test_subnet_pool.py
@@ -1016,9 +1016,7 @@ class TestUnsetSubnetPool(TestSubnetPool):
def setUp(self):
super(TestUnsetSubnetPool, self).setUp()
self._subnetpool = network_fakes.FakeSubnetPool.create_one_subnet_pool(
- {'prefixes': ['10.0.10.0/24', '10.1.10.0/24',
- '10.2.10.0/24'],
- 'tags': ['green', 'red']})
+ {'tags': ['green', 'red']})
self.network.find_subnet_pool = mock.Mock(
return_value=self._subnetpool)
self.network.update_subnet_pool = mock.Mock(return_value=None)
@@ -1026,37 +1024,6 @@ class TestUnsetSubnetPool(TestSubnetPool):
# Get the command object to test
self.cmd = subnet_pool.UnsetSubnetPool(self.app, self.namespace)
- def test_unset_subnet_pool(self):
- arglist = [
- '--pool-prefix', '10.0.10.0/24',
- '--pool-prefix', '10.1.10.0/24',
- self._subnetpool.name,
- ]
- verifylist = [
- ('prefixes', ['10.0.10.0/24', '10.1.10.0/24']),
- ('subnet_pool', self._subnetpool.name),
- ]
- parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- result = self.cmd.take_action(parsed_args)
- attrs = {'prefixes': ['10.2.10.0/24']}
- self.network.update_subnet_pool.assert_called_once_with(
- self._subnetpool, **attrs)
- self.assertIsNone(result)
-
- def test_unset_subnet_pool_prefix_not_existent(self):
- arglist = [
- '--pool-prefix', '10.100.1.1/25',
- self._subnetpool.name,
- ]
- verifylist = [
- ('prefixes', ['10.100.1.1/25']),
- ('subnet_pool', self._subnetpool.name),
- ]
- parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- self.assertRaises(exceptions.CommandError,
- self.cmd.take_action,
- parsed_args)
-
def _test_unset_tags(self, with_tags=True):
if with_tags:
arglist = ['--tag', 'red', '--tag', 'blue']
diff --git a/openstackclient/tests/unit/volume/test_find_resource.py b/openstackclient/tests/unit/volume/test_find_resource.py
index dbf9592f..60591eff 100644
--- a/openstackclient/tests/unit/volume/test_find_resource.py
+++ b/openstackclient/tests/unit/volume/test_find_resource.py
@@ -15,8 +15,8 @@
import mock
-from cinderclient.v1 import volume_snapshots
-from cinderclient.v1 import volumes
+from cinderclient.v3 import volume_snapshots
+from cinderclient.v3 import volumes
from osc_lib import exceptions
from osc_lib import utils
diff --git a/openstackclient/tests/unit/volume/v2/fakes.py b/openstackclient/tests/unit/volume/v2/fakes.py
index 481509f3..c245cbf6 100644
--- a/openstackclient/tests/unit/volume/v2/fakes.py
+++ b/openstackclient/tests/unit/volume/v2/fakes.py
@@ -193,46 +193,144 @@ class FakeService(object):
return services
+class FakeCapability(object):
+ """Fake capability."""
+
+ @staticmethod
+ def create_one_capability(attrs=None):
+ """Create a fake volume backend capability.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes of the Capabilities.
+ :return:
+ A FakeResource object with capability name and attrs.
+ """
+ # Set default attribute
+ capability_info = {
+ "namespace": "OS::Storage::Capabilities::fake",
+ "vendor_name": "OpenStack",
+ "volume_backend_name": "lvmdriver-1",
+ "pool_name": "pool",
+ "driver_version": "2.0.0",
+ "storage_protocol": "iSCSI",
+ "display_name": "Capabilities of Cinder LVM driver",
+ "description": "Blah, blah.",
+ "visibility": "public",
+ "replication_targets": [],
+ "properties": {
+ "compression": {
+ "title": "Compression",
+ "description": "Enables compression.",
+ "type": "boolean"
+ },
+ "qos": {
+ "title": "QoS",
+ "description": "Enables QoS.",
+ "type": "boolean"
+ },
+ "replication": {
+ "title": "Replication",
+ "description": "Enables replication.",
+ "type": "boolean"
+ },
+ "thin_provisioning": {
+ "title": "Thin Provisioning",
+ "description": "Sets thin provisioning.",
+ "type": "boolean"
+ }
+ }
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ capability_info.update(attrs or {})
+
+ capability = fakes.FakeResource(
+ None,
+ capability_info,
+ loaded=True)
+
+ return capability
+
+
+class FakePool(object):
+ """Fake Pools."""
+
+ @staticmethod
+ def create_one_pool(attrs=None):
+ """Create a fake pool.
+
+ :param Dictionary attrs:
+ A dictionary with all attributes of the pool
+ :return:
+ A FakeResource object with pool name and attrs.
+ """
+ # Set default attribute
+ pool_info = {
+ 'name': 'host@lvmdriver-1#lvmdriver-1',
+ 'storage_protocol': 'iSCSI',
+ 'thick_provisioning_support': False,
+ 'thin_provisioning_support': True,
+ 'total_volumes': 99,
+ 'total_capacity_gb': 1000.00,
+ 'allocated_capacity_gb': 100,
+ 'max_over_subscription_ratio': 200.0,
+ }
+
+ # Overwrite default attributes if there are some attributes set
+ pool_info.update(attrs or {})
+
+ pool = fakes.FakeResource(
+ None,
+ pool_info,
+ loaded=True)
+
+ return pool
+
+
class FakeVolumeClient(object):
def __init__(self, **kwargs):
- self.volumes = mock.Mock()
- self.volumes.resource_class = fakes.FakeResource(None, {})
+ self.auth_token = kwargs['token']
+ self.management_url = kwargs['endpoint']
+ self.availability_zones = mock.Mock()
+ self.availability_zones.resource_class = fakes.FakeResource(None, {})
+ self.backups = mock.Mock()
+ self.backups.resource_class = fakes.FakeResource(None, {})
+ self.capabilities = mock.Mock()
+ self.capabilities.resource_class = fakes.FakeResource(None, {})
+ self.cgsnapshots = mock.Mock()
+ self.cgsnapshots.resource_class = fakes.FakeResource(None, {})
+ self.consistencygroups = mock.Mock()
+ self.consistencygroups.resource_class = fakes.FakeResource(None, {})
self.extensions = mock.Mock()
self.extensions.resource_class = fakes.FakeResource(None, {})
self.limits = mock.Mock()
self.limits.resource_class = fakes.FakeResource(None, {})
- self.volume_snapshots = mock.Mock()
- self.volume_snapshots.resource_class = fakes.FakeResource(None, {})
- self.backups = mock.Mock()
- self.backups.resource_class = fakes.FakeResource(None, {})
- self.volume_types = mock.Mock()
- self.volume_types.resource_class = fakes.FakeResource(None, {})
- self.volume_type_access = mock.Mock()
- self.volume_type_access.resource_class = fakes.FakeResource(None, {})
- self.volume_encryption_types = mock.Mock()
- self.volume_encryption_types.resource_class = (
- fakes.FakeResource(None, {}))
- self.restores = mock.Mock()
- self.restores.resource_class = fakes.FakeResource(None, {})
+ self.pools = mock.Mock()
+ self.pools.resource_class = fakes.FakeResource(None, {})
self.qos_specs = mock.Mock()
self.qos_specs.resource_class = fakes.FakeResource(None, {})
- self.availability_zones = mock.Mock()
- self.availability_zones.resource_class = fakes.FakeResource(None, {})
- self.transfers = mock.Mock()
- self.transfers.resource_class = fakes.FakeResource(None, {})
- self.services = mock.Mock()
- self.services.resource_class = fakes.FakeResource(None, {})
- self.quotas = mock.Mock()
- self.quotas.resource_class = fakes.FakeResource(None, {})
self.quota_classes = mock.Mock()
self.quota_classes.resource_class = fakes.FakeResource(None, {})
- self.consistencygroups = mock.Mock()
- self.consistencygroups.resource_class = fakes.FakeResource(None, {})
- self.cgsnapshots = mock.Mock()
- self.cgsnapshots.resource_class = fakes.FakeResource(None, {})
- self.auth_token = kwargs['token']
- self.management_url = kwargs['endpoint']
+ self.quotas = mock.Mock()
+ self.quotas.resource_class = fakes.FakeResource(None, {})
+ self.restores = mock.Mock()
+ self.restores.resource_class = fakes.FakeResource(None, {})
+ self.services = mock.Mock()
+ self.services.resource_class = fakes.FakeResource(None, {})
+ self.transfers = mock.Mock()
+ self.transfers.resource_class = fakes.FakeResource(None, {})
+ self.volume_encryption_types = mock.Mock()
+ self.volume_encryption_types.resource_class = (
+ fakes.FakeResource(None, {}))
+ self.volume_snapshots = mock.Mock()
+ self.volume_snapshots.resource_class = fakes.FakeResource(None, {})
+ self.volume_type_access = mock.Mock()
+ self.volume_type_access.resource_class = fakes.FakeResource(None, {})
+ self.volume_types = mock.Mock()
+ self.volume_types.resource_class = fakes.FakeResource(None, {})
+ self.volumes = mock.Mock()
+ self.volumes.resource_class = fakes.FakeResource(None, {})
class TestVolume(utils.TestCommand):
@@ -498,6 +596,35 @@ class FakeBackup(object):
return mock.Mock(side_effect=backups)
+ @staticmethod
+ def create_backup_record():
+ """Gets a fake backup record for a given backup.
+
+ :return: An "exported" backup record.
+ """
+
+ return {
+ 'backup_service': 'cinder.backup.drivers.swift.SwiftBackupDriver',
+ 'backup_url': 'eyJzdGF0dXMiOiAiYXZh',
+ }
+
+ @staticmethod
+ def import_backup_record():
+ """Creates a fake backup record import response from a backup.
+
+ :return: The fake backup object that was encoded.
+ """
+ return {
+ 'backup': {
+ 'id': 'backup.id',
+ 'name': 'backup.name',
+ 'links': [
+ {'href': 'link1', 'rel': 'self'},
+ {'href': 'link2', 'rel': 'bookmark'},
+ ],
+ },
+ }
+
class FakeConsistencyGroup(object):
"""Fake one or more consistency group."""
diff --git a/openstackclient/tests/unit/volume/v2/test_backup.py b/openstackclient/tests/unit/volume/v2/test_backup.py
index a8e81c7e..9a2ce718 100644
--- a/openstackclient/tests/unit/volume/v2/test_backup.py
+++ b/openstackclient/tests/unit/volume/v2/test_backup.py
@@ -367,7 +367,9 @@ class TestBackupRestore(TestBackup):
self.backups_mock.get.return_value = self.backup
self.volumes_mock.get.return_value = self.volume
- self.restores_mock.restore.return_value = None
+ self.restores_mock.restore.return_value = (
+ volume_fakes.FakeVolume.create_one_volume(
+ {'id': self.volume['id']}))
# Get the command object to mock
self.cmd = backup.RestoreVolumeBackup(self.app, None)
@@ -385,7 +387,7 @@ class TestBackupRestore(TestBackup):
result = self.cmd.take_action(parsed_args)
self.restores_mock.restore.assert_called_with(self.backup.id,
self.backup.volume_id)
- self.assertIsNone(result)
+ self.assertIsNotNone(result)
class TestBackupSet(TestBackup):
diff --git a/openstackclient/tests/unit/volume/v2/test_backup_record.py b/openstackclient/tests/unit/volume/v2/test_backup_record.py
new file mode 100644
index 00000000..0e24174c
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v2/test_backup_record.py
@@ -0,0 +1,114 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
+from openstackclient.volume.v2 import backup_record
+
+
+class TestBackupRecord(volume_fakes.TestVolume):
+
+ def setUp(self):
+ super(TestBackupRecord, self).setUp()
+
+ self.backups_mock = self.app.client_manager.volume.backups
+ self.backups_mock.reset_mock()
+
+
+class TestBackupRecordExport(TestBackupRecord):
+
+ new_backup = volume_fakes.FakeBackup.create_one_backup(
+ attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'})
+ new_record = volume_fakes.FakeBackup.create_backup_record()
+
+ def setUp(self):
+ super(TestBackupRecordExport, self).setUp()
+
+ self.backups_mock.export_record.return_value = self.new_record
+ self.backups_mock.get.return_value = self.new_backup
+
+ # Get the command object to mock
+ self.cmd = backup_record.ExportBackupRecord(self.app, None)
+
+ def test_backup_export_table(self):
+ arglist = [
+ self.new_backup.name,
+ ]
+ verifylist = [
+ ("backup", self.new_backup.name),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ parsed_args.formatter = 'table'
+ columns, __ = self.cmd.take_action(parsed_args)
+
+ self.backups_mock.export_record.assert_called_with(
+ self.new_backup.id,
+ )
+
+ expected_columns = ('Backup Service', 'Metadata')
+ self.assertEqual(columns, expected_columns)
+
+ def test_backup_export_json(self):
+ arglist = [
+ self.new_backup.name,
+ ]
+ verifylist = [
+ ("backup", self.new_backup.name),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ parsed_args.formatter = 'json'
+ columns, __ = self.cmd.take_action(parsed_args)
+
+ self.backups_mock.export_record.assert_called_with(
+ self.new_backup.id,
+ )
+
+ expected_columns = ('backup_service', 'backup_url')
+ self.assertEqual(columns, expected_columns)
+
+
+class TestBackupRecordImport(TestBackupRecord):
+
+ new_backup = volume_fakes.FakeBackup.create_one_backup(
+ attrs={'volume_id': 'a54708a2-0388-4476-a909-09579f885c25'})
+ new_import = volume_fakes.FakeBackup.import_backup_record()
+
+ def setUp(self):
+ super(TestBackupRecordImport, self).setUp()
+
+ self.backups_mock.import_record.return_value = self.new_import
+
+ # Get the command object to mock
+ self.cmd = backup_record.ImportBackupRecord(self.app, None)
+
+ def test_backup_import(self):
+ arglist = [
+ "cinder.backup.drivers.swift.SwiftBackupDriver",
+ "fake_backup_record_data",
+ ]
+ verifylist = [
+ ("backup_service",
+ "cinder.backup.drivers.swift.SwiftBackupDriver"),
+ ("backup_metadata", "fake_backup_record_data"),
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+ columns, __ = self.cmd.take_action(parsed_args)
+
+ self.backups_mock.import_record.assert_called_with(
+ "cinder.backup.drivers.swift.SwiftBackupDriver",
+ "fake_backup_record_data",
+ )
+ self.assertEqual(columns, ('backup',))
diff --git a/openstackclient/tests/unit/volume/v2/test_volume.py b/openstackclient/tests/unit/volume/v2/test_volume.py
index 2fa924b8..dbe69ea0 100644
--- a/openstackclient/tests/unit/volume/v2/test_volume.py
+++ b/openstackclient/tests/unit/volume/v2/test_volume.py
@@ -126,15 +126,11 @@ class TestVolumeCreate(TestVolume):
name=self.new_volume.name,
description=None,
volume_type=None,
- user_id=None,
- project_id=None,
availability_zone=None,
metadata=None,
imageRef=None,
source_volid=None,
consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
scheduler_hints=None,
)
@@ -152,7 +148,6 @@ class TestVolumeCreate(TestVolume):
'--availability-zone', self.new_volume.availability_zone,
'--consistency-group', consistency_group.id,
'--hint', 'k=v',
- '--multi-attach',
self.new_volume.name,
]
verifylist = [
@@ -162,7 +157,6 @@ class TestVolumeCreate(TestVolume):
('availability_zone', self.new_volume.availability_zone),
('consistency_group', consistency_group.id),
('hint', {'k': 'v'}),
- ('multi_attach', True),
('name', self.new_volume.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
@@ -178,112 +172,50 @@ class TestVolumeCreate(TestVolume):
name=self.new_volume.name,
description=self.new_volume.description,
volume_type=self.new_volume.volume_type,
- user_id=None,
- project_id=None,
availability_zone=self.new_volume.availability_zone,
metadata=None,
imageRef=None,
source_volid=None,
consistencygroup_id=consistency_group.id,
- source_replica=None,
- multiattach=True,
scheduler_hints={'k': 'v'},
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.datalist, data)
- def test_volume_create_user_project_id(self):
- # Return a project
- self.projects_mock.get.return_value = self.project
- # Return a user
- self.users_mock.get.return_value = self.user
-
+ def test_volume_create_user(self):
arglist = [
'--size', str(self.new_volume.size),
- '--project', self.project.id,
'--user', self.user.id,
self.new_volume.name,
]
verifylist = [
('size', self.new_volume.size),
- ('project', self.project.id),
('user', self.user.id),
('name', self.new_volume.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- # In base command class ShowOne in cliff, abstract method take_action()
- # returns a two-part tuple with a tuple of column names and a tuple of
- # data to be shown.
- columns, data = self.cmd.take_action(parsed_args)
-
- self.volumes_mock.create.assert_called_with(
- size=self.new_volume.size,
- snapshot_id=None,
- name=self.new_volume.name,
- description=None,
- volume_type=None,
- user_id=self.user.id,
- project_id=self.project.id,
- availability_zone=None,
- metadata=None,
- imageRef=None,
- source_volid=None,
- consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
- scheduler_hints=None,
- )
-
- self.assertEqual(self.columns, columns)
- self.assertEqual(self.datalist, data)
-
- def test_volume_create_user_project_name(self):
- # Return a project
- self.projects_mock.get.return_value = self.project
- # Return a user
- self.users_mock.get.return_value = self.user
+ self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.volumes_mock.create.assert_not_called()
+ def test_volume_create_project(self):
arglist = [
'--size', str(self.new_volume.size),
- '--project', self.project.name,
- '--user', self.user.name,
+ '--project', self.project.id,
self.new_volume.name,
]
verifylist = [
('size', self.new_volume.size),
- ('project', self.project.name),
- ('user', self.user.name),
+ ('project', self.project.id),
('name', self.new_volume.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
- # In base command class ShowOne in cliff, abstract method take_action()
- # returns a two-part tuple with a tuple of column names and a tuple of
- # data to be shown.
- columns, data = self.cmd.take_action(parsed_args)
-
- self.volumes_mock.create.assert_called_with(
- size=self.new_volume.size,
- snapshot_id=None,
- name=self.new_volume.name,
- description=None,
- volume_type=None,
- user_id=self.user.id,
- project_id=self.project.id,
- availability_zone=None,
- metadata=None,
- imageRef=None,
- source_volid=None,
- consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
- scheduler_hints=None,
- )
-
- self.assertEqual(self.columns, columns)
- self.assertEqual(self.datalist, data)
+ self.assertRaises(exceptions.CommandError, self.cmd.take_action,
+ parsed_args)
+ self.volumes_mock.create.assert_not_called()
def test_volume_create_properties(self):
arglist = [
@@ -310,15 +242,11 @@ class TestVolumeCreate(TestVolume):
name=self.new_volume.name,
description=None,
volume_type=None,
- user_id=None,
- project_id=None,
availability_zone=None,
metadata={'Alpha': 'a', 'Beta': 'b'},
imageRef=None,
source_volid=None,
consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
scheduler_hints=None,
)
@@ -352,15 +280,11 @@ class TestVolumeCreate(TestVolume):
name=self.new_volume.name,
description=None,
volume_type=None,
- user_id=None,
- project_id=None,
availability_zone=None,
metadata=None,
imageRef=image.id,
source_volid=None,
consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
scheduler_hints=None,
)
@@ -394,15 +318,11 @@ class TestVolumeCreate(TestVolume):
name=self.new_volume.name,
description=None,
volume_type=None,
- user_id=None,
- project_id=None,
availability_zone=None,
metadata=None,
imageRef=image.id,
source_volid=None,
consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
scheduler_hints=None,
)
@@ -430,20 +350,16 @@ class TestVolumeCreate(TestVolume):
columns, data = self.cmd.take_action(parsed_args)
self.volumes_mock.create.assert_called_once_with(
- size=None,
+ size=snapshot.size,
snapshot_id=snapshot.id,
name=self.new_volume.name,
description=None,
volume_type=None,
- user_id=None,
- project_id=None,
availability_zone=None,
metadata=None,
imageRef=None,
source_volid=None,
consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
scheduler_hints=None,
)
@@ -477,15 +393,11 @@ class TestVolumeCreate(TestVolume):
name=self.new_volume.name,
description=None,
volume_type=None,
- user_id=None,
- project_id=None,
availability_zone=None,
metadata=None,
imageRef=None,
source_volid=None,
consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
scheduler_hints=None,
)
@@ -523,15 +435,11 @@ class TestVolumeCreate(TestVolume):
name=self.new_volume.name,
description=None,
volume_type=None,
- user_id=None,
- project_id=None,
availability_zone=None,
metadata=None,
imageRef=None,
source_volid=None,
consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
scheduler_hints=None,
)
@@ -578,15 +486,11 @@ class TestVolumeCreate(TestVolume):
name=self.new_volume.name,
description=None,
volume_type=None,
- user_id=None,
- project_id=None,
availability_zone=None,
metadata=None,
imageRef=None,
source_volid=None,
consistencygroup_id=None,
- source_replica=None,
- multiattach=False,
scheduler_hints=None,
)
@@ -598,40 +502,6 @@ class TestVolumeCreate(TestVolume):
self.volumes_mock.update_readonly_flag.assert_called_with(
self.new_volume.id, True)
- def test_volume_create_with_source_replicated(self):
- self.volumes_mock.get.return_value = self.new_volume
- arglist = [
- '--source-replicated', self.new_volume.id,
- self.new_volume.name,
- ]
- verifylist = [
- ('source_replicated', self.new_volume.id),
- ('name', self.new_volume.name),
- ]
- parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
- columns, data = self.cmd.take_action(parsed_args)
- self.volumes_mock.create.assert_called_once_with(
- size=None,
- snapshot_id=None,
- name=self.new_volume.name,
- description=None,
- volume_type=None,
- user_id=None,
- project_id=None,
- availability_zone=None,
- metadata=None,
- imageRef=None,
- source_volid=None,
- consistencygroup_id=None,
- source_replica=self.new_volume.id,
- multiattach=False,
- scheduler_hints=None,
- )
-
- self.assertEqual(self.columns, columns)
- self.assertEqual(self.datalist, data)
-
def test_volume_create_without_size(self):
arglist = [
self.new_volume.name,
@@ -649,7 +519,6 @@ class TestVolumeCreate(TestVolume):
'--image', 'source_image',
'--source', 'source_volume',
'--snapshot', 'source_snapshot',
- '--source-replicated', 'source_replicated_volume',
'--size', str(self.new_volume.size),
self.new_volume.name,
]
@@ -657,7 +526,6 @@ class TestVolumeCreate(TestVolume):
('image', 'source_image'),
('source', 'source_volume'),
('snapshot', 'source_snapshot'),
- ('source-replicated', 'source_replicated_volume'),
('size', self.new_volume.size),
('name', self.new_volume.name),
]
@@ -1320,7 +1188,6 @@ class TestVolumeMigrate(TestVolume):
verifylist = [
("force_host_copy", False),
("lock_volume", False),
- ("unlock_volume", False),
("host", "host@backend-name#pool"),
("volume", self._volume.id),
]
@@ -1342,7 +1209,6 @@ class TestVolumeMigrate(TestVolume):
verifylist = [
("force_host_copy", True),
("lock_volume", True),
- ("unlock_volume", False),
("host", "host@backend-name#pool"),
("volume", self._volume.id),
]
@@ -1354,27 +1220,6 @@ class TestVolumeMigrate(TestVolume):
self._volume.id, "host@backend-name#pool", True, True)
self.assertIsNone(result)
- def test_volume_migrate_with_unlock_volume(self):
- arglist = [
- "--unlock-volume",
- "--host", "host@backend-name#pool",
- self._volume.id,
- ]
- verifylist = [
- ("force_host_copy", False),
- ("lock_volume", False),
- ("unlock_volume", True),
- ("host", "host@backend-name#pool"),
- ("volume", self._volume.id),
- ]
- parsed_args = self.check_parser(self.cmd, arglist, verifylist)
-
- result = self.cmd.take_action(parsed_args)
- self.volumes_mock.get.assert_called_once_with(self._volume.id)
- self.volumes_mock.migrate_volume.assert_called_once_with(
- self._volume.id, "host@backend-name#pool", False, False)
- self.assertIsNone(result)
-
def test_volume_migrate_without_host(self):
arglist = [
self._volume.id,
@@ -1382,7 +1227,6 @@ class TestVolumeMigrate(TestVolume):
verifylist = [
("force_host_copy", False),
("lock_volume", False),
- ("unlock_volume", False),
("volume", self._volume.id),
]
@@ -1483,6 +1327,42 @@ class TestVolumeSet(TestVolume):
self.volumes_mock.reset_state.assert_called_with(
self.new_volume.id, 'error')
+ def test_volume_set_attached(self):
+ arglist = [
+ '--attached',
+ self.new_volume.id
+ ]
+ verifylist = [
+ ('attached', True),
+ ('detached', False),
+ ('volume', self.new_volume.id)
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.volumes_mock.reset_state.assert_called_with(
+ self.new_volume.id, attach_status='attached', state=None)
+ self.assertIsNone(result)
+
+ def test_volume_set_detached(self):
+ arglist = [
+ '--detached',
+ self.new_volume.id
+ ]
+ verifylist = [
+ ('attached', False),
+ ('detached', True),
+ ('volume', self.new_volume.id)
+ ]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ result = self.cmd.take_action(parsed_args)
+ self.volumes_mock.reset_state.assert_called_with(
+ self.new_volume.id, attach_status='detached', state=None)
+ self.assertIsNone(result)
+
def test_volume_set_bootable(self):
arglist = [
['--bootable', self.new_volume.id],
diff --git a/openstackclient/tests/unit/volume/v2/test_volume_backend.py b/openstackclient/tests/unit/volume/v2/test_volume_backend.py
new file mode 100644
index 00000000..db188660
--- /dev/null
+++ b/openstackclient/tests/unit/volume/v2/test_volume_backend.py
@@ -0,0 +1,168 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
+from openstackclient.volume.v2 import volume_backend
+
+
+class TestShowVolumeCapability(volume_fakes.TestVolume):
+ """Test backend capability functionality."""
+
+ # The capability to be listed
+ capability = volume_fakes.FakeCapability.create_one_capability()
+
+ def setUp(self):
+ super(TestShowVolumeCapability, self).setUp()
+
+ # Get a shortcut to the capability Mock
+ self.capability_mock = self.app.client_manager.volume.capabilities
+ self.capability_mock.get.return_value = self.capability
+
+ # Get the command object to test
+ self.cmd = volume_backend.ShowCapability(self.app, None)
+
+ def test_capability_show(self):
+ arglist = [
+ 'fake',
+ ]
+ verifylist = [
+ ('host', 'fake'),
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'Title',
+ 'Key',
+ 'Type',
+ 'Description',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ capabilities = [
+ 'Compression',
+ 'Replication',
+ 'QoS',
+ 'Thin Provisioning',
+ ]
+
+ # confirming if all expected values are present in the result.
+ for cap in data:
+ self.assertTrue(cap[0] in capabilities)
+
+ # checking if proper call was made to get capabilities
+ self.capability_mock.get.assert_called_with(
+ 'fake',
+ )
+
+
+class TestListVolumePool(volume_fakes.TestVolume):
+ """Tests for volume backend pool listing."""
+
+ # The pool to be listed
+ pools = volume_fakes.FakePool.create_one_pool()
+
+ def setUp(self):
+ super(TestListVolumePool, self).setUp()
+
+ self.pool_mock = self.app.client_manager.volume.pools
+ self.pool_mock.list.return_value = [self.pools]
+
+ # Get the command object to test
+ self.cmd = volume_backend.ListPool(self.app, None)
+
+ def test_pool_list(self):
+ arglist = []
+ verifylist = []
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'Name',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = ((
+ self.pools.name,
+ ), )
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ # checking if proper call was made to list pools
+ self.pool_mock.list.assert_called_with(
+ detailed=False,
+ )
+
+ # checking that long-only columns are not present in the default output
+ self.assertNotIn("total_volumes", columns)
+ self.assertNotIn("storage_protocol", columns)
+
+ def test_service_list_with_long_option(self):
+ arglist = [
+ '--long'
+ ]
+ verifylist = [
+ ('long', True)
+ ]
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ # In base command class Lister in cliff, abstract method take_action()
+ # returns a tuple containing the column names and an iterable
+ # containing the data to be listed.
+ columns, data = self.cmd.take_action(parsed_args)
+
+ expected_columns = [
+ 'Name',
+ 'Protocol',
+ 'Thick',
+ 'Thin',
+ 'Volumes',
+ 'Capacity',
+ 'Allocated',
+ 'Max Over Ratio',
+ ]
+
+ # confirming if all expected columns are present in the result.
+ self.assertEqual(expected_columns, columns)
+
+ datalist = ((
+ self.pools.name,
+ self.pools.storage_protocol,
+ self.pools.thick_provisioning_support,
+ self.pools.thin_provisioning_support,
+ self.pools.total_volumes,
+ self.pools.total_capacity_gb,
+ self.pools.allocated_capacity_gb,
+ self.pools.max_over_subscription_ratio,
+ ), )
+
+ # confirming if all expected values are present in the result.
+ self.assertEqual(datalist, tuple(data))
+
+ self.pool_mock.list.assert_called_with(
+ detailed=True,
+ )
diff --git a/openstackclient/volume/client.py b/openstackclient/volume/client.py
index c4b0dfca..e0e670a9 100644
--- a/openstackclient/volume/client.py
+++ b/openstackclient/volume/client.py
@@ -37,13 +37,20 @@ def make_client(instance):
# Defer client imports until we actually need them
from cinderclient import extension
- from cinderclient.v1.contrib import list_extensions
- from cinderclient.v1 import volume_snapshots
- from cinderclient.v1 import volumes
-
- # Monkey patch for v1 cinderclient
- volumes.Volume.NAME_ATTR = 'display_name'
- volume_snapshots.Snapshot.NAME_ATTR = 'display_name'
+ from cinderclient.v3.contrib import list_extensions
+ from cinderclient.v3 import volume_snapshots
+ from cinderclient.v3 import volumes
+
+ # Try a small import to check if cinderclient v1 is supported
+ try:
+ from cinderclient.v1 import services # noqa
+ except Exception:
+ del API_VERSIONS['1']
+
+ if instance._api_version[API_NAME] == '1':
+ # Monkey patch for v1 cinderclient
+ volumes.Volume.NAME_ATTR = 'display_name'
+ volume_snapshots.Snapshot.NAME_ATTR = 'display_name'
volume_client = utils.get_client_class(
API_NAME,
diff --git a/openstackclient/volume/v2/backup.py b/openstackclient/volume/v2/backup.py
index 60633a70..d4aec8d7 100644
--- a/openstackclient/volume/v2/backup.py
+++ b/openstackclient/volume/v2/backup.py
@@ -319,7 +319,9 @@ class RestoreVolumeBackup(command.ShowOne):
backup = utils.find_resource(volume_client.backups, parsed_args.backup)
destination_volume = utils.find_resource(volume_client.volumes,
parsed_args.volume)
- return volume_client.restores.restore(backup.id, destination_volume.id)
+ backup = volume_client.restores.restore(backup.id,
+ destination_volume.id)
+ return zip(*sorted(six.iteritems(backup._info)))
class RestoreBackup(RestoreVolumeBackup):
diff --git a/openstackclient/volume/v2/backup_record.py b/openstackclient/volume/v2/backup_record.py
new file mode 100644
index 00000000..f4918032
--- /dev/null
+++ b/openstackclient/volume/v2/backup_record.py
@@ -0,0 +1,82 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Volume v2 Backup record action implementations"""
+
+import logging
+
+from osc_lib.command import command
+from osc_lib import utils
+import six
+
+from openstackclient.i18n import _
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ExportBackupRecord(command.ShowOne):
+ _description = _('Export volume backup details. Backup information can be '
+ 'imported into a new service instance to be able to '
+ 'restore.')
+
+ def get_parser(self, prog_name):
+ parser = super(ExportBackupRecord, self).get_parser(prog_name)
+ parser.add_argument(
+ "backup",
+ metavar="<backup>",
+ help=_("Backup to export (name or ID)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+ backup = utils.find_resource(volume_client.backups, parsed_args.backup)
+ backup_data = volume_client.backups.export_record(backup.id)
+
+ # We only want to show "friendly" display names, but also want to keep
+ # json structure compatibility with cinderclient
+ if parsed_args.formatter == 'table':
+ backup_data['Backup Service'] = backup_data.pop('backup_service')
+ backup_data['Metadata'] = backup_data.pop('backup_url')
+
+ return zip(*sorted(six.iteritems(backup_data)))
+
+
+class ImportBackupRecord(command.ShowOne):
+ _description = _('Import volume backup details. Exported backup details '
+ 'contain the metadata necessary to restore to a new or '
+ 'rebuilt service instance')
+
+ def get_parser(self, prog_name):
+ parser = super(ImportBackupRecord, self).get_parser(prog_name)
+ parser.add_argument(
+ "backup_service",
+ metavar="<backup_service>",
+ help=_("Backup service containing the backup.")
+ )
+ parser.add_argument(
+ "backup_metadata",
+ metavar="<backup_metadata>",
+ help=_("Encoded backup metadata from export.")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+ backup_data = volume_client.backups.import_record(
+ parsed_args.backup_service,
+ parsed_args.backup_metadata)
+ backup_data.pop('links', None)
+ return zip(*sorted(six.iteritems(backup_data)))
diff --git a/openstackclient/volume/v2/volume.py b/openstackclient/volume/v2/volume.py
index 61f846b0..fa587b5f 100644
--- a/openstackclient/volume/v2/volume.py
+++ b/openstackclient/volume/v2/volume.py
@@ -14,6 +14,7 @@
"""Volume V2 Volume action implementations"""
+import argparse
import copy
import logging
@@ -37,7 +38,7 @@ def _check_size_arg(args):
volume is not specified.
"""
- if ((args.snapshot or args.source or args.source_replicated)
+ if ((args.snapshot or args.source)
is None and args.size is None):
msg = _("--size is a required option if snapshot "
"or source volume is not specified.")
@@ -59,7 +60,7 @@ class CreateVolume(command.ShowOne):
metavar="<size>",
type=int,
help=_("Volume size in GB (Required unless --snapshot or "
- "--source or --source-replicated is specified)"),
+ "--source is specified)"),
)
parser.add_argument(
"--type",
@@ -85,7 +86,7 @@ class CreateVolume(command.ShowOne):
source_group.add_argument(
"--source-replicated",
metavar="<replicated-volume>",
- help=_("Replicated volume to clone (name or ID)"),
+ help=argparse.SUPPRESS,
)
parser.add_argument(
"--description",
@@ -95,12 +96,12 @@ class CreateVolume(command.ShowOne):
parser.add_argument(
'--user',
metavar='<user>',
- help=_('Specify an alternate user (name or ID)'),
+ help=argparse.SUPPRESS,
)
parser.add_argument(
'--project',
metavar='<project>',
- help=_('Specify an alternate project (name or ID)'),
+ help=argparse.SUPPRESS,
)
parser.add_argument(
"--availability-zone",
@@ -158,7 +159,6 @@ class CreateVolume(command.ShowOne):
def take_action(self, parsed_args):
_check_size_arg(parsed_args)
- identity_client = self.app.client_manager.identity
volume_client = self.app.client_manager.volume
image_client = self.app.client_manager.image
@@ -168,12 +168,6 @@ class CreateVolume(command.ShowOne):
volume_client.volumes,
parsed_args.source).id
- replicated_source_volume = None
- if parsed_args.source_replicated:
- replicated_source_volume = utils.find_resource(
- volume_client.volumes,
- parsed_args.source_replicated).id
-
consistency_group = None
if parsed_args.consistency_group:
consistency_group = utils.find_resource(
@@ -186,39 +180,53 @@ class CreateVolume(command.ShowOne):
image_client.images,
parsed_args.image).id
+ size = parsed_args.size
+
snapshot = None
if parsed_args.snapshot:
- snapshot = utils.find_resource(
+ snapshot_obj = utils.find_resource(
volume_client.volume_snapshots,
- parsed_args.snapshot).id
-
- project = None
+ parsed_args.snapshot)
+ snapshot = snapshot_obj.id
+ # Cinder requires a value for size when creating a volume
+ # even if creating from a snapshot. Cinder will create the
+ # volume with at least the same size as the snapshot anyway,
+ # so since we have the object here, just override the size
+ # value if it's either not given or is smaller than the
+ # snapshot size.
+ size = max(size or 0, snapshot_obj.size)
+
+ # NOTE(abishop): Cinder's volumes.create() has 'project_id' and
+ # 'user_id' args, but they're not wired up to anything. The only way
+ # to specify an alternate project or user for the volume is to use
+ # the identity overrides (e.g. "--os-project-id").
+ #
+ # Now, if the project or user arg is specified then the command is
+ # rejected. Otherwise, Cinder would actually create a volume, but
+ # without the specified property.
if parsed_args.project:
- project = utils.find_resource(
- identity_client.projects,
- parsed_args.project).id
-
- user = None
+ raise exceptions.CommandError(
+ _("ERROR: --project is deprecated, please use"
+ " --os-project-name or --os-project-id instead."))
if parsed_args.user:
- user = utils.find_resource(
- identity_client.users,
- parsed_args.user).id
+ raise exceptions.CommandError(
+ _("ERROR: --user is deprecated, please use"
+ " --os-username instead."))
+ if parsed_args.multi_attach:
+ LOG.warning(_("'--multi-attach' option is no longer supported by "
+ "the block storage service."))
volume = volume_client.volumes.create(
- size=parsed_args.size,
+ size=size,
snapshot_id=snapshot,
name=parsed_args.name,
description=parsed_args.description,
volume_type=parsed_args.type,
- user_id=user,
- project_id=project,
availability_zone=parsed_args.availability_zone,
metadata=parsed_args.property,
imageRef=image,
source_volid=source_volume,
consistencygroup_id=consistency_group,
- source_replica=replicated_source_volume,
- multiattach=parsed_args.multi_attach,
scheduler_hints=parsed_args.hint,
)
@@ -472,21 +480,13 @@ class MigrateVolume(command.Command):
help=_("Enable generic host-based force-migration, "
"which bypasses driver optimizations")
)
- lock_group = parser.add_mutually_exclusive_group()
- lock_group.add_argument(
+ parser.add_argument(
'--lock-volume',
action="store_true",
help=_("If specified, the volume state will be locked "
"and will not allow a migration to be aborted "
"(possibly by another operation)")
)
- lock_group.add_argument(
- '--unlock-volume',
- action="store_true",
- help=_("If specified, the volume state will not be "
- "locked and the a migration can be aborted "
- "(default) (possibly by another operation)")
- )
return parser
def take_action(self, parsed_args):
@@ -559,6 +559,25 @@ class SetVolume(command.Command):
'in the database with no regard to actual status, '
'exercise caution when using)'),
)
+ attached_group = parser.add_mutually_exclusive_group()
+ attached_group.add_argument(
+ "--attached",
+ action="store_true",
+ help=_('Set volume attachment status to "attached" '
+ '(admin only) '
+ '(This option simply changes the state of the volume '
+ 'in the database with no regard to actual status, '
+ 'exercise caution when using)'),
+ )
+ attached_group.add_argument(
+ "--detached",
+ action="store_true",
+ help=_('Set volume attachment status to "detached" '
+ '(admin only) '
+ '(This option simply changes the state of the volume '
+ 'in the database with no regard to actual status, '
+ 'exercise caution when using)'),
+ )
parser.add_argument(
'--type',
metavar='<volume-type>',
@@ -645,6 +664,22 @@ class SetVolume(command.Command):
except Exception as e:
LOG.error(_("Failed to set volume state: %s"), e)
result += 1
+ if parsed_args.attached:
+ try:
+ volume_client.volumes.reset_state(
+ volume.id, state=None,
+ attach_status="attached")
+ except Exception as e:
+ LOG.error(_("Failed to set volume attach-status: %s"), e)
+ result += 1
+ if parsed_args.detached:
+ try:
+ volume_client.volumes.reset_state(
+ volume.id, state=None,
+ attach_status="detached")
+ except Exception as e:
+ LOG.error(_("Failed to set volume attach-status: %s"), e)
+ result += 1
if parsed_args.bootable or parsed_args.non_bootable:
try:
volume_client.volumes.set_bootable(
diff --git a/openstackclient/volume/v2/volume_backend.py b/openstackclient/volume/v2/volume_backend.py
new file mode 100644
index 00000000..c5194d35
--- /dev/null
+++ b/openstackclient/volume/v2/volume_backend.py
@@ -0,0 +1,113 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Storage backend action implementations"""
+
+from osc_lib.command import command
+from osc_lib import utils
+
+from openstackclient.i18n import _
+
+
+class ShowCapability(command.Lister):
+ _description = _("Show capability command")
+
+ def get_parser(self, prog_name):
+ parser = super(ShowCapability, self).get_parser(prog_name)
+ parser.add_argument(
+ "host",
+ metavar="<host>",
+ help=_("List capabilities of specified host (host@backend-name)")
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ columns = [
+ 'Title',
+ 'Key',
+ 'Type',
+ 'Description',
+ ]
+
+ data = volume_client.capabilities.get(parsed_args.host)
+
+ # The get capabilities API is... interesting. We only want the names of
+ # the capabilities that can be set for a backend through extra specs, so
+ # we need to extract out that part of the mess that is returned.
+ print_data = []
+ keys = data.properties
+ for key in keys:
+ # Stuff the key into the details to make it easier to output
+ capability_data = data.properties[key]
+ capability_data['key'] = key
+ print_data.append(capability_data)
+
+ return (columns,
+ (utils.get_dict_properties(
+ s, columns,
+ ) for s in print_data))
+
+
+class ListPool(command.Lister):
+ _description = _("List pool command")
+
+ def get_parser(self, prog_name):
+ parser = super(ListPool, self).get_parser(prog_name)
+ parser.add_argument(
+ "--long",
+ action="store_true",
+ default=False,
+ help=_("Show detailed information about pools.")
+ )
+ # TODO(smcginnis): Starting with Cinder microversion 3.33, user is also
+ # able to pass in --filters with a <key>=<value> pair to filter on.
+ return parser
+
+ def take_action(self, parsed_args):
+ volume_client = self.app.client_manager.volume
+
+ if parsed_args.long:
+ columns = [
+ 'name',
+ 'storage_protocol',
+ 'thick_provisioning_support',
+ 'thin_provisioning_support',
+ 'total_volumes',
+ 'total_capacity_gb',
+ 'allocated_capacity_gb',
+ 'max_over_subscription_ratio',
+ ]
+ headers = [
+ 'Name',
+ 'Protocol',
+ 'Thick',
+ 'Thin',
+ 'Volumes',
+ 'Capacity',
+ 'Allocated',
+ 'Max Over Ratio'
+ ]
+ else:
+ columns = [
+ 'Name',
+ ]
+ headers = columns
+
+ data = volume_client.pools.list(detailed=parsed_args.long)
+ return (headers,
+ (utils.get_item_properties(
+ s, columns,
+ ) for s in data))