-rw-r--r--.gitignore1
-rw-r--r--.zuul.yaml221
-rw-r--r--README.rst4
-rw-r--r--api-ref/source/conf.py31
-rw-r--r--api-ref/source/v1/samples/resource-schema-response.json10
-rw-r--r--api-ref/source/v1/samples/resource-type-template-hot-response.json107
-rw-r--r--api-ref/source/v1/samples/resource-type-template-response.json102
-rwxr-xr-xbin/heat-api7
-rwxr-xr-xbin/heat-api-cfn8
-rwxr-xr-xbin/heat-engine8
-rwxr-xr-xbin/heat-manage8
-rw-r--r--config-generator.conf1
-rw-r--r--contrib/heat_docker/heat_docker/resources/docker_container.py3
-rw-r--r--contrib/heat_docker/heat_docker/tests/fake_docker_client.py2
-rw-r--r--contrib/heat_docker/heat_docker/tests/test_docker_container.py7
-rw-r--r--contrib/heat_docker/setup.cfg2
-rw-r--r--devstack/lib/heat78
-rw-r--r--devstack/plugin.sh7
-rw-r--r--devstack/settings2
-rw-r--r--doc/requirements.txt7
-rw-r--r--doc/source/_extra/.htaccess4
-rw-r--r--doc/source/_static/.placeholder0
-rw-r--r--doc/source/admin/auth-model.rst5
-rw-r--r--doc/source/admin/index.rst6
-rw-r--r--doc/source/admin/introduction.rst4
-rw-r--r--doc/source/admin/stack-domain-users.rst5
-rw-r--r--doc/source/conf.py59
-rw-r--r--doc/source/configuration/sample_config.rst12
-rw-r--r--doc/source/contributing/index.rst19
-rw-r--r--doc/source/contributor/architecture.rst (renamed from doc/source/developing_guides/architecture.rst)0
-rw-r--r--doc/source/contributor/blueprints.rst (renamed from doc/source/contributing/blueprints.rst)0
-rw-r--r--doc/source/contributor/gmr.rst (renamed from doc/source/developing_guides/gmr.rst)2
-rw-r--r--doc/source/contributor/index.rst26
-rw-r--r--doc/source/contributor/pluginguide.rst (renamed from doc/source/developing_guides/pluginguide.rst)27
-rw-r--r--doc/source/contributor/rally_on_gates.rst (renamed from doc/source/developing_guides/rally_on_gates.rst)6
-rw-r--r--doc/source/contributor/schedulerhints.rst (renamed from doc/source/developing_guides/schedulerhints.rst)0
-rw-r--r--doc/source/contributor/supportstatus.rst (renamed from doc/source/developing_guides/supportstatus.rst)0
-rw-r--r--doc/source/developing_guides/index.rst29
-rw-r--r--doc/source/ext/resources.py52
-rw-r--r--doc/source/getting_started/create_a_stack.rst9
-rw-r--r--doc/source/getting_started/index.rst1
-rw-r--r--doc/source/getting_started/jeos_building.rst84
-rw-r--r--doc/source/getting_started/on_devstack.rst17
-rw-r--r--doc/source/getting_started/standalone.rst4
-rw-r--r--doc/source/glossary.rst67
-rw-r--r--doc/source/index.rst19
-rw-r--r--doc/source/install/install-debian.rst404
-rw-r--r--doc/source/install/install-obs.rst4
-rw-r--r--doc/source/install/install-rdo.rst19
-rw-r--r--doc/source/install/install-ubuntu.rst4
-rw-r--r--doc/source/operating_guides/scale_deployment.rst18
-rw-r--r--doc/source/operating_guides/upgrades_guide.rst2
-rw-r--r--doc/source/template_guide/hot_spec.rst69
-rw-r--r--doc/source/template_guide/multi-clouds.rst52
-rw-r--r--doc/source/template_guide/software_deployment.rst14
-rw-r--r--etc/heat/api-paste.ini15
-rw-r--r--heat/api/aws/exception.py3
-rw-r--r--heat/api/middleware/fault.py3
-rw-r--r--heat/api/openstack/v1/__init__.py3
-rw-r--r--heat/api/openstack/v1/actions.py49
-rw-r--r--heat/api/openstack/v1/events.py11
-rw-r--r--heat/api/openstack/v1/resources.py31
-rw-r--r--heat/api/openstack/v1/software_configs.py7
-rw-r--r--heat/api/openstack/v1/software_deployments.py4
-rw-r--r--heat/api/openstack/v1/stacks.py44
-rw-r--r--heat/api/openstack/v1/util.py62
-rw-r--r--heat/api/openstack/v1/views/views_common.py2
-rw-r--r--heat/api/versions.py8
-rw-r--r--heat/cmd/all.py43
-rw-r--r--heat/cmd/api.py31
-rw-r--r--heat/cmd/api_cfn.py33
-rw-r--r--heat/cmd/engine.py28
-rw-r--r--heat/cmd/manage.py6
-rw-r--r--heat/cmd/status.py1
-rw-r--r--heat/common/config.py25
-rw-r--r--heat/common/context.py10
-rw-r--r--heat/common/crypt.py9
-rw-r--r--heat/common/environment_util.py15
-rw-r--r--heat/common/exception.py23
-rw-r--r--heat/common/grouputils.py4
-rw-r--r--heat/common/i18n.py19
-rw-r--r--heat/common/identifier.py4
-rw-r--r--heat/common/lifecycle_plugin_utils.py2
-rw-r--r--heat/common/password_gen.py9
-rw-r--r--heat/common/plugin_loader.py6
-rw-r--r--heat/common/pluginutils.py5
-rw-r--r--heat/common/policy.py5
-rw-r--r--heat/common/serializers.py9
-rw-r--r--heat/common/service_utils.py12
-rw-r--r--heat/common/short_id.py8
-rw-r--r--heat/common/template_format.py7
-rw-r--r--heat/common/urlfetch.py4
-rw-r--r--heat/common/wsgi.py12
-rw-r--r--heat/db/sqlalchemy/api.py143
-rw-r--r--heat/db/sqlalchemy/filters.py4
-rw-r--r--heat/db/sqlalchemy/migrate_repo/versions/072_raw_template_files.py40
-rw-r--r--heat/db/sqlalchemy/migrate_repo/versions/073_newton.py (renamed from heat/db/sqlalchemy/migrate_repo/versions/071_mitaka.py)28
-rw-r--r--heat/db/sqlalchemy/migrate_repo/versions/073_resource_data_fk_ondelete_cascade.py44
-rw-r--r--heat/db/sqlalchemy/migration.py2
-rw-r--r--heat/db/sqlalchemy/utils.py2
-rw-r--r--heat/engine/api.py15
-rw-r--r--heat/engine/attributes.py24
-rw-r--r--heat/engine/cfn/functions.py37
-rw-r--r--heat/engine/cfn/template.py12
-rw-r--r--heat/engine/check_resource.py58
-rw-r--r--heat/engine/clients/__init__.py3
-rw-r--r--heat/engine/clients/client_plugin.py39
-rw-r--r--heat/engine/clients/microversion_mixin.py5
-rw-r--r--heat/engine/clients/os/__init__.py21
-rw-r--r--heat/engine/clients/os/aodh.py2
-rw-r--r--heat/engine/clients/os/barbican.py2
-rw-r--r--heat/engine/clients/os/blazar.py17
-rw-r--r--heat/engine/clients/os/cinder.py9
-rw-r--r--heat/engine/clients/os/designate.py69
-rw-r--r--heat/engine/clients/os/glance.py2
-rw-r--r--heat/engine/clients/os/heat_plugin.py1
-rw-r--r--heat/engine/clients/os/ironic.py82
-rw-r--r--heat/engine/clients/os/keystone/__init__.py73
-rw-r--r--heat/engine/clients/os/keystone/fake_keystoneclient.py10
-rw-r--r--heat/engine/clients/os/keystone/heat_keystoneclient.py88
-rw-r--r--heat/engine/clients/os/magnum.py2
-rw-r--r--heat/engine/clients/os/manila.py6
-rw-r--r--heat/engine/clients/os/monasca.py9
-rw-r--r--heat/engine/clients/os/neutron/__init__.py27
-rw-r--r--heat/engine/clients/os/nova.py119
-rw-r--r--heat/engine/clients/os/octavia.py19
-rw-r--r--heat/engine/clients/os/sahara.py5
-rw-r--r--heat/engine/clients/os/senlin.py29
-rw-r--r--heat/engine/clients/os/swift.py9
-rw-r--r--heat/engine/clients/os/trove.py2
-rw-r--r--heat/engine/clients/os/vitrage.py30
-rw-r--r--heat/engine/clients/os/zaqar.py4
-rw-r--r--heat/engine/clients/os/zun.py4
-rw-r--r--heat/engine/conditions.py8
-rw-r--r--heat/engine/constraint/common_constraints.py13
-rw-r--r--heat/engine/constraints.py34
-rw-r--r--heat/engine/dependencies.py32
-rw-r--r--heat/engine/environment.py129
-rw-r--r--heat/engine/function.py81
-rw-r--r--heat/engine/hot/functions.py195
-rw-r--r--heat/engine/hot/template.py78
-rw-r--r--heat/engine/node_data.py10
-rw-r--r--heat/engine/output.py6
-rw-r--r--heat/engine/parameters.py77
-rw-r--r--heat/engine/plugin_manager.py12
-rw-r--r--heat/engine/properties.py131
-rw-r--r--heat/engine/properties_group.py4
-rw-r--r--heat/engine/resource.py188
-rw-r--r--heat/engine/resources/alarm_base.py4
-rw-r--r--heat/engine/resources/aws/autoscaling/autoscaling_group.py7
-rw-r--r--heat/engine/resources/aws/autoscaling/launch_config.py5
-rw-r--r--heat/engine/resources/aws/autoscaling/scaling_policy.py9
-rw-r--r--heat/engine/resources/aws/cfn/stack.py3
-rw-r--r--heat/engine/resources/aws/cfn/wait_condition_handle.py6
-rw-r--r--heat/engine/resources/aws/ec2/eip.py7
-rw-r--r--heat/engine/resources/aws/ec2/instance.py15
-rw-r--r--heat/engine/resources/aws/ec2/internet_gateway.py4
-rw-r--r--heat/engine/resources/aws/ec2/security_group.py6
-rw-r--r--heat/engine/resources/aws/iam/user.py5
-rw-r--r--heat/engine/resources/aws/lb/loadbalancer.py3
-rw-r--r--heat/engine/resources/aws/s3/s3.py7
-rw-r--r--heat/engine/resources/openstack/aodh/alarm.py106
-rw-r--r--heat/engine/resources/openstack/barbican/container.py10
-rw-r--r--heat/engine/resources/openstack/barbican/order.py6
-rw-r--r--heat/engine/resources/openstack/blazar/host.py2
-rw-r--r--heat/engine/resources/openstack/cinder/qos_specs.py1
-rw-r--r--heat/engine/resources/openstack/cinder/volume.py37
-rw-r--r--heat/engine/resources/openstack/designate/domain.py108
-rw-r--r--heat/engine/resources/openstack/designate/record.py156
-rw-r--r--heat/engine/resources/openstack/designate/recordset.py11
-rw-r--r--heat/engine/resources/openstack/designate/zone.py42
-rw-r--r--heat/engine/resources/openstack/glance/image.py134
-rw-r--r--heat/engine/resources/openstack/heat/autoscaling_group.py8
-rw-r--r--heat/engine/resources/openstack/heat/delay.py6
-rw-r--r--heat/engine/resources/openstack/heat/deployed_server.py2
-rw-r--r--heat/engine/resources/openstack/heat/instance_group.py26
-rw-r--r--heat/engine/resources/openstack/heat/multi_part.py14
-rw-r--r--heat/engine/resources/openstack/heat/none_resource.py3
-rw-r--r--heat/engine/resources/openstack/heat/random_string.py4
-rw-r--r--heat/engine/resources/openstack/heat/remote_stack.py26
-rw-r--r--heat/engine/resources/openstack/heat/resource_chain.py7
-rw-r--r--heat/engine/resources/openstack/heat/resource_group.py115
-rw-r--r--heat/engine/resources/openstack/heat/scaling_policy.py8
-rw-r--r--heat/engine/resources/openstack/heat/software_config.py4
-rw-r--r--heat/engine/resources/openstack/heat/software_deployment.py13
-rw-r--r--heat/engine/resources/openstack/heat/structured_config.py14
-rw-r--r--heat/engine/resources/openstack/heat/swiftsignal.py5
-rw-r--r--heat/engine/resources/openstack/heat/test_resource.py5
-rw-r--r--heat/engine/resources/openstack/heat/value.py3
-rw-r--r--heat/engine/resources/openstack/heat/wait_condition.py3
-rw-r--r--heat/engine/resources/openstack/ironic/__init__.py0
-rw-r--r--heat/engine/resources/openstack/ironic/port.py240
-rw-r--r--heat/engine/resources/openstack/keystone/project.py18
-rw-r--r--heat/engine/resources/openstack/keystone/region.py2
-rw-r--r--heat/engine/resources/openstack/keystone/role.py3
-rw-r--r--heat/engine/resources/openstack/magnum/bay.py4
-rw-r--r--heat/engine/resources/openstack/magnum/cluster.py4
-rw-r--r--heat/engine/resources/openstack/magnum/cluster_template.py4
-rw-r--r--heat/engine/resources/openstack/manila/share.py17
-rw-r--r--heat/engine/resources/openstack/mistral/external_resource.py3
-rw-r--r--heat/engine/resources/openstack/mistral/workflow.py37
-rw-r--r--heat/engine/resources/openstack/monasca/notification.py4
-rw-r--r--heat/engine/resources/openstack/neutron/extraroute.py12
-rw-r--r--heat/engine/resources/openstack/neutron/extrarouteset.py236
-rw-r--r--heat/engine/resources/openstack/neutron/firewall.py6
-rw-r--r--heat/engine/resources/openstack/neutron/floatingip.py9
-rw-r--r--heat/engine/resources/openstack/neutron/l2_gateway.py11
-rw-r--r--heat/engine/resources/openstack/neutron/lbaas/pool.py4
-rw-r--r--heat/engine/resources/openstack/neutron/net.py7
-rw-r--r--heat/engine/resources/openstack/neutron/neutron.py14
-rw-r--r--heat/engine/resources/openstack/neutron/port.py28
-rw-r--r--heat/engine/resources/openstack/neutron/provider_net.py22
-rw-r--r--heat/engine/resources/openstack/neutron/qos.py91
-rw-r--r--heat/engine/resources/openstack/neutron/router.py6
-rw-r--r--heat/engine/resources/openstack/neutron/sfc/flow_classifier.py4
-rw-r--r--heat/engine/resources/openstack/neutron/sfc/port_pair.py4
-rw-r--r--heat/engine/resources/openstack/neutron/sfc/port_pair_group.py2
-rw-r--r--heat/engine/resources/openstack/neutron/taas/tap_flow.py4
-rw-r--r--heat/engine/resources/openstack/neutron/taas/tap_service.py4
-rw-r--r--heat/engine/resources/openstack/neutron/vpnservice.py10
-rw-r--r--heat/engine/resources/openstack/nova/flavor.py2
-rw-r--r--heat/engine/resources/openstack/nova/floatingip.py5
-rw-r--r--heat/engine/resources/openstack/nova/host_aggregate.py3
-rw-r--r--heat/engine/resources/openstack/nova/keypair.py5
-rw-r--r--heat/engine/resources/openstack/nova/quota.py26
-rw-r--r--heat/engine/resources/openstack/nova/server.py115
-rw-r--r--heat/engine/resources/openstack/nova/server_group.py2
-rw-r--r--heat/engine/resources/openstack/nova/server_network_mixin.py8
-rw-r--r--heat/engine/resources/openstack/octavia/flavor.py132
-rw-r--r--heat/engine/resources/openstack/octavia/flavor_profile.py90
-rw-r--r--heat/engine/resources/openstack/octavia/health_monitor.py4
-rw-r--r--heat/engine/resources/openstack/octavia/listener.py19
-rw-r--r--heat/engine/resources/openstack/octavia/loadbalancer.py31
-rw-r--r--heat/engine/resources/openstack/octavia/pool.py19
-rw-r--r--heat/engine/resources/openstack/octavia/quota.py150
-rw-r--r--heat/engine/resources/openstack/sahara/job.py7
-rw-r--r--heat/engine/resources/openstack/sahara/templates.py7
-rw-r--r--heat/engine/resources/openstack/senlin/cluster.py24
-rw-r--r--heat/engine/resources/openstack/senlin/node.py17
-rw-r--r--heat/engine/resources/openstack/senlin/policy.py9
-rw-r--r--heat/engine/resources/openstack/swift/container.py7
-rw-r--r--heat/engine/resources/openstack/trove/cluster.py12
-rw-r--r--heat/engine/resources/openstack/trove/instance.py11
-rw-r--r--heat/engine/resources/openstack/vitrage/__init__.py0
-rw-r--r--heat/engine/resources/openstack/vitrage/vitrage_template.py135
-rw-r--r--heat/engine/resources/openstack/zaqar/queue.py6
-rw-r--r--heat/engine/resources/openstack/zun/container.py16
-rw-r--r--heat/engine/resources/server_base.py81
-rw-r--r--heat/engine/resources/signal_responder.py41
-rw-r--r--heat/engine/resources/stack_resource.py66
-rw-r--r--heat/engine/resources/template_resource.py7
-rw-r--r--heat/engine/resources/wait_condition.py18
-rw-r--r--heat/engine/rsrc_defn.py42
-rw-r--r--heat/engine/scheduler.py100
-rw-r--r--heat/engine/service.py56
-rw-r--r--heat/engine/service_software_config.py15
-rw-r--r--heat/engine/software_config_io.py15
-rw-r--r--heat/engine/stack.py268
-rw-r--r--heat/engine/stk_defn.py7
-rw-r--r--heat/engine/support.py10
-rw-r--r--heat/engine/sync_point.py3
-rw-r--r--heat/engine/template.py27
-rw-r--r--heat/engine/template_common.py26
-rw-r--r--heat/engine/template_files.py13
-rw-r--r--heat/engine/translation.py3
-rw-r--r--heat/engine/update.py42
-rw-r--r--heat/engine/worker.py8
-rw-r--r--heat/hacking/checks.py13
-rw-r--r--heat/httpd/heat_api.py21
-rw-r--r--heat/httpd/heat_api_cfn.py25
-rw-r--r--heat/locale/de/LC_MESSAGES/heat.po175
-rw-r--r--heat/locale/es/LC_MESSAGES/heat.po92
-rw-r--r--heat/locale/fr/LC_MESSAGES/heat.po90
-rw-r--r--heat/locale/it/LC_MESSAGES/heat.po90
-rw-r--r--heat/locale/ja/LC_MESSAGES/heat.po88
-rw-r--r--heat/locale/ko_KR/LC_MESSAGES/heat.po89
-rw-r--r--heat/locale/pt_BR/LC_MESSAGES/heat.po91
-rw-r--r--heat/locale/ru/LC_MESSAGES/heat.po88
-rw-r--r--heat/locale/zh_CN/LC_MESSAGES/heat.po81
-rw-r--r--heat/locale/zh_TW/LC_MESSAGES/heat.po80
-rw-r--r--heat/objects/event.py2
-rw-r--r--heat/objects/fields.py3
-rw-r--r--heat/objects/resource.py13
-rw-r--r--heat/objects/stack.py5
-rw-r--r--heat/objects/user_creds.py2
-rw-r--r--heat/policies/actions.py133
-rw-r--r--heat/policies/base.py36
-rw-r--r--heat/policies/build_info.py19
-rw-r--r--heat/policies/cloudformation.py149
-rw-r--r--heat/policies/events.py31
-rw-r--r--heat/policies/resource.py66
-rw-r--r--heat/policies/resource_types.py15
-rw-r--r--heat/policies/service.py16
-rw-r--r--heat/policies/software_configs.py66
-rw-r--r--heat/policies/software_deployments.py70
-rw-r--r--heat/policies/stacks.py367
-rw-r--r--heat/scaling/cooldown.py9
-rw-r--r--heat/tests/__init__.py1
-rw-r--r--heat/tests/api/aws/test_api_ec2token.py5
-rw-r--r--heat/tests/api/cfn/test_api_cfn_v1.py5
-rw-r--r--heat/tests/api/openstack_v1/test_actions.py47
-rw-r--r--heat/tests/api/openstack_v1/test_build_info.py5
-rw-r--r--heat/tests/api/openstack_v1/test_events.py14
-rw-r--r--heat/tests/api/openstack_v1/test_resources.py26
-rw-r--r--heat/tests/api/openstack_v1/test_services.py3
-rw-r--r--heat/tests/api/openstack_v1/test_software_configs.py2
-rw-r--r--heat/tests/api/openstack_v1/test_software_deployments.py10
-rw-r--r--heat/tests/api/openstack_v1/test_stacks.py67
-rw-r--r--heat/tests/api/openstack_v1/test_util.py41
-rw-r--r--heat/tests/api/openstack_v1/test_views_common.py4
-rw-r--r--heat/tests/api/openstack_v1/test_views_stacks_view.py2
-rw-r--r--heat/tests/api/openstack_v1/tools.py7
-rw-r--r--heat/tests/api/test_wsgi.py23
-rw-r--r--heat/tests/autoscaling/test_heat_scaling_group.py17
-rw-r--r--heat/tests/autoscaling/test_heat_scaling_policy.py8
-rw-r--r--heat/tests/autoscaling/test_launch_config.py15
-rw-r--r--heat/tests/autoscaling/test_lbutils.py5
-rw-r--r--heat/tests/autoscaling/test_scaling_group.py21
-rw-r--r--heat/tests/autoscaling/test_scaling_policy.py7
-rw-r--r--heat/tests/aws/test_eip.py7
-rw-r--r--heat/tests/aws/test_instance.py30
-rw-r--r--heat/tests/aws/test_loadbalancer.py2
-rw-r--r--heat/tests/aws/test_s3.py7
-rw-r--r--heat/tests/aws/test_security_group.py2
-rw-r--r--heat/tests/aws/test_user.py3
-rw-r--r--heat/tests/aws/test_volume.py25
-rw-r--r--heat/tests/aws/test_waitcondition.py16
-rw-r--r--heat/tests/clients/test_barbican_client.py2
-rw-r--r--heat/tests/clients/test_blazar_client.py3
-rw-r--r--heat/tests/clients/test_cinder_client.py2
-rw-r--r--heat/tests/clients/test_clients.py8
-rw-r--r--heat/tests/clients/test_designate_client.py290
-rw-r--r--heat/tests/clients/test_glance_client.py2
-rw-r--r--heat/tests/clients/test_heat_client.py119
-rw-r--r--heat/tests/clients/test_ironic_client.py78
-rw-r--r--heat/tests/clients/test_keystone_client.py311
-rw-r--r--heat/tests/clients/test_magnum_client.py3
-rw-r--r--heat/tests/clients/test_manila_client.py2
-rw-r--r--heat/tests/clients/test_mistral_client.py3
-rw-r--r--heat/tests/clients/test_monasca_client.py10
-rw-r--r--heat/tests/clients/test_neutron_client.py27
-rw-r--r--heat/tests/clients/test_nova_client.py119
-rw-r--r--heat/tests/clients/test_sahara_client.py7
-rw-r--r--heat/tests/clients/test_sdk_client.py2
-rw-r--r--heat/tests/clients/test_senlin_client.py3
-rw-r--r--heat/tests/clients/test_swift_client.py2
-rw-r--r--heat/tests/clients/test_vitrage_client.py24
-rw-r--r--heat/tests/clients/test_zaqar_client.py3
-rw-r--r--heat/tests/clients/test_zun_client.py2
-rw-r--r--heat/tests/constraints/test_common_constraints.py25
-rw-r--r--heat/tests/convergence/framework/engine_wrapper.py4
-rw-r--r--heat/tests/convergence/framework/message_processor.py11
-rw-r--r--heat/tests/convergence/framework/processes.py1
-rw-r--r--heat/tests/convergence/framework/reality.py1
-rw-r--r--heat/tests/convergence/framework/worker_wrapper.py2
-rw-r--r--heat/tests/convergence/test_converge.py5
-rw-r--r--heat/tests/db/test_migrations.py522
-rw-r--r--heat/tests/db/test_sqlalchemy_api.py234
-rw-r--r--heat/tests/db/test_sqlalchemy_filters.py2
-rw-r--r--heat/tests/db/test_utils.py2
-rw-r--r--heat/tests/engine/service/test_service_engine.py2
-rw-r--r--heat/tests/engine/service/test_software_config.py23
-rw-r--r--heat/tests/engine/service/test_stack_action.py6
-rw-r--r--heat/tests/engine/service/test_stack_adopt.py6
-rw-r--r--heat/tests/engine/service/test_stack_create.py18
-rw-r--r--heat/tests/engine/service/test_stack_delete.py3
-rw-r--r--heat/tests/engine/service/test_stack_events.py5
-rw-r--r--heat/tests/engine/service/test_stack_resources.py21
-rw-r--r--heat/tests/engine/service/test_stack_snapshot.py19
-rw-r--r--heat/tests/engine/service/test_stack_update.py47
-rw-r--r--heat/tests/engine/service/test_threadgroup_mgr.py6
-rw-r--r--heat/tests/engine/test_check_resource.py15
-rw-r--r--heat/tests/engine/test_dependencies.py24
-rw-r--r--heat/tests/engine/test_engine_worker.py25
-rw-r--r--heat/tests/engine/test_plugin_manager.py6
-rw-r--r--heat/tests/engine/test_resource_type.py9
-rw-r--r--heat/tests/engine/test_scheduler.py12
-rw-r--r--heat/tests/engine/test_sync_point.py3
-rw-r--r--heat/tests/engine/tools.py11
-rw-r--r--heat/tests/fakes.py36
-rw-r--r--heat/tests/generic_resource.py6
-rw-r--r--heat/tests/openstack/aodh/test_alarm.py133
-rw-r--r--heat/tests/openstack/aodh/test_composite_alarm.py6
-rw-r--r--heat/tests/openstack/aodh/test_gnocchi_alarm.py2
-rw-r--r--heat/tests/openstack/barbican/test_container.py7
-rw-r--r--heat/tests/openstack/barbican/test_order.py7
-rw-r--r--heat/tests/openstack/barbican/test_secret.py2
-rw-r--r--heat/tests/openstack/blazar/test_host.py3
-rw-r--r--heat/tests/openstack/blazar/test_lease.py3
-rw-r--r--heat/tests/openstack/cinder/test_qos_specs.py6
-rw-r--r--heat/tests/openstack/cinder/test_quota.py11
-rw-r--r--heat/tests/openstack/cinder/test_volume.py198
-rw-r--r--heat/tests/openstack/cinder/test_volume_type.py5
-rw-r--r--heat/tests/openstack/cinder/test_volume_type_encryption.py2
-rw-r--r--heat/tests/openstack/cinder/test_volume_utils.py6
-rw-r--r--heat/tests/openstack/designate/test_domain.py216
-rw-r--r--heat/tests/openstack/designate/test_record.py290
-rw-r--r--heat/tests/openstack/designate/test_recordset.py17
-rw-r--r--heat/tests/openstack/designate/test_zone.py93
-rw-r--r--heat/tests/openstack/glance/test_image.py141
-rw-r--r--heat/tests/openstack/heat/test_cloud_config.py2
-rw-r--r--heat/tests/openstack/heat/test_deployed_server.py4
-rw-r--r--heat/tests/openstack/heat/test_instance_group.py89
-rw-r--r--heat/tests/openstack/heat/test_instance_group_update_policy.py3
-rw-r--r--heat/tests/openstack/heat/test_multi_part.py17
-rw-r--r--heat/tests/openstack/heat/test_none_resource.py2
-rw-r--r--heat/tests/openstack/heat/test_random_string.py7
-rw-r--r--heat/tests/openstack/heat/test_remote_stack.py29
-rw-r--r--heat/tests/openstack/heat/test_resource_chain.py5
-rw-r--r--heat/tests/openstack/heat/test_resource_group.py114
-rw-r--r--heat/tests/openstack/heat/test_software_component.py5
-rw-r--r--heat/tests/openstack/heat/test_software_config.py2
-rw-r--r--heat/tests/openstack/heat/test_software_deployment.py12
-rw-r--r--heat/tests/openstack/heat/test_structured_config.py2
-rw-r--r--heat/tests/openstack/heat/test_swiftsignal.py13
-rw-r--r--heat/tests/openstack/heat/test_value.py3
-rw-r--r--heat/tests/openstack/heat/test_waitcondition.py5
-rw-r--r--heat/tests/openstack/ironic/__init__.py0
-rw-r--r--heat/tests/openstack/ironic/test_port.py276
-rw-r--r--heat/tests/openstack/keystone/test_domain.py2
-rw-r--r--heat/tests/openstack/keystone/test_endpoint.py3
-rw-r--r--heat/tests/openstack/keystone/test_group.py2
-rw-r--r--heat/tests/openstack/keystone/test_project.py2
-rw-r--r--heat/tests/openstack/keystone/test_region.py4
-rw-r--r--heat/tests/openstack/keystone/test_role.py2
-rw-r--r--heat/tests/openstack/keystone/test_role_assignments.py4
-rw-r--r--heat/tests/openstack/keystone/test_service.py3
-rw-r--r--heat/tests/openstack/keystone/test_user.py2
-rw-r--r--heat/tests/openstack/magnum/test_bay.py12
-rw-r--r--heat/tests/openstack/magnum/test_cluster.py10
-rw-r--r--heat/tests/openstack/magnum/test_cluster_template.py8
-rw-r--r--heat/tests/openstack/manila/test_security_service.py7
-rw-r--r--heat/tests/openstack/manila/test_share.py25
-rw-r--r--heat/tests/openstack/manila/test_share_network.py2
-rw-r--r--heat/tests/openstack/manila/test_share_type.py2
-rw-r--r--heat/tests/openstack/mistral/test_cron_trigger.py2
-rw-r--r--heat/tests/openstack/mistral/test_external_resource.py2
-rw-r--r--heat/tests/openstack/mistral/test_workflow.py118
-rw-r--r--heat/tests/openstack/monasca/test_alarm_definition.py2
-rw-r--r--heat/tests/openstack/monasca/test_notification.py13
-rw-r--r--heat/tests/openstack/neutron/lbaas/test_health_monitor.py2
-rw-r--r--heat/tests/openstack/neutron/lbaas/test_l7policy.py2
-rw-r--r--heat/tests/openstack/neutron/lbaas/test_l7rule.py2
-rw-r--r--heat/tests/openstack/neutron/lbaas/test_listener.py2
-rw-r--r--heat/tests/openstack/neutron/lbaas/test_loadbalancer.py2
-rw-r--r--heat/tests/openstack/neutron/lbaas/test_pool.py2
-rw-r--r--heat/tests/openstack/neutron/lbaas/test_pool_member.py2
-rw-r--r--heat/tests/openstack/neutron/test_address_scope.py2
-rw-r--r--heat/tests/openstack/neutron/test_extraroute.py60
-rw-r--r--heat/tests/openstack/neutron/test_neutron.py7
-rw-r--r--heat/tests/openstack/neutron/test_neutron_extrarouteset.py237
-rw-r--r--heat/tests/openstack/neutron/test_neutron_firewall.py24
-rw-r--r--heat/tests/openstack/neutron/test_neutron_floating_ip.py2
-rw-r--r--heat/tests/openstack/neutron/test_neutron_l2_gateway.py12
-rw-r--r--heat/tests/openstack/neutron/test_neutron_l2_gateway_connection.py3
-rw-r--r--heat/tests/openstack/neutron/test_neutron_loadbalancer.py24
-rw-r--r--heat/tests/openstack/neutron/test_neutron_metering.py11
-rw-r--r--heat/tests/openstack/neutron/test_neutron_net.py7
-rw-r--r--heat/tests/openstack/neutron/test_neutron_network_gateway.py11
-rw-r--r--heat/tests/openstack/neutron/test_neutron_port.py122
-rw-r--r--heat/tests/openstack/neutron/test_neutron_provider_net.py2
-rw-r--r--heat/tests/openstack/neutron/test_neutron_rbac_policy.py2
-rw-r--r--heat/tests/openstack/neutron/test_neutron_router.py15
-rw-r--r--heat/tests/openstack/neutron/test_neutron_security_group.py2
-rw-r--r--heat/tests/openstack/neutron/test_neutron_security_group_rule.py2
-rw-r--r--heat/tests/openstack/neutron/test_neutron_segment.py12
-rw-r--r--heat/tests/openstack/neutron/test_neutron_subnet.py18
-rw-r--r--heat/tests/openstack/neutron/test_neutron_subnetpool.py11
-rw-r--r--heat/tests/openstack/neutron/test_neutron_trunk.py3
-rw-r--r--heat/tests/openstack/neutron/test_neutron_vpnservice.py56
-rw-r--r--heat/tests/openstack/neutron/test_qos.py125
-rw-r--r--heat/tests/openstack/neutron/test_quota.py5
-rw-r--r--heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py4
-rw-r--r--heat/tests/openstack/neutron/test_sfc/test_port_chain.py4
-rw-r--r--heat/tests/openstack/neutron/test_sfc/test_port_pair.py8
-rw-r--r--heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py8
-rw-r--r--heat/tests/openstack/neutron/test_taas/test_tap_flow.py8
-rw-r--r--heat/tests/openstack/neutron/test_taas/test_tap_service.py6
-rw-r--r--heat/tests/openstack/nova/fakes.py209
-rw-r--r--heat/tests/openstack/nova/test_flavor.py9
-rw-r--r--heat/tests/openstack/nova/test_floatingip.py2
-rw-r--r--heat/tests/openstack/nova/test_host_aggregate.py7
-rw-r--r--heat/tests/openstack/nova/test_keypair.py18
-rw-r--r--heat/tests/openstack/nova/test_quota.py8
-rw-r--r--heat/tests/openstack/nova/test_server.py397
-rw-r--r--heat/tests/openstack/nova/test_server_group.py6
-rw-r--r--heat/tests/openstack/octavia/inline_templates.py31
-rw-r--r--heat/tests/openstack/octavia/test_flavor.py95
-rw-r--r--heat/tests/openstack/octavia/test_flavor_profile.py92
-rw-r--r--heat/tests/openstack/octavia/test_health_monitor.py2
-rw-r--r--heat/tests/openstack/octavia/test_l7policy.py2
-rw-r--r--heat/tests/openstack/octavia/test_l7rule.py2
-rw-r--r--heat/tests/openstack/octavia/test_listener.py3
-rw-r--r--heat/tests/openstack/octavia/test_loadbalancer.py6
-rw-r--r--heat/tests/openstack/octavia/test_pool.py5
-rw-r--r--heat/tests/openstack/octavia/test_pool_member.py2
-rw-r--r--heat/tests/openstack/octavia/test_quota.py141
-rw-r--r--heat/tests/openstack/sahara/test_cluster.py14
-rw-r--r--heat/tests/openstack/sahara/test_data_source.py5
-rw-r--r--heat/tests/openstack/sahara/test_image.py2
-rw-r--r--heat/tests/openstack/sahara/test_job.py2
-rw-r--r--heat/tests/openstack/sahara/test_job_binary.py7
-rw-r--r--heat/tests/openstack/sahara/test_templates.py21
-rw-r--r--heat/tests/openstack/senlin/test_cluster.py32
-rw-r--r--heat/tests/openstack/senlin/test_node.py10
-rw-r--r--heat/tests/openstack/senlin/test_policy.py27
-rw-r--r--heat/tests/openstack/senlin/test_profile.py2
-rw-r--r--heat/tests/openstack/senlin/test_receiver.py2
-rw-r--r--heat/tests/openstack/swift/test_container.py7
-rw-r--r--heat/tests/openstack/trove/test_cluster.py8
-rw-r--r--heat/tests/openstack/trove/test_instance.py31
-rw-r--r--heat/tests/openstack/vitrage/__init__.py0
-rw-r--r--heat/tests/openstack/vitrage/test_vitrage_template.py183
-rw-r--r--heat/tests/openstack/zaqar/test_queue.py8
-rw-r--r--heat/tests/openstack/zaqar/test_subscription.py11
-rw-r--r--heat/tests/openstack/zun/test_container.py14
-rw-r--r--heat/tests/policy/check_admin.json3
-rw-r--r--heat/tests/policy/deny_stack_user.json15
-rw-r--r--heat/tests/policy/notallowed.json14
-rw-r--r--heat/tests/policy/resources.json4
-rw-r--r--heat/tests/policy/test_acl_personas.yaml241
-rw-r--r--heat/tests/policy/test_deprecated_access.yaml22
-rw-r--r--heat/tests/test_attributes.py9
-rw-r--r--heat/tests/test_auth_password.py9
-rw-r--r--heat/tests/test_auth_url.py6
-rw-r--r--heat/tests/test_common_auth_plugin.py8
-rw-r--r--heat/tests/test_common_context.py2
-rw-r--r--heat/tests/test_common_policy.py125
-rw-r--r--heat/tests/test_common_serializers.py15
-rw-r--r--heat/tests/test_common_service_utils.py12
-rw-r--r--heat/tests/test_constraints.py91
-rw-r--r--heat/tests/test_convg_stack.py5
-rw-r--r--heat/tests/test_crypt.py5
-rw-r--r--heat/tests/test_engine_api_utils.py17
-rw-r--r--heat/tests/test_engine_service.py17
-rw-r--r--heat/tests/test_environment.py9
-rw-r--r--heat/tests/test_environment_format.py2
-rw-r--r--heat/tests/test_event.py5
-rw-r--r--heat/tests/test_exception.py8
-rw-r--r--heat/tests/test_fault_middleware.py8
-rw-r--r--heat/tests/test_function.py42
-rw-r--r--heat/tests/test_grouputils.py5
-rw-r--r--heat/tests/test_hacking.py6
-rw-r--r--heat/tests/test_hot.py230
-rw-r--r--heat/tests/test_lifecycle_plugin_utils.py2
-rw-r--r--heat/tests/test_loguserdata.py2
-rw-r--r--heat/tests/test_metadata_refresh.py8
-rw-r--r--heat/tests/test_nested_stack.py15
-rw-r--r--heat/tests/test_noauth.py6
-rw-r--r--heat/tests/test_notifications.py3
-rw-r--r--heat/tests/test_parameters.py53
-rw-r--r--heat/tests/test_plugin_loader.py3
-rw-r--r--heat/tests/test_properties.py116
-rw-r--r--heat/tests/test_properties_group.py3
-rw-r--r--heat/tests/test_provider_template.py21
-rw-r--r--heat/tests/test_resource.py129
-rw-r--r--heat/tests/test_rpc_client.py2
-rw-r--r--heat/tests/test_rpc_listener_client.py3
-rw-r--r--heat/tests/test_rpc_worker_client.py2
-rw-r--r--heat/tests/test_rsrc_defn.py37
-rw-r--r--heat/tests/test_server_tags.py3
-rw-r--r--heat/tests/test_signal.py14
-rw-r--r--heat/tests/test_stack.py100
-rw-r--r--heat/tests/test_stack_collect_attributes.py5
-rw-r--r--heat/tests/test_stack_delete.py2
-rw-r--r--heat/tests/test_stack_lock.py4
-rw-r--r--heat/tests/test_stack_resource.py44
-rw-r--r--heat/tests/test_stack_update.py38
-rw-r--r--heat/tests/test_stack_user.py8
-rw-r--r--heat/tests/test_support.py3
-rw-r--r--heat/tests/test_template.py95
-rw-r--r--heat/tests/test_template_format.py11
-rw-r--r--heat/tests/test_translation_rule.py19
-rw-r--r--heat/tests/test_urlfetch.py15
-rw-r--r--heat/tests/test_validate.py40
-rw-r--r--heat/tests/test_vpc.py4
-rw-r--r--heat/tests/utils.py26
-rw-r--r--heat_integrationtests/README.rst7
-rw-r--r--heat_integrationtests/__init__.py6
-rwxr-xr-xheat_integrationtests/cleanup_test_env.sh4
-rw-r--r--heat_integrationtests/common/clients.py8
-rw-r--r--heat_integrationtests/common/config.py147
-rw-r--r--heat_integrationtests/common/test.py83
-rw-r--r--heat_integrationtests/config-generator.conf4
-rw-r--r--heat_integrationtests/functional/test_autoscaling.py3
-rw-r--r--heat_integrationtests/functional/test_aws_stack.py2
-rw-r--r--heat_integrationtests/functional/test_cancel_update.py4
-rw-r--r--heat_integrationtests/functional/test_conditions.py156
-rw-r--r--heat_integrationtests/functional/test_create_update.py73
-rw-r--r--heat_integrationtests/functional/test_heat_autoscaling.py4
-rw-r--r--heat_integrationtests/functional/test_keystone_user_with_domain.py183
-rw-r--r--heat_integrationtests/functional/test_resource_group.py5
-rw-r--r--heat_integrationtests/functional/test_simultaneous_update.py83
-rw-r--r--heat_integrationtests/functional/test_template_resource.py5
-rw-r--r--heat_integrationtests/functional/test_template_versions.py3
-rw-r--r--heat_integrationtests/functional/test_update_restricted.py10
-rw-r--r--heat_integrationtests/functional/test_validation.py15
-rwxr-xr-xheat_integrationtests/post_test_hook.sh27
-rwxr-xr-xheat_integrationtests/pre_test_hook.sh53
-rwxr-xr-xheat_integrationtests/prepare_test_env.sh2
-rwxr-xr-xheat_integrationtests/prepare_test_network.sh4
-rw-r--r--lower-constraints.txt75
-rw-r--r--playbooks/devstack/functional/post.yaml28
-rw-r--r--playbooks/devstack/functional/run.yaml125
-rw-r--r--playbooks/devstack/grenade/run.yaml60
-rw-r--r--playbooks/devstack/multinode-networking/pre.yaml3
-rw-r--r--releasenotes/config.yaml4
-rw-r--r--releasenotes/notes/Change-logger-path-e7a13878e5bb0bc2.yaml10
-rw-r--r--releasenotes/notes/SOURCE_IP_PORT-to-LB_ALGORITHM-11f0edf22096df74.yaml4
-rw-r--r--releasenotes/notes/add-aodh-lbmemberhealth-alarm-c59502aac1944b8b.yaml4
-rw-r--r--releasenotes/notes/add-dedicated-auth-endpoint-config-for-servers-b20f7eb351f619d0.yaml16
-rw-r--r--releasenotes/notes/add-dns_domain-to-ProviderNet-84b14a85b8653c7c.yaml6
-rw-r--r--releasenotes/notes/add-octavia-flavor-flavorprofile-support-90ef922d19591c60.yaml6
-rw-r--r--releasenotes/notes/add-port-uplink-status-propagation-abd90d794e330d31.yaml8
-rw-r--r--releasenotes/notes/add-tty-property-to-container-1b8bf92f0f47deca.yaml5
-rw-r--r--releasenotes/notes/add-vitrage-client-plugin-cb9e6b51ec2cc6ec.yaml4
-rw-r--r--releasenotes/notes/delay-resource-7d44c512081026c8.yaml4
-rw-r--r--releasenotes/notes/deprecate-nova-quota-injected_file-properties-6c6fd7f5231e4c40.yaml5
-rw-r--r--releasenotes/notes/designate-zone-primaries-c48c37222ea06eb9.yaml5
-rw-r--r--releasenotes/notes/fix-autoscalinggroup-reference-id-caf8b80c9288ad0f.yaml10
-rw-r--r--releasenotes/notes/granular-action-policy-b8c143bb5f203b68.yaml10
-rw-r--r--releasenotes/notes/heat-template-support-trove-cluster-996efba5dfb6f02d.yaml5
-rw-r--r--releasenotes/notes/hidden-multiattach-c761af6165c9571f.yaml6
-rw-r--r--releasenotes/notes/honoring_oslo_db_config-bf32711bf99a2e47.yaml13
-rw-r--r--releasenotes/notes/if-macro-optional-properties-40647f036903731b.yaml8
-rw-r--r--releasenotes/notes/ike-properties-updateable-for-vpnaas-c42af7a4631e5dd3.yaml5
-rw-r--r--releasenotes/notes/ip-version-n-attribute-deprecation-bea1c6e4ca3678f1.yaml7
-rw-r--r--releasenotes/notes/neutron-extrarouteset-379c5354e1ac7795.yaml10
-rw-r--r--releasenotes/notes/neutron-qos-minimum-bandwidth-rule-cb38db4ebc27688e.yaml7
-rw-r--r--releasenotes/notes/octavia-pool-tls-enabled-373a8c74f7c7664b.yaml6
-rw-r--r--releasenotes/notes/octavia-quota-resource-52c1ea86f16d9513.yaml4
-rw-r--r--releasenotes/notes/os-neutron-net-segments-attribute-semi-predictable-b40a869317d053cc.yaml16
-rw-r--r--releasenotes/notes/port-mac-address-update-b377d23434e7b48a.yaml6
-rw-r--r--releasenotes/notes/providernet-segments-attribute-cc20b22bf3a25e96.yaml4
-rw-r--r--releasenotes/notes/python2-7125a4d5b441e7a6.yaml5
-rw-r--r--releasenotes/notes/remove-default-domain-from-templates-b5965242bfb78145.yaml13
-rw-r--r--releasenotes/notes/remove-designate-v1-support-107de4784f8da2a6.yaml8
-rw-r--r--releasenotes/notes/remove-nova-api-extension-934f8389ea42e9e4.yaml6
-rw-r--r--releasenotes/notes/support-allowed-cidrs-for-octavia-listener-d563a759d34da8b0.yaml6
-rw-r--r--releasenotes/notes/support-cephx-access-type-in-manila-share-71a416bf55aea214.yaml8
-rw-r--r--releasenotes/notes/support-domain-in-keystone-lookups-f657da8322f17938.yaml7
-rw-r--r--releasenotes/notes/support-handling-empty-string-for-volume-az-22ad78eb0f931954.yaml6
-rw-r--r--releasenotes/notes/support-ignition-93daac40f43a2cfe.yaml7
-rw-r--r--releasenotes/notes/support-ironic-client-plugin-b7b91b7090579c81.yaml7
-rw-r--r--releasenotes/notes/support-ironic-port-resource-type-304284a7c508d5d5.yaml4
-rw-r--r--releasenotes/notes/support-rbac-824a2d02c8746d3d.yaml15
-rw-r--r--releasenotes/notes/support-shared-servies-multi-region-mode-d9f167fb52d9c0a8.yaml4
-rw-r--r--releasenotes/notes/support_case_insensitive_user_name_search-92d6126d8be2ce4f.yaml11
-rw-r--r--releasenotes/notes/support_set_group_for_multipart-79b5819b9b3a82ad.yaml8
-rw-r--r--releasenotes/notes/update-firwallpolicy-ruls-90a8904e899b2365.yaml5
-rw-r--r--releasenotes/notes/update-webimage-resource-properties-c3e06b2c98b7d127.yaml10
-rw-r--r--releasenotes/notes/vitrage-template-resource-8869a8e34418b22f.yaml6
-rw-r--r--releasenotes/source/conf.py23
-rw-r--r--releasenotes/source/index.rst3
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po739
-rw-r--r--releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po73
-rw-r--r--releasenotes/source/train.rst6
-rw-r--r--releasenotes/source/ussuri.rst6
-rw-r--r--releasenotes/source/victoria.rst6
-rw-r--r--requirements.txt48
-rw-r--r--roles/run-heat-tests/defaults/main.yaml2
-rw-r--r--roles/run-heat-tests/tasks/main.yaml9
-rw-r--r--setup.cfg19
-rw-r--r--setup.py8
-rw-r--r--test-requirements.txt15
-rw-r--r--tools/README.rst2
-rw-r--r--tools/custom_guidelines.py12
-rw-r--r--tools/dashboards/heat.dash3
-rwxr-xr-xtools/test-setup.sh4
-rw-r--r--tox.ini81
670 files changed, 13050 insertions, 7903 deletions
diff --git a/.gitignore b/.gitignore
index 5babaa2c2..c81bc0589 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,6 +35,7 @@ doc/source/_static/heat.policy.yaml.sample
# Files created by releasenotes build
releasenotes/build
+releasenotes/notes/reno.cache
# sample config included in docs
doc/source/_static/heat.conf.sample
diff --git a/.zuul.yaml b/.zuul.yaml
index e1478bcca..0daa2a8bf 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,26 +1,103 @@
- job:
- name: heat-functional-devstack-base
- parent: legacy-dsvm-base
+ name: heat-functional-base
+ parent: devstack
+ abstract: true
run: playbooks/devstack/functional/run.yaml
post-run: playbooks/devstack/functional/post.yaml
+ description: Base heat functional test job
timeout: 7800
+ roles:
+ - zuul: opendev.org/openstack/devstack
+ - zuul: opendev.org/openstack/tempest
required-projects:
- openstack/devstack-gate
- - openstack/aodh
- openstack/barbican
- - openstack/barbican-tempest-plugin
- - openstack/ceilometer
- - openstack/devstack-plugin-amqp1
- openstack/heat
+ - openstack/heat-templates
- openstack/heat-tempest-plugin
- - openstack/neutron
- openstack/octavia
+ - openstack/neutron
- openstack/oslo.messaging
- openstack/python-barbicanclient
- openstack/python-heatclient
- openstack/heat-agents
- openstack/python-zaqarclient
- openstack/zaqar
+ - openstack/tempest
+ vars:
+ configure_swap_size: 8192
+ tempest_plugins:
+ - heat-tempest-plugin
+ devstack_localrc:
+ TEMPEST_PLUGINS: '/opt/stack/heat-tempest-plugin'
+ HEAT_USE_MOD_WSGI: True
+ CEILOMETER_PIPELINE_INTERVAL: 60
+ devstack_services:
+ tls-proxy: false
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ tempest: true
+ devstack_plugins:
+ barbican: https://opendev.org/openstack/barbican
+ zaqar: https://opendev.org/openstack/zaqar
+ heat: https://opendev.org/openstack/heat
+ octavia: https://opendev.org/openstack/octavia
+ devstack_local_conf:
+ post-config:
+ $HEAT_CONF:
+ DEFAULT:
+ convergence_engine: true
+ stack_scheduler_hints: true
+ hidden_stack_tags: hidden
+ encrypt_parameters_and_properties: True
+ logging_exception_prefix: "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s"
+ enable_stack_adopt: true
+ enable_stack_abandon: true
+ heat_api:
+ workers: 2
+ heat_api_cfn:
+ workers: 2
+ cache:
+ enabled: True
+ eventlet_opts:
+ client_socket_timeout: 120
+ oslo_messaging_notifications:
+ driver: messagingv2
+ max_stacks_per_tenant: 256
+ test-config:
+ $TEMPEST_CONFIG:
+ service_available:
+ heat: True
+ heat_plugin:
+ convergence_engine_enabled: true
+ minimal_image_ref: ${DEFAULT_IMAGE_NAME:-cirros-0.3.6-x86_64-disk}
+ instance_type: m1.heat_int
+ minimal_instance_type: m1.heat_micro
+ image_ref: Fedora-Cloud-Base-33-1.2.x86_64
+ hidden_stack_tag: hidden
+ heat_config_notify_script: /opt/stack/heat-agents/heat-config/bin/heat-config-notify
+ boot_config_env: /opt/stack/heat-templates/hot/software-config/boot-config/test_image_env.yaml
+ credential_secret_id: $OS_CREDENTIAL_SECRET_ID
+ heat_features_enabled:
+ multi_cloud: True
+ # disable cinder backup feature
+ volume-feature-enabled:
+ backup: False
+ test_results_stage_name: test_results
+ zuul_copy_output:
+ '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
+ '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
+ '{{ devstack_base_dir }}/tempest/tempest.log': logs
+ '{{ stage_dir }}/{{ test_results_stage_name }}.subunit': logs
+ '{{ stage_dir }}/{{ test_results_stage_name }}.html': logs
+ '{{ stage_dir }}/stackviz': logs
+ extensions_to_txt:
+ conf: true
+ log: true
+ yaml: true
+ yml: true
irrelevant-files:
- ^.*\.rst$
- ^api-ref/.*$
@@ -28,105 +105,107 @@
- ^heat/locale/.*$
- ^heat/tests/.*$
- ^releasenotes/.*$
- vars:
- disable_convergence: 'false'
- sql: mysql
- use_amqp1: 0
- use_apache: 1
- use_python3: 0
- branch_override: default
-- job:
- name: heat-functional-orig-mysql-lbaasv2
- parent: heat-functional-devstack-base
- vars:
- disable_convergence: 'true'
- job:
- name: heat-functional-convg-mysql-lbaasv2
- parent: heat-functional-devstack-base
+ name: heat-functional
+ parent: heat-functional-base
- job:
- name: heat-functional-convg-mysql-lbaasv2-amqp1
- parent: heat-functional-devstack-base
- voting: false
- branches: master
+ name: heat-functional-legacy
+ parent: heat-functional-base
vars:
- use_amqp1: 1
+ devstack_local_conf:
+ post-config:
+ $HEAT_CONF:
+ DEFAULT:
+ convergence_engine: false
+ test-config:
+ $TEMPEST_CONFIG:
+ heat_plugin:
+ convergence_engine_enabled: false
- job:
- name: heat-functional-convg-mysql-lbaasv2-non-apache
- parent: heat-functional-devstack-base
+ name: heat-functional-non-apache
+ parent: heat-functional-base
voting: false
vars:
- use_apache: 0
-
-- job:
- name: heat-functional-convg-mysql-lbaasv2-py3
- parent: heat-functional-devstack-base
- vars:
- use_python3: 1
+ devstack_localrc:
+ HEAT_USE_MOD_WSGI: False
- job:
- name: grenade-heat
- parent: legacy-dsvm-base
- run: playbooks/devstack/grenade/run.yaml
- post-run: playbooks/devstack/functional/post.yaml
- timeout: 7800
+ name: grenade-heat-multinode
+ parent: grenade-multinode
+ # FIXME(ramishra): Make it voting once stable/victoria
+ # starts using available images from mirror.
required-projects:
- - openstack/grenade
- - openstack/devstack-gate
- - openstack/heat
- - openstack/heat-tempest-plugin
+ - opendev.org/openstack/heat
+ - opendev.org/openstack/heat-tempest-plugin
+ - opendev.org/openstack/python-heatclient
+ vars:
+ grenade_devstack_localrc:
+ shared:
+ HOST_TOPOLOGY: multinode
+ HOST_TOPOLOGY_ROLE: primary
+ HOST_TOPOLOGY_SUBNODES: "{{ hostvars['compute1']['nodepool']['public_ipv4'] }}"
+ configure_swap_size: 8192
+ devstack_services:
+ h-api: true
+ h-api-cfn: true
+ h-eng: true
+ heat: true
+ # We do run a list of tests after upgrade. This is just to bypass the req from parent.
+ tempest_test_regex: ^heat_tempest_plugin\.tests\.functional\.test_nova_server_networks
+ tox_envlist: all
+ devstack_plugins:
+ heat: https://opendev.org/openstack/heat
+ tempest_plugins:
+ - heat-tempest-plugin
+ group-vars:
+ subnode:
+ grenade_devstack_localrc:
+ shared:
+ HOST_TOPOLOGY: multinode
+ HOST_TOPOLOGY_ROLE: subnode
+ HOST_TOPOLOGY_SUBNODES: "{{ hostvars['compute1']['nodepool']['public_ipv4'] }}"
+ configure_swap_size: 8192
+ devstack_services:
+ h-api: true
+ h-api-cfn: true
+ h-eng: true
+ heat: true
+ # We do run a list of tests after upgrade. This is just to bypass the req from parent.
+ devstack_plugins:
+ heat: https://opendev.org/openstack/heat
irrelevant-files:
- - ^(test-|)requirements.txt$
- ^.*\.rst$
- ^api-ref/.*$
- ^doc/.*$
- ^heat/locale/.*$
- ^heat/tests/.*$
- ^releasenotes/.*$
- - ^setup.cfg$
- vars:
- topology: aio
- branch_override: default
-
-- job:
- name: grenade-heat-multinode
- parent: grenade-heat
- nodeset: ubuntu-bionic-2-node
- roles:
- - zuul: zuul/zuul-jobs
- pre-run: playbooks/devstack/multinode-networking/pre.yaml
- vars:
- topology: multinode
- project:
templates:
- openstack-cover-jobs
- openstack-lower-constraints-jobs
- - openstack-python-jobs
- - openstack-python3-train-jobs
+ - openstack-python3-wallaby-jobs
+ - openstack-python3-wallaby-jobs-arm64
+ - periodic-stable-jobs
- publish-openstack-docs-pti
- check-requirements
- release-notes-jobs-python3
check:
jobs:
- - grenade-heat
- grenade-heat-multinode
- - heat-functional-orig-mysql-lbaasv2
- - heat-functional-convg-mysql-lbaasv2
- - heat-functional-convg-mysql-lbaasv2-amqp1
- - heat-functional-convg-mysql-lbaasv2-non-apache
- - heat-functional-convg-mysql-lbaasv2-py3
+ - heat-functional
+ - heat-functional-legacy
gate:
queue: heat
jobs:
- - grenade-heat
- grenade-heat-multinode
- - heat-functional-orig-mysql-lbaasv2
- - heat-functional-convg-mysql-lbaasv2
- - heat-functional-convg-mysql-lbaasv2-py3
+ - heat-functional
+ - heat-functional-legacy
experimental:
jobs:
- tripleo-ci-centos-7-scenario002-standalone
diff --git a/README.rst b/README.rst
index 83a587236..6e1ff8e95 100644
--- a/README.rst
+++ b/README.rst
@@ -73,3 +73,7 @@ We have integration with
* https://opendev.org/openstack/python-monascaclient (monitoring service)
* https://opendev.org/openstack/python-zunclient (container management service)
* https://opendev.org/openstack/python-blazarclient (reservation service)
+* https://opendev.org/openstack/python-octaviaclient.git (Load-balancer service)
+* https://opendev.org/openstack/python-senlinclient (Clustering service)
+* https://opendev.org/openstack/python-vitrageclient.git (RCA service)
+* https://opendev.org/openstack/python-ironicclient (baremetal provisioning service)
diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py
index 17886a2f0..5f1657cb2 100644
--- a/api-ref/source/conf.py
+++ b/api-ref/source/conf.py
@@ -26,9 +26,7 @@
# serve to show the default.
import os
-import subprocess
import sys
-import warnings
html_theme = 'openstackdocs'
html_theme_options = {
@@ -63,23 +61,11 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'Orchestration API Reference'
copyright = u'2010-present, OpenStack Foundation'
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-from heat.version import version_info
-# The full version, including alpha/beta/rc tags.
-release = version_info.release_string()
-# The short X.Y version.
-version = version_info.version_string()
-
# openstackdocstheme options
-repository_name = 'openstack/heat'
-bug_project = 'heat'
-bug_tag = 'api-ref'
+openstackdocs_repo_name = 'openstack/heat'
+openstackdocs_use_storyboard = True
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -108,7 +94,7 @@ add_module_names = False
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# -- Options for man page output ----------------------------------------------
@@ -152,17 +138,6 @@ pygments_style = 'sphinx'
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
- "-n1"]
-try:
- html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8')
-except Exception:
- warnings.warn('Cannot get last updated time from git repository. '
- 'Not setting "html_last_updated_fmt".')
-
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
diff --git a/api-ref/source/v1/samples/resource-schema-response.json b/api-ref/source/v1/samples/resource-schema-response.json
index 9f930f1fd..9d28b5025 100644
--- a/api-ref/source/v1/samples/resource-schema-response.json
+++ b/api-ref/source/v1/samples/resource-schema-response.json
@@ -1,7 +1,8 @@
{
"attributes": {
"an_attribute": {
- "description": "A runtime value of the resource."
+ "description": "A runtime value of the resource.",
+ "type": "string"
}
},
"properties": {
@@ -15,16 +16,17 @@
}
}
],
- "description": "A resource description.",
+ "description": "A resource property description.",
"required": true,
"type": "string",
- "update_allowed": false
+ "update_allowed": false,
+ "immutable": false
}
},
"resource_type": "OS::Heat::AResourceName",
"support_status": {
"message": "A status message",
"status": "SUPPORTED",
- "version": "2014.1"
+ "version": "10.0.0"
}
}
diff --git a/api-ref/source/v1/samples/resource-type-template-hot-response.json b/api-ref/source/v1/samples/resource-type-template-hot-response.json
index a6f04d1cf..3f97746b2 100644
--- a/api-ref/source/v1/samples/resource-type-template-hot-response.json
+++ b/api-ref/source/v1/samples/resource-type-template-hot-response.json
@@ -1,22 +1,38 @@
{
- "description": "Initial template of KeyPair",
"heat_template_version": "2016-10-14",
- "outputs": {
- "private_key": {
- "description": "The private key if it has been saved.",
- "value": "{\"get_attr\": [\"KeyPair\", \"private_key\"]}"
- },
+ "description": "Initial template of KeyPair",
+ "parameters": {
"public_key": {
- "description": "The public key.",
- "value": "{\"get_attr\": [\"KeyPair\", \"public_key\"]}"
+ "type": "string",
+ "description": "The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated."
+ },
+ "save_private_key": {
+ "default": false,
+ "type": "boolean",
+ "description": "True if the system should remember a generated private key; False otherwise."
+ },
+ "type": {
+ "type": "string",
+ "description": "Keypair type. Supported since Nova api version 2.2.",
+ "constraints": [
+ {
+ "allowed_values": [
+ "ssh",
+ "x509"
+ ]
+ }
+ ]
+ },
+ "user": {
+ "type": "string",
+ "description": "ID or name of user to whom to add key-pair. The usage of this property is limited to being used by administrators only. Supported since Nova api version 2.10.",
+ "constraints": [
+ {}
+ ]
},
- "show": {
- "description": "Detailed information about resource.",
- "value": "{\"get_attr\": [\"KeyPair\", \"show\"]}"
- }
- },
- "parameters": {
"name": {
+ "type": "string",
+ "description": "The name of the key pair.",
"constraints": [
{
"length": {
@@ -24,34 +40,63 @@
"min": 1
}
}
- ],
- "description": "The name of the key pair.",
- "type": "string"
- },
- "public_key": {
- "description": "The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated.",
- "type": "string"
- },
- "save_private_key": {
- "default": false,
- "description": "True if the system should remember a generated private key; False otherwise.",
- "type": "boolean"
+ ]
}
},
"resources": {
"KeyPair": {
+ "type": "OS::Nova::KeyPair",
"properties": {
- "name": {
- "get_param": "name"
- },
"public_key": {
"get_param": "public_key"
},
"save_private_key": {
"get_param": "save_private_key"
+ },
+ "type": {
+ "get_param": "type"
+ },
+ "user": {
+ "get_param": "user"
+ },
+ "name": {
+ "get_param": "name"
}
- },
- "type": "OS::Nova::KeyPair"
+ }
+ }
+ },
+ "outputs": {
+ "public_key": {
+ "description": "The public key.",
+ "value": {
+ "get_attr": [
+ "KeyPair",
+ "public_key"
+ ]
+ }
+ },
+ "private_key": {
+ "description": "The private key if it has been saved.",
+ "value": {
+ "get_attr": [
+ "KeyPair",
+ "private_key"
+ ]
+ }
+ },
+ "OS::stack_id": {
+ "value": {
+ "get_resource": "KeyPair"
+ }
+ },
+ "show": {
+ "description": "Detailed information about resource.",
+ "value": {
+ "get_attr": [
+ "KeyPair",
+ "show"
+ ]
+ }
}
}
}
diff --git a/api-ref/source/v1/samples/resource-type-template-response.json b/api-ref/source/v1/samples/resource-type-template-response.json
index 3cc189cd5..a645c88ff 100644
--- a/api-ref/source/v1/samples/resource-type-template-response.json
+++ b/api-ref/source/v1/samples/resource-type-template-response.json
@@ -1,57 +1,95 @@
{
- "Description": "Initial template of KeyPair",
"HeatTemplateFormatVersion": "2012-12-12",
- "Outputs": {
- "private_key": {
- "Description": "The private key if it has been saved.",
- "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"private_key\"]}"
- },
- "public_key": {
- "Description": "The public key.",
- "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"public_key\"]}"
- },
- "show": {
- "Description": "Detailed information about resource.",
- "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"show\"]}"
- }
- },
+ "Description": "Initial template of KeyPair",
"Parameters": {
- "name": {
- "Description": "The name of the key pair.",
- "MaxLength": 255,
- "MinLength": 1,
- "Type": "String"
- },
"public_key": {
- "Description": "The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated.",
- "Type": "String"
+ "Type": "String",
+ "Description": "The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated."
},
"save_private_key": {
+ "Default": false,
+ "Type": "Boolean",
+ "Description": "True if the system should remember a generated private key; False otherwise.",
"AllowedValues": [
"True",
"true",
"False",
"false"
- ],
- "Default": false,
- "Description": "True if the system should remember a generated private key; False otherwise.",
- "Type": "Boolean"
+ ]
+ },
+ "type": {
+ "Type": "String",
+ "Description": "Keypair type. Supported since Nova api version 2.2.",
+ "AllowedValues": [
+ "ssh",
+ "x509"
+ ]
+ },
+ "user": {
+ "Type": "String",
+ "Description": "ID or name of user to whom to add key-pair. The usage of this property is limited to being used by administrators only. Supported since Nova api version 2.10."
+ },
+ "name": {
+ "MinLength": 1,
+ "Type": "String",
+ "Description": "The name of the key pair.",
+ "MaxLength": 255
}
},
"Resources": {
"KeyPair": {
+ "Type": "OS::Nova::KeyPair",
"Properties": {
- "name": {
- "Ref": "name"
- },
"public_key": {
"Ref": "public_key"
},
"save_private_key": {
"Ref": "save_private_key"
+ },
+ "type": {
+ "Ref": "type"
+ },
+ "user": {
+ "Ref": "user"
+ },
+ "name": {
+ "Ref": "name"
}
- },
- "Type": "OS::Nova::KeyPair"
+ }
+ }
+ },
+ "Outputs": {
+ "public_key": {
+ "Description": "The public key.",
+ "Value": {
+ "Fn::GetAtt": [
+ "KeyPair",
+ "public_key"
+ ]
+ }
+ },
+ "private_key": {
+ "Description": "The private key if it has been saved.",
+ "Value": {
+ "Fn::GetAtt": [
+ "KeyPair",
+ "private_key"
+ ]
+ }
+ },
+ "OS::stack_id": {
+ "Value": {
+ "Ref": "KeyPair"
+ }
+ },
+ "show": {
+ "Description": "Detailed information about resource.",
+ "Value": {
+ "Fn::GetAtt": [
+ "KeyPair",
+ "show"
+ ]
+ }
}
}
}
diff --git a/bin/heat-api b/bin/heat-api
index 60288fa97..a8429bfe3 100755
--- a/bin/heat-api
+++ b/bin/heat-api
@@ -18,6 +18,9 @@
An OpenStack REST API to Heat.
"""
+import os
+import sys
+
from oslo_log import log as logging
@@ -27,8 +30,6 @@ LOG.warning('DEPRECATED: `heat-api` script is deprecated. Please use the '
'system level heat binaries installed to start '
'any of the heat services.')
-import os
-import sys
# If ../heat/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -39,6 +40,6 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'heat', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
-from heat.cmd import api
+from heat.cmd import api # noqa: E402
api.main()
diff --git a/bin/heat-api-cfn b/bin/heat-api-cfn
index 28358631c..6208c1f44 100755
--- a/bin/heat-api-cfn
+++ b/bin/heat-api-cfn
@@ -20,6 +20,9 @@ translates it into a native representation. It then calls the heat-engine via
AMQP RPC to implement them.
"""
+import os
+import sys
+
from oslo_log import log as logging
@@ -29,9 +32,6 @@ LOG.warning('DEPRECATED: `heat-api-cfn` script is deprecated. Please use '
'the system level heat binaries installed to start '
'any of the heat services.')
-import os
-import sys
-
# If ../heat/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -41,6 +41,6 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'heat', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
-from heat.cmd import api_cfn
+from heat.cmd import api_cfn # noqa: E402
api_cfn.main()
diff --git a/bin/heat-engine b/bin/heat-engine
index 4a698bbe7..13f50f9d3 100755
--- a/bin/heat-engine
+++ b/bin/heat-engine
@@ -20,6 +20,10 @@ Normal communications is done via the heat API which then calls into this
engine.
"""
+
+import os
+import sys
+
from oslo_log import log as logging
@@ -29,8 +33,6 @@ LOG.warning('DEPRECATED: `heat-engine` script is deprecated. '
'Please use the system level heat binaries installed to '
'start any of the heat services.')
-import os
-import sys
# If ../heat/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -41,6 +43,6 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'heat', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
-from heat.cmd import engine
+from heat.cmd import engine # noqa: E402
engine.main()
diff --git a/bin/heat-manage b/bin/heat-manage
index d51374fbf..34e4a0107 100755
--- a/bin/heat-manage
+++ b/bin/heat-manage
@@ -13,6 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+
+import os
+import sys
+
from oslo_log import log as logging
@@ -21,8 +25,6 @@ LOG = logging.getLogger(__name__)
LOG.warning('DEPRECATED: `heat-manage` script is deprecated. Please use '
'the system level heat-manage binary.')
-import os
-import sys
# If ../heat/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -32,6 +34,6 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'heat', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
-from heat.cmd import manage
+from heat.cmd import manage # noqa: E402
manage.main()
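
Illustrative sketch (not part of this change): all four wrapper scripts above get the same treatment. The stdlib imports move to the top, and the late ``heat.cmd`` import that has to follow the ``sys.path`` manipulation is annotated with ``# noqa: E402`` so flake8 accepts it. Condensed, the shared shape now looks roughly like this (``heat-api`` shown; the others differ only in the module they import)::

    import os
    import sys

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)
    LOG.warning('DEPRECATED: `heat-api` script is deprecated. Please use the '
                'system level heat binaries installed to start '
                'any of the heat services.')

    # If ../heat/__init__.py exists, prefer the in-tree checkout over whatever
    # happens to be installed in /usr/(local/)lib/python...
    POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                    os.pardir,
                                                    os.pardir))
    if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'heat', '__init__.py')):
        sys.path.insert(0, POSSIBLE_TOPDIR)

    from heat.cmd import api  # noqa: E402

    api.main()
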
diff --git a/config-generator.conf b/config-generator.conf
index 752851d66..e6e8c910c 100644
--- a/config-generator.conf
+++ b/config-generator.conf
@@ -13,7 +13,6 @@ namespace = heat.api.aws.ec2token
namespace = keystonemiddleware.auth_token
namespace = oslo.messaging
namespace = oslo.middleware
-namespace = oslo.db
namespace = oslo.log
namespace = oslo.policy
namespace = oslo.service.service
diff --git a/contrib/heat_docker/heat_docker/resources/docker_container.py b/contrib/heat_docker/heat_docker/resources/docker_container.py
index b21225da6..a6703bc16 100644
--- a/contrib/heat_docker/heat_docker/resources/docker_container.py
+++ b/contrib/heat_docker/heat_docker/resources/docker_container.py
@@ -17,7 +17,6 @@
import distutils
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -340,7 +339,7 @@ class DockerContainer(resource.Resource):
def _parse_networkinfo_ports(self, networkinfo):
tcp = []
udp = []
- for port, info in six.iteritems(networkinfo['Ports']):
+ for port, info in networkinfo['Ports'].items():
p = port.split('/')
if not info or len(p) != 2 or 'HostPort' not in info[0]:
continue
diff --git a/contrib/heat_docker/heat_docker/tests/fake_docker_client.py b/contrib/heat_docker/heat_docker/tests/fake_docker_client.py
index 98d7747d8..ecb79789d 100644
--- a/contrib/heat_docker/heat_docker/tests/fake_docker_client.py
+++ b/contrib/heat_docker/heat_docker/tests/fake_docker_client.py
@@ -14,9 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import random
import string
+from unittest import mock
class APIError(Exception):
diff --git a/contrib/heat_docker/heat_docker/tests/test_docker_container.py b/contrib/heat_docker/heat_docker/tests/test_docker_container.py
index a183723dc..19c872a12 100644
--- a/contrib/heat_docker/heat_docker/tests/test_docker_container.py
+++ b/contrib/heat_docker/heat_docker/tests/test_docker_container.py
@@ -14,8 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common.i18n import _
@@ -126,7 +125,7 @@ class DockerContainerTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceInError,
docker_res.check_create_complete,
'foo')
- self.assertIn("Container startup failed", six.text_type(exc))
+ self.assertIn("Container startup failed", str(exc))
def test_start_with_bindings_and_links(self):
t = template_format.parse(template)
@@ -331,7 +330,7 @@ class DockerContainerTest(common.HeatTestCase):
args = dict(arg=arg, min_version=min_version)
expected = _('"%(arg)s" is not supported for API version '
'< "%(min_version)s"') % args
- self.assertEqual(expected, six.text_type(msg))
+ self.assertEqual(expected, str(msg))
def test_start_with_read_only_for_low_api_version(self):
self.arg_for_low_api_version('read_only', True, '1.16')
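
Illustrative sketch (not part of this change): the contrib test changes above follow the project-wide removal of ``six``. ``six.iteritems(d)`` becomes ``d.items()``, ``six.text_type(obj)`` becomes ``str(obj)``, and the external ``mock`` package gives way to ``unittest.mock``. A small stand-alone example of the Python 3 idioms being adopted::

    from unittest import mock  # replaces "import mock"

    networkinfo = {'Ports': {'80/tcp': [{'HostPort': '8080'}],
                             '53/udp': [{'HostPort': '5353'}]}}

    # dict.items() replaces six.iteritems()
    tcp = [info[0]['HostPort']
           for port, info in networkinfo['Ports'].items()
           if info and port.endswith('/tcp')]
    assert tcp == ['8080']

    # str() replaces six.text_type() when rendering exceptions
    exc = RuntimeError('Container startup failed')
    assert 'Container startup failed' in str(exc)

    # unittest.mock is a drop-in replacement for the third-party mock library
    client = mock.MagicMock()
    client.inspect_container.return_value = networkinfo
    assert client.inspect_container()['Ports']
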
diff --git a/contrib/heat_docker/setup.cfg b/contrib/heat_docker/setup.cfg
index 7e56e1531..40cee7f2b 100644
--- a/contrib/heat_docker/setup.cfg
+++ b/contrib/heat_docker/setup.cfg
@@ -4,7 +4,7 @@ summary = Heat resource for Docker containers
description-file =
README.md
author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/heat/latest/
classifier =
Environment :: OpenStack
diff --git a/devstack/lib/heat b/devstack/lib/heat
index 82e88e6c3..5993d7de2 100644
--- a/devstack/lib/heat
+++ b/devstack/lib/heat
@@ -31,10 +31,6 @@ set +o xtrace
# set up default directories
GITDIR["python-heatclient"]=$DEST/python-heatclient
-# heat service
-HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git}
-HEAT_BRANCH=${HEAT_BRANCH:-master}
-
# python heat client library
GITREPO["python-heatclient"]=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git}
GITBRANCH["python-heatclient"]=${HEATCLIENT_BRANCH:-master}
@@ -51,7 +47,7 @@ HEAT_CONF_DIR=/etc/heat
HEAT_CONF=$HEAT_CONF_DIR/heat.conf
HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d
HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates
-HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
+HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST}
HEAT_API_PORT=${HEAT_API_PORT:-8004}
HEAT_SERVICE_USER=${HEAT_SERVICE_USER:-heat}
HEAT_TRUSTEE_USER=${HEAT_TRUSTEE_USER:-$HEAT_SERVICE_USER}
@@ -82,6 +78,7 @@ else
fi
HEAT_PLUGIN_DIR=${HEAT_PLUGIN_DIR:-$DATA_DIR/heat/plugins}
ENABLE_HEAT_PLUGINS=${ENABLE_HEAT_PLUGINS:-}
+HEAT_ENGINE_WORKERS=${HEAT_ENGINE_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))}
# Functions
# ---------
@@ -111,7 +108,7 @@ function configure_heat {
# remove old config files
rm -f $HEAT_CONF_DIR/heat-*.conf
- HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$HOST_IP}
+ HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST}
HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000}
HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST}
HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001}
@@ -139,6 +136,9 @@ function configure_heat {
# logging
iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ # reduce Heat engine workers
+ iniset $HEAT_CONF DEFAULT num_engine_workers "$HEAT_ENGINE_WORKERS"
+
local no_format="False"
if [[ "$HEAT_USE_APACHE" == "True" && "$WSGI_MODE" != "uwsgi" ]]; then
no_format="True"
@@ -276,7 +276,6 @@ function install_heatclient {
# install_heat() - Collect source and prepare
function install_heat {
- git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
setup_develop $HEAT_DIR
if [[ "$HEAT_USE_APACHE" == "True" ]]; then
if [ "$WSGI_MODE" == "uwsgi" ]; then
@@ -299,10 +298,6 @@ function start_heat {
enable_apache_site heat-api
enable_apache_site heat-api-cfn
restart_apache_server
- tail_log heat-api /var/log/$APACHE_NAME/heat_api.log
- tail_log heat-api-access /var/log/$APACHE_NAME/heat_api_access.log
- tail_log heat-api-cfn /var/log/$APACHE_NAME/heat_api_cfn.log
- tail_log heat-api-cfn-access /var/log/$APACHE_NAME/heat_api_cfn_access.log
else
run_process h-api "$HEAT_BIN_DIR/uwsgi --ini $HEAT_API_UWSGI_CONF" ""
run_process h-api-cfn "$HEAT_BIN_DIR/uwsgi --ini $HEAT_CFN_API_UWSGI_CONF" ""
@@ -419,13 +414,13 @@ function create_heat_accounts {
get_or_create_endpoint \
"orchestration" \
"$REGION_NAME" \
- "$heat_api_service_url" "$heat_api_service_url" "$heat_api_service_url"
+ "$heat_api_service_url"
get_or_create_service "heat-cfn" "cloudformation" "Heat CloudFormation Service"
get_or_create_endpoint \
"cloudformation" \
"$REGION_NAME" \
- "$heat_cfn_api_service_url" "$heat_cfn_api_service_url" "$heat_cfn_api_service_url"
+ "$heat_cfn_api_service_url"
# heat_stack_user role is for users created by Heat
get_or_create_role "heat_stack_user"
@@ -446,15 +441,58 @@ function create_heat_accounts {
# NOTE (gmann): Configure all the Tempest setting for Heat service in
# this function.
function configure_tempest_for_heat {
- if is_service_enabled tempest; then
- iniset $TEMPEST_CONFIG service_available heat True
+ # Skip SoftwareConfigIntegrationTest because it requires a custom image
+ # Skip AutoscalingLoadBalancerTest and AutoscalingLoadBalancerv2Test as deprecated neutron-lbaas service is not enabled
+ # Skip CfnInitIntegrationTest as latest fedora images don't have heat-cfntools
+ iniset $TEMPEST_CONFIG heat_plugin skip_scenario_test_list 'AutoscalingLoadBalancerTest, AutoscalingLoadBalancerv2Test, \
+ SoftwareConfigIntegrationTest, AodhAlarmTest, CfnInitIntegrationTest'
+ # Skip LoadBalancerv2Test as deprecated neutron-lbaas service is not enabled
+ iniset $TEMPEST_CONFIG heat_plugin skip_functional_test_list 'LoadBalancerv2Test, NotificationTest'
+
+ openstack flavor show m1.heat_int || openstack flavor create m1.heat_int --ram 512 --disk 4
+ openstack flavor show m1.heat_micro || openstack flavor create m1.heat_micro --ram 128 --disk 1
+
+ source $TOP_DIR/openrc demo demo
+ openstack network show heat-net || openstack network create heat-net
+ openstack subnet show heat-subnet || openstack subnet create heat-subnet --network heat-net --subnet-range 10.0.5.0/24
+ openstack router add subnet router1 heat-subnet
+
+ iniset $TEMPEST_CONFIG heat_plugin username $OS_USERNAME
+ iniset $TEMPEST_CONFIG heat_plugin password $OS_PASSWORD
+ iniset $TEMPEST_CONFIG heat_plugin project_name $OS_PROJECT_NAME
+ iniset $TEMPEST_CONFIG heat_plugin auth_url $OS_AUTH_URL
+ iniset $TEMPEST_CONFIG heat_plugin user_domain_id $OS_USER_DOMAIN_ID
+ iniset $TEMPEST_CONFIG heat_plugin project_domain_id $OS_PROJECT_DOMAIN_ID
+ iniset $TEMPEST_CONFIG heat_plugin user_domain_name $OS_USER_DOMAIN_NAME
+ iniset $TEMPEST_CONFIG heat_plugin project_domain_name $OS_PROJECT_DOMAIN_NAME
+ iniset $TEMPEST_CONFIG heat_plugin region $OS_REGION_NAME
+ iniset $TEMPEST_CONFIG heat_plugin auth_version $OS_IDENTITY_API_VERSION
+
+ source $TOP_DIR/openrc admin admin
+ iniset $TEMPEST_CONFIG heat_plugin admin_username $OS_USERNAME
+ iniset $TEMPEST_CONFIG heat_plugin admin_password $OS_PASSWORD
+ if [[ -e /etc/ci/mirror_info.sh ]]; then
+ source /etc/ci/mirror_info.sh
+ fi
+ export HEAT_TEST_FEDORA_IMAGE=${NODEPOOL_FEDORA_MIRROR:-https://download.fedoraproject.org/pub/fedora/linux}/releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2
+ TOKEN=$(openstack token issue -c id -f value)
+ local image_exists=$( openstack image list | grep "Fedora-Cloud-Base-33-1.2.x86_64" )
+ if [[ -z $image_exists ]]; then
+ if is_service_enabled g-api; then
+ upload_image $HEAT_TEST_FEDORA_IMAGE $TOKEN
+ fi
fi
+
+ if is_service_enabled tls-proxy; then
+ iniset $TEMPEST_CONFIG heat_plugin ca_file $SSL_BUNDLE_FILE
+ fi
+ # add application credential and secret to support test multi-cloud
+ app_cred_id=$(openstack application credential show heat_multicloud || openstack application credential create heat_multicloud \
+ --secret secret --unrestricted -c id -f value)
+ export OS_CREDENTIAL_SECRET_ID=$(openstack secret store -n heat-multi-cloud-test-cred --payload \
+ '{"auth_type": "v3applicationcredential", "auth": {"auth_url": $OS_AUTH_URL, "application_credential_id": $app_cred_id, "application_credential_secret": "secret"}}'\
+ -c "Secret href" -f value)
}
# Restore xtrace
$_XTRACE_HEAT
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
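
Illustrative sketch (not part of this change): the new ``HEAT_ENGINE_WORKERS`` default above caps ``num_engine_workers`` at a quarter of the available CPUs, with a floor of two. The shell arithmetic is equivalent to this small Python helper::

    import os

    def default_engine_workers(ncpu=None):
        """Mirror HEAT_ENGINE_WORKERS: nproc / 4, but never fewer than 2."""
        ncpu = ncpu if ncpu is not None else (os.cpu_count() or 1)
        return max(2, ncpu // 4)

    assert default_engine_workers(2) == 2
    assert default_engine_workers(8) == 2
    assert default_engine_workers(16) == 4
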
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index ae919b6bb..86c8b0985 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -18,7 +18,8 @@ if is_heat_enabled; then
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
if is_service_enabled tempest; then
- setup_develop $TEMPEST_DIR
+ echo_summary "Configuring Tempest for Heat"
+ configure_tempest_for_heat
fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
@@ -35,10 +36,6 @@ if is_heat_enabled; then
# Start the heat API and heat taskmgr components
echo_summary "Starting heat"
start_heat
-
- elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
- echo_summary "Configuring Tempest for Heat"
- configure_tempest_for_heat
fi
if [[ "$1" == "unstack" ]]; then
diff --git a/devstack/settings b/devstack/settings
index f185d2a65..089c00e4f 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -1,7 +1,7 @@
# Devstack settings
# We have to add Heat to enabled services for screen_it to work
-# It consists of 4 parts
+# It consists of three parts
enable_service h-eng
enable_service h-api
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 94f9fa042..d47bc45af 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -2,10 +2,9 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-openstackdocstheme>=1.18.1 # Apache-2.0
+openstackdocstheme>=2.2.1 # Apache-2.0
os-api-ref>=1.4.0 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,>=1.6.2;python_version>='3.4' # BSD
-sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD
-reno>=2.5.0 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+reno>=3.1.0 # Apache-2.0
sphinxcontrib-apidoc>=0.2.0 # BSD
sphinxcontrib-httpdomain>=1.3.0 # BSD
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
index 83cb01a95..566b27e8e 100644
--- a/doc/source/_extra/.htaccess
+++ b/doc/source/_extra/.htaccess
@@ -1,3 +1,5 @@
-redirectmatch 301 ^/heat/([^/]+)/(architecture|pluginguide|schedulerhints|gmr|supportstatus)\.html$ /heat/$1/developing_guides/$2.html
+redirectmatch 301 ^/heat/([^/]+)/(architecture|pluginguide|schedulerhints|gmr|supportstatus)\.html$ /heat/$1/contributor/$2.html
+redirectmatch 301 ^/heat/([^/]+)/developing_guides/(index|architecture|pluginguide|schedulerhints|gmr|supportstatus)\.html$ /heat/$1/contributor/$2.html
redirectmatch 301 ^/heat/([^/]+)/(scale_deployment)\.html$ /heat/$1/operating_guides/$2.html
redirectmatch 301 ^/heat/([^/]+)/configuration/(api|clients)\.html /heat/$1/configuration/config-options.html
+redirectmatch 301 ^/heat/([^/]+)/contributing/(index|blueprints)\.html /heat/$1/contributor/$2.html
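
Illustrative sketch (not part of this change): the added ``redirectmatch`` rules keep old ``developing_guides`` and ``contributing`` URLs working after the move to ``contributor/``. Assuming Python's ``re`` syntax is close enough to Apache's for these patterns, the second rule can be sanity-checked like this::

    import re

    rule = re.compile(r'^/heat/([^/]+)/developing_guides/'
                      r'(index|architecture|pluginguide|schedulerhints|gmr|supportstatus)\.html$')

    old = '/heat/latest/developing_guides/pluginguide.html'
    new = rule.sub(r'/heat/\1/contributor/\2.html', old)
    assert new == '/heat/latest/contributor/pluginguide.html'
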
diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/doc/source/_static/.placeholder
diff --git a/doc/source/admin/auth-model.rst b/doc/source/admin/auth-model.rst
index 4c7373103..ab8bf6954 100644
--- a/doc/source/admin/auth-model.rst
+++ b/doc/source/admin/auth-model.rst
@@ -77,7 +77,8 @@ which is consumed by *only* the trustee to obtain a
such that the trustee has limited access to those
roles delegated. In addition, the trustee has effective impersonation
of the trustor user if it was selected when creating the trust.
-For more information, see `Identity Management trusts`_.
+For more information, see :keystone-doc:`Identity management trusts
+<user/trusts.html>`.
Trusts authorization involves the following steps:
@@ -146,5 +147,3 @@ defined, then all the trustor roles are delegated to trustee.
The trustor delegated roles must be pre-configured in the
OpenStack Identity service before using them in the Orchestration service.
-
-.. _Identity management trusts: <https://docs.openstack.org/keystone/latest/admin/identity-use-trusts.html>
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index f30e3de76..450c895c9 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -1,6 +1,6 @@
-=================
-Adminstering Heat
-=================
+==================
+Administering Heat
+==================
.. toctree::
:maxdepth: 2
diff --git a/doc/source/admin/introduction.rst b/doc/source/admin/introduction.rst
index 61187b9c5..a66673413 100644
--- a/doc/source/admin/introduction.rst
+++ b/doc/source/admin/introduction.rst
@@ -28,5 +28,5 @@ integrated with the OpenStack dashboard to perform stack functions through
a web interface.
For more information about using the Orchestration service through the
-command line, see the `Heat Command-Line Interface reference
-<https://docs.openstack.org/python-heatclient/latest/#openstackclient-command-line>`_.
+command line, see the :python-heatclient-doc:`Heat Command-Line Interface
+reference <#openstackclient-command-line>`.
diff --git a/doc/source/admin/stack-domain-users.rst b/doc/source/admin/stack-domain-users.rst
index f5f043bc7..fa869d943 100644
--- a/doc/source/admin/stack-domain-users.rst
+++ b/doc/source/admin/stack-domain-users.rst
@@ -125,7 +125,8 @@ The following steps are run during stack creation:
in the stack domain are still assigned the ``heat_stack_user`` role, so
the API surface they can access is limited through
the :file:`policy.json` file.
- For more information, see `OpenStack Identity documentation`_.
+ For more information, see :keystone-doc:`OpenStack Identity documentation
+ <>`.
#. When API requests are processed, the Orchestration service performs
an internal lookup, and allows stack details for a given stack to be
@@ -149,5 +150,3 @@ or::
The stack owner uses the former (via ``openstack stack resource metadata
STACK RESOURCE``), and any agents in the instance
use the latter.
-
-.. _OpenStack Identity documentation: https://docs.openstack.org/keystone/latest/
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 053241144..eb772897a 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -26,10 +26,8 @@
import glob
import os
-import subprocess
import sys
import tempfile
-import warnings
from oslo_config import cfg
@@ -96,12 +94,25 @@ config_generator_config_file = '../../config-generator.conf'
sample_config_basename = '_static/heat'
# openstackdocstheme options
-repository_name = 'openstack/heat'
-bug_project = '989'
-bug_tag = 'docs'
+openstackdocs_repo_name = 'openstack/heat'
+openstackdocs_pdf_link = True
+openstackdocs_use_storyboard = True
todo_include_todos = True
+# openstackdocstheme external link helper projects
+openstackdocs_projects = [
+ 'devstack',
+ 'diskimage-builder',
+ 'keystone',
+ 'keystoneauth',
+ 'nova',
+ 'oslo.reports',
+ 'python-barbicanclient',
+ 'python-heatclient',
+ 'python-openstackclient',
+]
+
# Add any paths that contain templates here, relative to this directory.
templates_path = []
@@ -115,7 +126,6 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'Heat'
copyright = u'(c) 2012- Heat Developers'
# The language for content autogenerated by Sphinx. Refer to documentation
@@ -148,7 +158,7 @@ exclude_patterns = ['**/#*', '**~', '**/#*#']
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['heat.']
@@ -183,7 +193,6 @@ apidoc_excluded_paths = [
# html_theme = '_theme'
html_theme = 'openstackdocs'
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
@@ -217,17 +226,6 @@ html_static_path = ['_static']
# robots.txt.
html_extra_path = ['_extra']
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
- "-n1"]
-try:
- html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8')
-except Exception:
- warnings.warn('Cannot get last updated time from git repository. '
- 'Not setting "html_last_updated_fmt".')
-
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
@@ -271,21 +269,10 @@ htmlhelp_basename = 'Heatdoc'
# -- Options for LaTeX output -------------------------------------------------
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- # 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- # 'preamble': '',
-}
-
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
- ('index', 'Heat.tex', u'Heat Documentation',
+ ('index', 'doc-heat.tex', u'Heat Documentation',
u'Heat Developers', 'manual'),
]
@@ -307,8 +294,16 @@ latex_documents = [
# latex_appendices = []
# If false, no module index is generated.
-# latex_domain_indices = True
+latex_domain_indices = False
+
+# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
+latex_use_xindy = False
+latex_elements = {
+ 'makeindex': '',
+ 'printindex': '',
+ 'preamble': r'\setcounter{tocdepth}{3}',
+}
# -- Options for manual page output -------------------------------------------
diff --git a/doc/source/configuration/sample_config.rst b/doc/source/configuration/sample_config.rst
index f572f4844..5e4b4fc1b 100644
--- a/doc/source/configuration/sample_config.rst
+++ b/doc/source/configuration/sample_config.rst
@@ -7,6 +7,14 @@ auto-generated from heat when this documentation is built, so if you are
having issues with an option, please compare your version of heat with the
version of this documentation.
-The sample configuration can also be downloaded in `file form <../_static/heat.conf.sample>`_.
+.. only:: html
-.. literalinclude:: ../_static/heat.conf.sample
+ The sample configuration can also be downloaded in
+ `file form <../_static/heat.conf.sample>`_.
+
+ .. literalinclude:: ../_static/heat.conf.sample
+
+.. only:: latex
+
+ See the online version of this documentation for the full example config
+ file.
diff --git a/doc/source/contributing/index.rst b/doc/source/contributing/index.rst
deleted file mode 100644
index 7acea26b8..000000000
--- a/doc/source/contributing/index.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-Heat Contribution Guidelines
-============================
-
-In the Contributions Guide, you will find documented policies for
-developing with heat. This includes the processes we use for
-blueprints and specs, bugs, contributor onboarding, core reviewer
-memberships, and other procedural items.
-
-Policies
---------
-.. toctree::
- :maxdepth: 3
-
- blueprints
-.. bugs
- contributor-onboarding
- core-reviewers
- gate-failure-triage
- code-reviews
diff --git a/doc/source/developing_guides/architecture.rst b/doc/source/contributor/architecture.rst
index 17ba51040..17ba51040 100644
--- a/doc/source/developing_guides/architecture.rst
+++ b/doc/source/contributor/architecture.rst
diff --git a/doc/source/contributing/blueprints.rst b/doc/source/contributor/blueprints.rst
index e8d7f74e0..e8d7f74e0 100644
--- a/doc/source/contributing/blueprints.rst
+++ b/doc/source/contributor/blueprints.rst
diff --git a/doc/source/developing_guides/gmr.rst b/doc/source/contributor/gmr.rst
index f2daeaae8..26d1d41f7 100644
--- a/doc/source/developing_guides/gmr.rst
+++ b/doc/source/contributor/gmr.rst
@@ -90,4 +90,4 @@ Extending the GMR
As mentioned above, additional sections can be added to the GMR for a
particular executable. For more information, see the documentation about
-`oslo.reports <https://docs.openstack.org/oslo.reports/>`_
+:oslo.reports-doc:`oslo.reports <>`.
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
new file mode 100644
index 000000000..ac4481fb3
--- /dev/null
+++ b/doc/source/contributor/index.rst
@@ -0,0 +1,26 @@
+Heat Contributor Guidelines
+===========================
+
+In the contributor guide, you will find documented policies for
+developing with heat. This includes the processes we use for
+blueprints and specs, bugs, contributor onboarding, core reviewer
+memberships, and other procedural items.
+
+.. note:: This guideline also includes documentation for developers.
+
+.. toctree::
+ :maxdepth: 3
+
+ ../getting_started/on_devstack
+ blueprints
+ architecture
+ pluginguide
+ schedulerhints
+ gmr
+ supportstatus
+ rally_on_gates
+.. bugs
+ contributor-onboarding
+ core-reviewers
+ gate-failure-triage
+ code-reviews
diff --git a/doc/source/developing_guides/pluginguide.rst b/doc/source/contributor/pluginguide.rst
index eb69449ac..7177c4b38 100644
--- a/doc/source/developing_guides/pluginguide.rst
+++ b/doc/source/contributor/pluginguide.rst
@@ -248,7 +248,7 @@ the end user.
*AllowedValues(allowed, description)*:
Lists the allowed values. ``allowed`` must be a
- ``collections.Sequence`` or ``basestring``. Applicable to all types
+ ``collections.abc.Sequence`` or ``basestring``. Applicable to all types
of value except MAP.
*Length(min, max, description)*:
@@ -347,16 +347,19 @@ overridden:
"""Default implementation; should be overridden by resources.
:returns: the map of resource information or None
- """
+ """
if self.entity:
try:
obj = getattr(self.client(), self.entity)
resource = obj.get(self.resource_id)
- return resource.to_dict()
- except AttributeError as ex:
- LOG.warning(_LW("Resolving 'show' attribute has "
- "failed : %s"), ex)
- return None
+ if isinstance(resource, dict):
+ return resource
+ else:
+ return resource.to_dict()
+ except AttributeError as ex:
+ LOG.warning("Resolving 'show' attribute has failed : %s",
+ ex)
+ return None
Property and Attribute Example
******************************
@@ -477,16 +480,16 @@ that updates require the engine to delete and re-create the resource
Update the physical resources using updated information.
:param json_snippet: the resource definition from the updated template
- :type json_snippet: collections.Mapping
+ :type json_snippet: collections.abc.Mapping
:param tmpl_diff: values in the updated definition that have changed
with respect to the original template definition.
- :type tmpl_diff: collections.Mapping
+ :type tmpl_diff: collections.abc.Mapping
:param prop_diff: property values that are different between the original
definition and the updated definition; keys are
property names and values are the new values. Deleted or
properties that were originally present but now absent
have values of ``None``
- :type prop_diff: collections.Mapping
+ :type prop_diff: collections.abc.Mapping
*Note* Before calling ``handle_update`` we check whether need to replace
the resource, especially for resource in ``*_FAILED`` state, there is a
@@ -642,8 +645,8 @@ your resource plugin! This has previously caused `problems
<https://bugs.launchpad.net/heat/+bug/1554625>`_ for multiple operations,
usually due to uncaught exceptions, If you feel you need to override
`add_dependencies()`, please reach out to Heat developers on the `#heat` IRC
-channel on FreeNode or on the `openstack-dev
-<mailto:openstack-dev@lists.openstack.org>`_ mailing list to discuss the
+channel on FreeNode or on the `openstack-discuss
+<mailto:openstack-discuss@lists.openstack.org>`_ mailing list to discuss the
possibility of a better solution.
Registering Resource Plug-ins
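
Illustrative sketch (not part of this change): the ``collections.abc.Sequence`` wording above describes the ``allowed`` argument of ``AllowedValues`` as used in a plugin's property schema. A minimal property definition, assuming the usual ``heat.engine`` plugin API::

    from heat.common.i18n import _
    from heat.engine import constraints
    from heat.engine import properties
    from heat.engine import resource


    class ExampleKeyPair(resource.Resource):
        """Toy plugin with a property constrained by AllowedValues."""

        PROPERTIES = (TYPE,) = ('type',)

        properties_schema = {
            TYPE: properties.Schema(
                properties.Schema.STRING,
                _('Keypair type.'),
                # 'allowed' must be a collections.abc.Sequence (or a string).
                constraints=[constraints.AllowedValues(['ssh', 'x509'])],
                default='ssh',
            ),
        }
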
diff --git a/doc/source/developing_guides/rally_on_gates.rst b/doc/source/contributor/rally_on_gates.rst
index 5909fe7ac..61fe237d3 100644
--- a/doc/source/developing_guides/rally_on_gates.rst
+++ b/doc/source/contributor/rally_on_gates.rst
@@ -57,7 +57,7 @@ Check performance or how to detect regression
The easiest way of using Rally is to execute already existing scenarios.
One of the examples is presented in patch
-https://review.openstack.org/#/c/279450/ . In this patch was executed scenario
+https://review.opendev.org/#/c/279450/ . In this patch was executed scenario
already existing in Rally ``HeatStacks.create_and_delete_stack``.
During executing this scenario Rally creates and then, when stack is created,
delete Heat stack. All existing scenarios can be found here:
@@ -94,7 +94,7 @@ is disabled (e.g. Patch Set 7). The follow results were gotten:
In the next patch set (Patch Set 8) was updated by adding Depends-On reference
to commit message. It let to execute the same test with patch for devstack,
-which turns on caching (https://review.openstack.org/#/c/279400/).
+which turns on caching (https://review.opendev.org/#/c/279400/).
The results for this case were:
+------------------+----------+----------+----------+--------+------+
@@ -130,7 +130,7 @@ Compare output API performance
Another example of using Rally job is writing custom Rally scenarios in Heat
repository. There is an example of this is presented on review:
-https://review.openstack.org/#/c/270225/
+https://review.opendev.org/#/c/270225/
It's similar on the first example, but requires more Rally specific coding.
New tasks in ``heat-fakevirt.yaml`` use undefined in Rally repository
diff --git a/doc/source/developing_guides/schedulerhints.rst b/doc/source/contributor/schedulerhints.rst
index 44b14ebf4..44b14ebf4 100644
--- a/doc/source/developing_guides/schedulerhints.rst
+++ b/doc/source/contributor/schedulerhints.rst
diff --git a/doc/source/developing_guides/supportstatus.rst b/doc/source/contributor/supportstatus.rst
index ef49459b6..ef49459b6 100644
--- a/doc/source/developing_guides/supportstatus.rst
+++ b/doc/source/contributor/supportstatus.rst
diff --git a/doc/source/developing_guides/index.rst b/doc/source/developing_guides/index.rst
deleted file mode 100644
index 8b29ef4b9..000000000
--- a/doc/source/developing_guides/index.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-..
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-:orphan:
-
-Developing Guides
-=================
-
-.. toctree::
- :maxdepth: 1
-
- ../contributing/index
- ../getting_started/on_devstack
- architecture
- pluginguide
- schedulerhints
- gmr
- supportstatus
- rally_on_gates
diff --git a/doc/source/ext/resources.py b/doc/source/ext/resources.py
index 32a0dc3ff..0c3e735f4 100644
--- a/doc/source/ext/resources.py
+++ b/doc/source/ext/resources.py
@@ -19,7 +19,6 @@ import pydoc
from docutils import core
from docutils import nodes
from docutils.parsers import rst
-import six
from heat.common.i18n import _
from heat.engine import attributes
@@ -27,20 +26,24 @@ from heat.engine import plugin_manager
from heat.engine import properties
from heat.engine import support
-_CODE_NAMES = {'2013.1': 'Grizzly',
- '2013.2': 'Havana',
- '2014.1': 'Icehouse',
- '2014.2': 'Juno',
- '2015.1': 'Kilo',
- '5.0.0': 'Liberty',
- '6.0.0': 'Mitaka',
- '7.0.0': 'Newton',
- '8.0.0': 'Ocata',
- '9.0.0': 'Pike',
- '10.0.0': 'Queens',
- '11.0.0': 'Rocky',
- '12.0.0': 'Stein',
- '13.0.0': 'Train'}
+_CODE_NAMES = {
+ '2013.1': 'Grizzly',
+ '2013.2': 'Havana',
+ '2014.1': 'Icehouse',
+ '2014.2': 'Juno',
+ '2015.1': 'Kilo',
+ '5.0.0': 'Liberty',
+ '6.0.0': 'Mitaka',
+ '7.0.0': 'Newton',
+ '8.0.0': 'Ocata',
+ '9.0.0': 'Pike',
+ '10.0.0': 'Queens',
+ '11.0.0': 'Rocky',
+ '12.0.0': 'Stein',
+ '13.0.0': 'Train',
+ '14.0.0': 'Ussuri',
+ '15.0.0': 'Victoria',
+}
all_resources = {}
@@ -245,7 +248,10 @@ resources:
if not prop.implemented:
para = nodes.line('', _('Not implemented.'))
note = nodes.note('', para)
- definition.append(note)
+ if sub_prop:
+ definition.append(para)
+ else:
+ definition.append(note)
return
if sub_prop and prop.type not in (properties.Schema.LIST,
@@ -286,6 +292,8 @@ resources:
default = nodes.literal('', json.dumps(prop.default))
para.append(default)
definition.append(para)
+ elif prop_key == 'description' and prop.update_allowed:
+ para = nodes.line('', _('Defaults to the resource description'))
for constraint in prop.constraints:
para = nodes.line('', str(constraint))
@@ -351,11 +359,14 @@ resources:
if not self.attrs_schemata:
return
section = self._section(parent, _('Attributes'), '%s-attrs')
+ definition_list = nodes.definition_list()
+ section.append(definition_list)
+
for prop_key, prop in sorted(self.attrs_schemata.items()):
if prop.support_status.status != support.HIDDEN:
description = prop.description
attr_section = self._prop_section(
- section, prop_key, '%s-attr-' + prop_key)
+ definition_list, prop_key, '%s-attr-' + prop_key)
self._status_str(prop.support_status, attr_section)
@@ -367,9 +378,12 @@ resources:
if not self.update_policy_schemata:
return
section = self._section(parent, _('update_policy'), '%s-updpolicy')
+ definition_list = nodes.definition_list()
+ section.append(definition_list)
+
for _key, _prop in sorted(self.update_policy_schemata.items(),
key=cmp_to_key(self.cmp_prop)):
- self.contribute_property(section, _key, _prop)
+ self.contribute_property(definition_list, _key, _prop)
class IntegrateResourcePages(ResourcePages):
@@ -422,7 +436,7 @@ def _filter_resources(prefix=None, path=None, statuses=None):
else:
filtered_resources[name] = [cls]
- return sorted(six.iteritems(filtered_resources))
+ return sorted(filtered_resources.items())
def _load_all_resources():
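
Illustrative sketch (not part of this change): the extension change above appends attribute and update_policy entries to a single ``definition_list`` node instead of adding them directly to the section, so they render as one definition list. The node structure it now builds is roughly::

    from docutils import nodes

    section = nodes.section(ids=['example-attrs'])
    section.append(nodes.title(text='Attributes'))

    definition_list = nodes.definition_list()
    section.append(definition_list)

    for attr in ('public_key', 'private_key', 'show'):
        item = nodes.definition_list_item(
            '',
            nodes.term('', attr),
            nodes.definition('', nodes.paragraph(text='...')))
        definition_list.append(item)
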
diff --git a/doc/source/getting_started/create_a_stack.rst b/doc/source/getting_started/create_a_stack.rst
index 8b8f03dac..0d1576a92 100644
--- a/doc/source/getting_started/create_a_stack.rst
+++ b/doc/source/getting_started/create_a_stack.rst
@@ -118,8 +118,7 @@ Note: The list operation will show no running stack.::
$ openstack stack delete teststack
$ openstack stack list
-You can explore other heat commands by referring to the `Heat command reference
-<https://docs.openstack.org/python-heatclient/latest/cli/>`_ for the
-`OpenStack Command-Line Interface
-<https://docs.openstack.org/python-openstackclient/>`_; then read the
-:ref:`template-guide` and start authoring your own templates.
+You can explore other heat commands by referring to the
+:python-heatclient-doc:`Heat command reference <cli/>` for the
+:python-openstackclient-doc:`OpenStack Command-Line Interface <>`; then read
+the :ref:`template-guide` and start authoring your own templates.
diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst
index fdc07fcdd..d54b0bd8f 100644
--- a/doc/source/getting_started/index.rst
+++ b/doc/source/getting_started/index.rst
@@ -23,5 +23,4 @@ Getting Started Guides
on_fedora
on_ubuntu
on_other
- jeos_building
standalone
diff --git a/doc/source/getting_started/jeos_building.rst b/doc/source/getting_started/jeos_building.rst
deleted file mode 100644
index bf54b2c6b..000000000
--- a/doc/source/getting_started/jeos_building.rst
+++ /dev/null
@@ -1,84 +0,0 @@
-..
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Building JEOS images for use with Heat
-======================================
-Heat's full functionality can only be used when launching cloud images that have
-the heat-cfntools_ package installed.
-This document describes some options for creating a heat-cfntools enabled image
-for yourself.
-
-.. _heat-cfntools: https://opendev.org/openstack/heat-cfntools
-
-Building an image with diskimage-builder
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-diskimage-builder_ is a tool for customizing cloud images.
-tripleo-image-elements_ is a collection of diskimage-builder elements related
-to the TripleO_ project. It includes an element for heat-cfntools which can be
-used to create heat-enabled images.
-
-.. _diskimage-builder: https://opendev.org/openstack/diskimage-builder
-.. _tripleo-image-elements: https://opendev.org/openstack/tripleo-image-elements
-.. _TripleO: https://wiki.openstack.org/wiki/TripleO
-
-Install the tool (preferably in a virtualenv) and fetch the elements::
-
- pip install git+https://opendev.org/openstack/diskimage-builder
- git clone https://opendev.org/openstack/tripleo-image-elements
-
-To create a heat-cfntools enabled image with the current release of Fedora x86_64::
-
- export ELEMENTS_PATH=tripleo-image-elements/elements
- disk-image-create vm fedora heat-cfntools -a amd64 -o fedora-heat-cfntools
-
-The image may then be pushed to glance, e.g::
-
- source ~/.openstack/keystonerc
- openstack image create fedora-heat-cfntools --public --disk-format qcow2 --container-format bare < fedora-heat-cfntools.qcow2
-
-To create a heat-cfntools enabled image with the current release of Ubuntu i386::
-
- export ELEMENTS_PATH=tripleo-image-elements/elements
- disk-image-create vm ubuntu heat-cfntools -a i386 -o ubuntu-heat-cfntools
-
-If you are creating your own images you should consider creating golden images
-which contain all the packages required for the stacks that you launch. You can do
-this by writing your own diskimage-builder elements and invoking those elements
-in the call to disk-image-create.
-
-This means that the resulting heat templates only need to modify configuration
-files. This will speed stack launch time and reduce the risk of a transient
-package download failure causing the stack launch to fail.
-
-To create an image that contains hooks needed for SoftwareConfig and SoftwareDeployment,
-you can follow the steps below to build a fedora based image::
-
- pip install git+https://opendev.org/openstack/diskimage-builder
- git clone https://opendev.org/openstack/tripleo-image-elements
- git clone https://opendev.org/openstack/heat-agents
- export ELEMENTS_PATH=tripleo-image-elements/elements:heat-agents
- disk-image-create vm \
- fedora selinux-permissive \
- heat-config \
- os-collect-config \
- os-refresh-config \
- os-apply-config \
- heat-config-cfn-init \
- heat-config-puppet \
- heat-config-script \
- -o fedora-software-config.qcow2
-
-The image may then be pushed to glance, e.g::
-
- source ~/.openstack/keystonerc
- openstack image create fedora-software-config --public --disk-format=qcow2 --container-format=bare < fedora-software-config.qcow2
diff --git a/doc/source/getting_started/on_devstack.rst b/doc/source/getting_started/on_devstack.rst
index 6fb850cc9..29581390d 100644
--- a/doc/source/getting_started/on_devstack.rst
+++ b/doc/source/getting_started/on_devstack.rst
@@ -51,8 +51,8 @@ a VM image that heat can launch. To do that add the following to
`[[local|localrc]]` section of `local.conf`::
IMAGE_URL_SITE="https://download.fedoraproject.org"
- IMAGE_URL_PATH="/pub/fedora/linux/releases/25/CloudImages/x86_64/images/"
- IMAGE_URL_FILE="Fedora-Cloud-Base-25-1.3.x86_64.qcow2"
+ IMAGE_URL_PATH="/pub/fedora/linux/releases/33/Cloud/x86_64/images/"
+ IMAGE_URL_FILE="Fedora-Cloud-Base-33-1.2.x86_64.qcow2"
IMAGE_URLS+=","$IMAGE_URL_SITE$IMAGE_URL_PATH$IMAGE_URL_FILE
URLs for any cloud image may be specified, but fedora images from F20 contain
@@ -81,13 +81,10 @@ will add the profiler notifier to your ceilometer::
Enable the profiler in /etc/heat/heat.conf::
- $ echo -e "[profiler]\nprofiler_enabled = True\n"\
- "trace_sqlalchemy = True\n"\
- >> /etc/heat/heat.conf
-
-Change the default hmac_key in /etc/heat/api-paste.ini::
-
- $ sed -i "s/hmac_keys =.*/hmac_keys = SECRET_KEY/" /etc/heat/api-paste.ini
+ $ echo -e "[profiler]\nenabled = True\n"\
+ "trace_sqlalchemy = True\n"\
+ "hmac_keys = SECRET_KEY\n"\
+ >> /etc/heat/heat.conf
Run any command with --profile SECRET_KEY::
@@ -96,7 +93,7 @@ Run any command with --profile SECRET_KEY::
Get pretty HTML with traces::
- $ osprofiler trace show --html <Profile ID>
+ $ osprofiler trace show --html <Trace ID>
Note that osprofiler should be run with the admin user name & tenant.
diff --git a/doc/source/getting_started/standalone.rst b/doc/source/getting_started/standalone.rst
index 696aab21b..1f0207f5a 100644
--- a/doc/source/getting_started/standalone.rst
+++ b/doc/source/getting_started/standalone.rst
@@ -31,7 +31,7 @@ The localrc looked like this::
HEAT_STANDALONE=True
KEYSTONE_AUTH_HOST=192.168.1.88
- KEYSTONE_AUTH_PORT=35357
+ KEYSTONE_AUTH_PORT=5000
KEYSTONE_AUTH_PROTOCOL=http
KEYSTONE_SERVICE_HOST=$KEYSTONE_AUTH_HOST
KEYSTONE_SERVICE_PORT=$KEYSTONE_AUTH_PORT
@@ -72,7 +72,7 @@ Now make a file to store your new environment (heat.env).
export OS_USERNAME=admin
export OS_TENANT_NAME=demo
export OS_PASSWORD=abetterpasswordthanthis
- export OS_AUTH_URL=http://192.168.1.88:35357/v2.0/
+ export OS_AUTH_URL=http://192.168.1.88:5000/v3/
Now you use this like::
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
index 304bc1b46..c3834651a 100644
--- a/doc/source/glossary.rst
+++ b/doc/source/glossary.rst
@@ -24,10 +24,10 @@
CFN
An abbreviated form of "AWS CloudFormation".
- Constraint
+ constraint
Defines valid input :term:`parameters` for a :term:`template`.
- Dependency
+ dependency
When a :term:`resource` must wait for another resource to finish
creation before being created itself. Heat adds an implicit
dependency when a resource references another resource or one of
@@ -35,7 +35,7 @@
dependency can also be created by the user in the template
definition.
- Environment
+ environment
Used to affect the run-time behavior of the template. Provides a
way to override the default resource implementation and
parameters passed to Heat. See :ref:`Environments`.
@@ -48,8 +48,8 @@
HOT
An acronym for ":term:`Heat Orchestration Template`".
- Input parameters
- See :term:`Parameters`.
+ input parameters
+ See :term:`parameters`.
Metadata
May refer to :term:`Resource Metadata`, :term:`Nova Instance
@@ -57,19 +57,17 @@
Metadata service
A Compute service that enables virtual machine instances to
- retrieve instance-specific data. See `Metadata
- service (OpenStack Administrator Guide)`_.
+ retrieve instance-specific data. See :nova-doc:`Nova Metadata
+ service documentation <user/metadata.html#metadata-service>`.
- .. _Metadata service (OpenStack Administrator Guide): https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service
-
- Multi-region
+ multi-region
A feature of Heat that supports deployment to multiple regions.
- Nested resource
+ nested resource
A :term:`resource` instantiated as part of a :term:`nested
stack`.
- Nested stack
+ nested stack
A :term:`template` referenced by URL inside of another template.
Used to reduce redundant resource definitions and group complex
architectures into logical groups.
@@ -83,42 +81,42 @@
OpenStack
Open source software for building private and public clouds.
- Orchestrate
+ orchestrate
Arrange or direct the elements of a situation to produce a
desired effect.
- Outputs
+ outputs
A top-level block in a :term:`template` that defines what data
will be returned by a stack after instantiation.
- Parameters
+ parameters
A top-level block in a :term:`template` that defines what data
can be passed to customise a template when it is used to create
or update a :term:`stack`.
- Provider resource
+ provider resource
A :term:`resource` implemented by a :term:`provider
template`. The parent resource's properties become the
:term:`nested stack's <nested stack>` parameters.
- Provider template
+ provider template
Allows user-definable :term:`resource providers <resource
provider>` to be specified via :term:`nested stacks <nested
stack>`. The nested stack's :term:`outputs` become the parent
stack's :term:`attributes <resource attribute>`.
- Resource
+ resource
An element of OpenStack infrastructure instantiated from a
- particular :term:`resource provider`. See also :term:`Nested
+ particular :term:`resource provider`. See also :term:`nested
resource`.
- Resource attribute
+ resource attribute
Data that can be obtained from a :term:`resource`, e.g. a
server's public IP or name. Usually passed to another resource's
:term:`properties <resource property>` or added to the stack's
:term:`outputs`.
- Resource group
+ resource group
A :term:`resource provider` that creates one or more identically
configured :term:`resources <resource>` or :term:`nested
resources <nested resource>`.
@@ -129,46 +127,45 @@
.. _AWS::CloudFormation::Init (AWS CloudFormation User Guide): https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-init.html
- Resource plugin
+ resource plugin
Python code that understands how to instantiate and manage a
:term:`resource`. See `Heat Resource Plugins (OpenStack wiki)`_.
.. _Heat Resource Plugins (OpenStack wiki): https://wiki.openstack.org/wiki/Heat/Plugins#Heat_Resource_Plugins
- Resource property
+ resource property
Data utilized for the instantiation of a :term:`resource`. Can be
defined statically in a :term:`template` or passed in as
:term:`input parameters <parameters>`.
- Resource provider
+ resource provider
The implementation of a particular resource type. May be a
- :term:`Resource plugin` or a :term:`Provider template`.
+ :term:`resource plugin` or a :term:`provider template`.
- Stack
+ stack
A collection of instantiated :term:`resources <resource>` that
are defined in a single :term:`template`.
- Stack resource
+ stack resource
A :term:`resource provider` that allows the management of a
:term:`nested stack` as a :term:`resource` in a parent stack.
- Template
+ template
An orchestration document that details everything needed to carry
out an :term:`orchestration <orchestrate>`.
- Template resource
- See :term:`Provider resource`.
+ template resource
+ See :term:`provider resource`.
- User data
+ user data
A :term:`resource property` that contains a user-provided data
blob. User data gets passed to `cloud-init`_ to automatically
- configure instances at boot time. See also `User data (OpenStack
- End User Guide)`_.
+ configure instances at boot time. See also :nova-doc:`Nova User data
+ documentation <user/metadata.html#user-provided-data>`.
- .. _User data (OpenStack End User Guide): https://docs.openstack.org/nova/latest/user/user-data.html
.. _cloud-init: https://cloudinit.readthedocs.io/
- Wait condition
+ wait condition
A :term:`resource provider` that provides a way to communicate
data or events from servers back to the orchestration
engine. Most commonly used to pause the creation of the
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 8b65d3bb8..58426e4c7 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -80,10 +80,9 @@ Using the Heat Service
----------------------
- `OpenStack Orchestration API v1 Reference`_
-- `Python and CLI client`_
+- :python-heatclient-doc:`Python and CLI client <>`
-.. _`OpenStack Orchestration API v1 Reference`: https://developer.openstack.org/api-ref/orchestration/v1/
-.. _`Python and CLI client`: https://docs.openstack.org/python-heatclient/latest
+.. _`OpenStack Orchestration API v1 Reference`: https://docs.openstack.org/api-ref/orchestration/v1/
Developing Heat
===============
@@ -91,14 +90,14 @@ Developing Heat
.. toctree::
:maxdepth: 1
- contributing/index
+ contributor/index
getting_started/on_devstack
- developing_guides/architecture
- developing_guides/pluginguide
- developing_guides/schedulerhints
- developing_guides/gmr
- developing_guides/supportstatus
- developing_guides/rally_on_gates
+ contributor/architecture
+ contributor/pluginguide
+ contributor/schedulerhints
+ contributor/gmr
+ contributor/supportstatus
+ contributor/rally_on_gates
api/index
Indices and tables
diff --git a/doc/source/install/install-debian.rst b/doc/source/install/install-debian.rst
index c3622ee8c..8e8a0b93b 100644
--- a/doc/source/install/install-debian.rst
+++ b/doc/source/install/install-debian.rst
@@ -6,23 +6,413 @@ Install and configure for Debian
This section describes how to install and configure the Orchestration service
for Debian.
+Prerequisites
+-------------
+
+Before you install and configure Orchestration, you must create a
+database, service credentials, and API endpoints. Orchestration also
+requires additional information in the Identity service.
+
+#. To create the database, complete these steps:
+
+ * Use the database access client to connect to the database
+ server as the ``root`` user:
+
+ .. code-block:: console
+
+ $ mysql -u root -p
+
+ * Create the ``heat`` database:
+
+ .. code-block:: console
+
+ CREATE DATABASE heat;
+
+ * Grant proper access to the ``heat`` database:
+
+ .. code-block:: console
+
+ GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'localhost' \
+ IDENTIFIED BY 'HEAT_DBPASS';
+ GRANT ALL PRIVILEGES ON heat.* TO 'heat'@'%' \
+ IDENTIFIED BY 'HEAT_DBPASS';
+
+ Replace ``HEAT_DBPASS`` with a suitable password.
+
+ * Exit the database access client.
+
+#. Source the ``admin`` credentials to gain access to
+ admin-only CLI commands:
+
+ .. code-block:: console
+
+ $ . admin-openrc
+
+#. To create the service credentials, complete these steps:
+
+ * Create the ``heat`` user:
+
+ .. code-block:: console
+
+ $ openstack user create --domain default --password-prompt heat
+ User Password:
+ Repeat User Password:
+ +-----------+----------------------------------+
+ | Field | Value |
+ +-----------+----------------------------------+
+ | domain_id | e0353a670a9e496da891347c589539e9 |
+ | enabled | True |
+ | id | ca2e175b851943349be29a328cc5e360 |
+ | name | heat |
+ +-----------+----------------------------------+
+
+ * Add the ``admin`` role to the ``heat`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project service --user heat admin
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``heat`` and ``heat-cfn`` service entities:
+
+ .. code-block:: console
+
+ $ openstack service create --name heat \
+ --description "Orchestration" orchestration
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | Orchestration |
+ | enabled | True |
+ | id | 727841c6f5df4773baa4e8a5ae7d72eb |
+ | name | heat |
+ | type | orchestration |
+ +-------------+----------------------------------+
+
+ $ openstack service create --name heat-cfn \
+ --description "Orchestration" cloudformation
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | Orchestration |
+ | enabled | True |
+ | id | c42cede91a4e47c3b10c8aedc8d890c6 |
+ | name | heat-cfn |
+ | type | cloudformation |
+ +-------------+----------------------------------+
+
+#. Create the Orchestration service API endpoints:
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ orchestration public http://controller:8004/v1/%\(tenant_id\)s
+ +--------------+-----------------------------------------+
+ | Field | Value |
+ +--------------+-----------------------------------------+
+ | enabled | True |
+ | id | 3f4dab34624e4be7b000265f25049609 |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 727841c6f5df4773baa4e8a5ae7d72eb |
+ | service_name | heat |
+ | service_type | orchestration |
+ | url | http://controller:8004/v1/%(tenant_id)s |
+ +--------------+-----------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ orchestration internal http://controller:8004/v1/%\(tenant_id\)s
+ +--------------+-----------------------------------------+
+ | Field | Value |
+ +--------------+-----------------------------------------+
+ | enabled | True |
+ | id | 9489f78e958e45cc85570fec7e836d98 |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 727841c6f5df4773baa4e8a5ae7d72eb |
+ | service_name | heat |
+ | service_type | orchestration |
+ | url | http://controller:8004/v1/%(tenant_id)s |
+ +--------------+-----------------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ orchestration admin http://controller:8004/v1/%\(tenant_id\)s
+ +--------------+-----------------------------------------+
+ | Field | Value |
+ +--------------+-----------------------------------------+
+ | enabled | True |
+ | id | 76091559514b40c6b7b38dde790efe99 |
+ | interface | admin |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | 727841c6f5df4773baa4e8a5ae7d72eb |
+ | service_name | heat |
+ | service_type | orchestration |
+ | url | http://controller:8004/v1/%(tenant_id)s |
+ +--------------+-----------------------------------------+
+
+ .. code-block:: console
+
+ $ openstack endpoint create --region RegionOne \
+ cloudformation public http://controller:8000/v1
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | b3ea082e019c4024842bf0a80555052c |
+ | interface | public |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | c42cede91a4e47c3b10c8aedc8d890c6 |
+ | service_name | heat-cfn |
+ | service_type | cloudformation |
+ | url | http://controller:8000/v1 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ cloudformation internal http://controller:8000/v1
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 169df4368cdc435b8b115a9cb084044e |
+ | interface | internal |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | c42cede91a4e47c3b10c8aedc8d890c6 |
+ | service_name | heat-cfn |
+ | service_type | cloudformation |
+ | url | http://controller:8000/v1 |
+ +--------------+----------------------------------+
+
+ $ openstack endpoint create --region RegionOne \
+ cloudformation admin http://controller:8000/v1
+ +--------------+----------------------------------+
+ | Field | Value |
+ +--------------+----------------------------------+
+ | enabled | True |
+ | id | 3d3edcd61eb343c1bbd629aa041ff88b |
+      | interface    | admin                            |
+ | region | RegionOne |
+ | region_id | RegionOne |
+ | service_id | c42cede91a4e47c3b10c8aedc8d890c6 |
+ | service_name | heat-cfn |
+ | service_type | cloudformation |
+ | url | http://controller:8000/v1 |
+ +--------------+----------------------------------+
+
+#. Orchestration requires additional information in the Identity service to
+ manage stacks. To add this information, complete these steps:
+
+ * Create the ``heat`` domain that contains projects and users
+ for stacks:
+
+ .. code-block:: console
+
+ $ openstack domain create --description "Stack projects and users" heat
+ +-------------+----------------------------------+
+ | Field | Value |
+ +-------------+----------------------------------+
+ | description | Stack projects and users |
+ | enabled | True |
+ | id | 0f4d1bd326f2454dacc72157ba328a47 |
+ | name | heat |
+ +-------------+----------------------------------+
+
+ * Create the ``heat_domain_admin`` user to manage projects and users
+ in the ``heat`` domain:
+
+ .. code-block:: console
+
+ $ openstack user create --domain heat --password-prompt heat_domain_admin
+ User Password:
+ Repeat User Password:
+ +-----------+----------------------------------+
+ | Field | Value |
+ +-----------+----------------------------------+
+ | domain_id | 0f4d1bd326f2454dacc72157ba328a47 |
+ | enabled | True |
+ | id | b7bd1abfbcf64478b47a0f13cd4d970a |
+ | name | heat_domain_admin |
+ +-----------+----------------------------------+
+
+ * Add the ``admin`` role to the ``heat_domain_admin`` user in the
+ ``heat`` domain to enable administrative stack management
+ privileges by the ``heat_domain_admin`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --domain heat --user-domain heat --user heat_domain_admin admin
+
+ .. note::
+
+ This command provides no output.
+
+ * Create the ``heat_stack_owner`` role:
+
+ .. code-block:: console
+
+ $ openstack role create heat_stack_owner
+ +-----------+----------------------------------+
+ | Field | Value |
+ +-----------+----------------------------------+
+ | domain_id | None |
+ | id | 15e34f0c4fed4e68b3246275883c8630 |
+ | name | heat_stack_owner |
+ +-----------+----------------------------------+
+
+ * Add the ``heat_stack_owner`` role to the ``demo`` project and user to
+ enable stack management by the ``demo`` user:
+
+ .. code-block:: console
+
+ $ openstack role add --project demo --user demo heat_stack_owner
+
+ .. note::
+
+ This command provides no output.
+
+ .. note::
+
+ You must add the ``heat_stack_owner`` role to each user
+ that manages stacks.
+
+ * Create the ``heat_stack_user`` role:
+
+ .. code-block:: console
+
+ $ openstack role create heat_stack_user
+ +-----------+----------------------------------+
+ | Field | Value |
+ +-----------+----------------------------------+
+ | domain_id | None |
+ | id | 88849d41a55d4d1d91e4f11bffd8fc5c |
+ | name | heat_stack_user |
+ +-----------+----------------------------------+
+
+ .. note::
+
+ The Orchestration service automatically assigns the
+ ``heat_stack_user`` role to users that it creates
+ during stack deployment. By default, this role restricts
+        :term:`API <Application Programming Interface (API)>` operations.
+ To avoid conflicts, do not add
+ this role to users with the ``heat_stack_owner`` role.
+
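+   If you want to double-check the setup above, the domain and roles can be
+   listed with a short Python snippet using ``python-keystoneclient``. This is
+   only an optional verification sketch; the admin credentials and ``auth_url``
+   are placeholders for your environment:
+
+   .. code-block:: python
+
+      from keystoneauth1.identity import v3
+      from keystoneauth1 import session
+      from keystoneclient.v3 import client
+
+      auth = v3.Password(auth_url='http://controller:5000/v3',
+                         username='admin', password='ADMIN_PASS',
+                         project_name='admin',
+                         user_domain_name='Default',
+                         project_domain_name='Default')
+      keystone = client.Client(session=session.Session(auth=auth))
+
+      print([d.name for d in keystone.domains.list()])  # expect 'heat' in the list
+      print([r.name for r in keystone.roles.list()])    # expect 'heat_stack_owner'
+                                                        # and 'heat_stack_user'
+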
Install and configure components
--------------------------------
+.. note::
-#. Run the following commands to install the packages:
+ Default configuration files vary by distribution. You might need
+ to add these sections and options rather than modifying existing
+ sections and options. Also, an ellipsis (``...``) in the configuration
+ snippets indicates potential default configuration options that you
+ should retain.
+
+#. Install the packages:
.. code-block:: console
- # apt-get install heat-api heat-api-cfn heat-engine python-heat-client
+ # apt-get install heat-api heat-api-cfn heat-engine
+
+2. Edit the ``/etc/heat/heat.conf`` file and complete the following
+ actions:
+
+ * In the ``[database]`` section, configure database access:
+
+ .. code-block:: ini
+
+ [database]
+ ...
+ connection = mysql+pymysql://heat:HEAT_DBPASS@controller/heat
+
+ Replace ``HEAT_DBPASS`` with the password you chose for the
+ Orchestration database.
+
+ * In the ``[DEFAULT]`` section,
+ configure ``RabbitMQ`` message queue access:
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ ...
+ transport_url = rabbit://openstack:RABBIT_PASS@controller
+
+ Replace ``RABBIT_PASS`` with the password you chose for the
+ ``openstack`` account in ``RabbitMQ``.
+
+ * In the ``[keystone_authtoken]``, ``[trustee]`` and
+ ``[clients_keystone]`` sections,
+ configure Identity service access:
+
+ .. code-block:: ini
+
+ [keystone_authtoken]
+ ...
+ www_authenticate_uri = http://controller:5000
+ auth_url = http://controller:5000
+ memcached_servers = controller:11211
+ auth_type = password
+ project_domain_name = default
+ user_domain_name = default
+ project_name = service
+ username = heat
+ password = HEAT_PASS
+
+ [trustee]
+ ...
+ auth_type = password
+ auth_url = http://controller:5000
+ username = heat
+ password = HEAT_PASS
+ user_domain_name = default
+
+ [clients_keystone]
+ ...
+ auth_uri = http://controller:5000
+
+ Replace ``HEAT_PASS`` with the password you chose for the
+ ``heat`` user in the Identity service.
+
+ * In the ``[DEFAULT]`` section, configure the metadata and
+ wait condition URLs:
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ ...
+ heat_metadata_server_url = http://controller:8000
+ heat_waitcondition_server_url = http://controller:8000/v1/waitcondition
+
+ * In the ``[DEFAULT]`` section, configure the stack domain and
+ administrative credentials:
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ ...
+ stack_domain_admin = heat_domain_admin
+ stack_domain_admin_password = HEAT_DOMAIN_PASS
+ stack_user_domain_name = heat
+
+ Replace ``HEAT_DOMAIN_PASS`` with the password you chose for the
+ ``heat_domain_admin`` user in the Identity service.
+
+3. Populate the Orchestration database:
+
+ .. code-block:: console
-#. Respond to prompts for debconf.
+ # su -s /bin/sh -c "heat-manage db_sync" heat
- .. :doc:`database management <debconf/debconf-dbconfig-common>`,
- :doc:`Identity service credentials <debconf/debconf-keystone-authtoken>`,
- :doc:`service endpoint registration <debconf/debconf-api-endpoints>`,
- and :doc:`message broker credentials <debconf/debconf-rabbitmq>`.
+ .. note::
+ Ignore any deprecation messages in this output.
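+
+   As an optional sanity check (this is not part of the packaged installation
+   steps), you can confirm that the options configured above actually landed in
+   ``/etc/heat/heat.conf`` with a short Python snippet; the section and option
+   names below are simply the ones used earlier in this guide:
+
+   .. code-block:: python
+
+      import configparser
+
+      conf = configparser.ConfigParser(interpolation=None)
+      conf.read('/etc/heat/heat.conf')
+
+      for section, option in [('database', 'connection'),
+                              ('DEFAULT', 'heat_metadata_server_url'),
+                              ('DEFAULT', 'stack_user_domain_name')]:
+          print(section, option, '=',
+                conf.get(section, option, fallback='<missing>'))
+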
Finalize installation
---------------------
diff --git a/doc/source/install/install-obs.rst b/doc/source/install/install-obs.rst
index 24aa89d0d..7d3a97938 100644
--- a/doc/source/install/install-obs.rst
+++ b/doc/source/install/install-obs.rst
@@ -357,7 +357,7 @@ Install and configure components
[keystone_authtoken]
...
www_authenticate_uri = http://controller:5000
- auth_url = http://controller:35357
+ auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
@@ -369,7 +369,7 @@ Install and configure components
[trustee]
...
auth_type = password
- auth_url = http://controller:35357
+ auth_url = http://controller:5000
username = heat
password = HEAT_PASS
user_domain_name = default
diff --git a/doc/source/install/install-rdo.rst b/doc/source/install/install-rdo.rst
index fcb666e7f..e5bfae0e7 100644
--- a/doc/source/install/install-rdo.rst
+++ b/doc/source/install/install-rdo.rst
@@ -74,14 +74,13 @@ requires additional information in the Identity service.
.. note::
- If installing OpenStack manually following the `Keystone install
- guide`_, the name of the services project is ``service`` as given
- above. However, traditional methods of installing RDO (such as
- PackStack and TripleO) use ``services`` as the name of the service
- project. If you installed RDO using a Puppet-based method, substitute
- ``services`` as the project name.
-
- .. _`Keystone install guide`: https://docs.openstack.org/keystone/latest/install/keystone-users-rdo.html
+ If installing OpenStack manually following the :keystone-doc:`Keystone
+ install guide <install/keystone-users-rdo.html>`, the name of the
+ services project is ``service`` as given above. However, traditional
+ methods of installing RDO (such as PackStack and TripleO) use
+ ``services`` as the name of the service project. If you installed RDO
+ using a Puppet-based method, substitute ``services`` as the project
+ name.
.. note::
@@ -368,7 +367,7 @@ Install and configure components
[keystone_authtoken]
...
www_authenticate_uri = http://controller:5000
- auth_url = http://controller:35357
+ auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
@@ -380,7 +379,7 @@ Install and configure components
[trustee]
...
auth_type = password
- auth_url = http://controller:35357
+ auth_url = http://controller:5000
username = heat
password = HEAT_PASS
user_domain_name = default
diff --git a/doc/source/install/install-ubuntu.rst b/doc/source/install/install-ubuntu.rst
index 643c7c2cb..1882be397 100644
--- a/doc/source/install/install-ubuntu.rst
+++ b/doc/source/install/install-ubuntu.rst
@@ -356,7 +356,7 @@ Install and configure components
[keystone_authtoken]
...
www_authenticate_uri = http://controller:5000
- auth_url = http://controller:35357
+ auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
@@ -368,7 +368,7 @@ Install and configure components
[trustee]
...
auth_type = password
- auth_url = http://controller:35357
+ auth_url = http://controller:5000
username = heat
password = HEAT_PASS
user_domain_name = default
diff --git a/doc/source/operating_guides/scale_deployment.rst b/doc/source/operating_guides/scale_deployment.rst
index c19d3657f..156f591c9 100644
--- a/doc/source/operating_guides/scale_deployment.rst
+++ b/doc/source/operating_guides/scale_deployment.rst
@@ -31,12 +31,12 @@ Assumptions
This guide, using a devstack installation of OpenStack, assumes that:
- 1. You have configured devstack from `Single Machine Installation Guide
- <https://docs.openstack.org/devstack/latest/guides/single-machine.html>`_;
- 2. You have set up heat on devstack, as defined at :doc:`heat and DevStack
- <../getting_started/on_devstack>`;
- 3. You have installed HAProxy_ on the devstack
- server.
+1. You have configured devstack from :devstack-doc:`Single Machine
+ Installation Guide <guides/single-machine.html>`;
+2. You have set up heat on devstack, as defined at :doc:`heat and DevStack
+ <../getting_started/on_devstack>`;
+3. You have installed HAProxy_ on the devstack
+ server.
Architecture
============
@@ -48,9 +48,9 @@ Basic Architecture
------------------
The heat architecture is as defined at :doc:`heat architecture
-<../developing_guides/architecture>` and shown in the diagram below,
-where we have a CLI that sends HTTP requests to the REST and CFN APIs, which in
-turn make calls using AMQP to the heat-engine::
+<../contributor/architecture>` and shown in the diagram below, where we have
+a CLI that sends HTTP requests to the REST and CFN APIs, which in turn make
+calls using AMQP to the heat-engine::
|- [REST API] -|
[CLI] -- <HTTP> -- -- <AMQP> -- [ENGINE]
diff --git a/doc/source/operating_guides/upgrades_guide.rst b/doc/source/operating_guides/upgrades_guide.rst
index 9d13cad99..e1dfa0825 100644
--- a/doc/source/operating_guides/upgrades_guide.rst
+++ b/doc/source/operating_guides/upgrades_guide.rst
@@ -153,6 +153,6 @@ References
.. _cold-upgrades: https://governance.openstack.org/tc/reference/tags/assert_supports-upgrade.html
-.. _spec about rolling upgrades: https://review.openstack.org/#/c/407989/
+.. _spec about rolling upgrades: https://review.opendev.org/#/c/407989/
.. _RabbitMQ: http://www.rabbitmq.com/management.html
diff --git a/doc/source/template_guide/hot_spec.rst b/doc/source/template_guide/hot_spec.rst
index c81f48eea..b9e395255 100644
--- a/doc/source/template_guide/hot_spec.rst
+++ b/doc/source/template_guide/hot_spec.rst
@@ -376,7 +376,53 @@ The complete list of supported condition functions is::
-------------------
The key with value ``2018-08-31`` or ``rocky`` indicates that the YAML
document is a HOT template and it may contain features added and/or removed
-up until the Queens release. The complete list of supported functions is::
+up until the Rocky release. The complete list of supported functions is::
+
+ digest
+ filter
+ get_attr
+ get_file
+ get_param
+ get_resource
+ list_join
+ make_url
+ list_concat
+ list_concat_unique
+ contains
+ map_merge
+ map_replace
+ repeat
+ resource_facade
+ str_replace
+ str_replace_strict
+ str_replace_vstrict
+ str_split
+ yaql
+ if
+
+The complete list of supported condition functions is::
+
+ equals
+ get_param
+ not
+ and
+ or
+ yaql
+ contains
+
+2021-04-16 | wallaby
+--------------------
+The key with value ``2021-04-16`` or ``wallaby`` indicates that the YAML
+document is a HOT template and it may contain features added and/or removed
+up until the Wallaby release.
+
+This version adds a 2-argument variant of the ``if`` function. When the
+condition is false and no third argument is supplied, the entire enclosing item
+(which may be e.g. a list item, a key-value pair in a dict, or a property
+value) will be elided. This allows for e.g. conditional definition of
+properties while keeping the default value when the condition is false.
+
+The complete list of supported functions is::
digest
filter
@@ -1837,6 +1883,27 @@ template except for ``if`` conditions. You can use the ``if`` condition
in the property values in the ``resources`` section and ``outputs`` sections
of a template.
+Beginning with the ``wallaby`` template version, the third argument is
+optional. If only two arguments are passed, the entire enclosing item is
+removed when the condition is false.
+
+For example:
+
+.. code-block:: yaml
+
+ conditions:
+ override_name: {not: {equals: [{get_param: server_name}, ""]}}
+
+ resources:
+ test_server:
+ type: OS::Nova::Server
+ properties:
+ name: {if: [override_name, {get_param: server_name}]}
+
+In this example, the default name for the server (which is generated by Heat
+when the property value is not specified) would be used when the
+``server_name`` parameter value is an empty string.
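+
+The effect can also be illustrated with a small standalone model (this is not
+Heat's implementation, only a sketch of the semantics): with the two-argument
+form, a false condition removes the enclosing item entirely instead of setting
+it to ``null``.
+
+.. code-block:: python
+
+   def resolved_properties(server_name):
+       """Model of the property map produced by the template above."""
+       props = {}
+       if server_name != "":        # the override_name condition
+           props['name'] = server_name
+       # false condition + two-argument ``if``: the 'name' key is elided
+       return props
+
+   print(resolved_properties("web01"))  # {'name': 'web01'}
+   print(resolved_properties(""))       # {} -> Heat generates a default name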
+
not
---
The ``not`` function acts as a NOT operator.
diff --git a/doc/source/template_guide/multi-clouds.rst b/doc/source/template_guide/multi-clouds.rst
index 6509d1a08..e6266db46 100644
--- a/doc/source/template_guide/multi-clouds.rst
+++ b/doc/source/template_guide/multi-clouds.rst
@@ -13,10 +13,10 @@ multi-clouds features, and what's the environment requirement.
.. note:: If you like to create a stack in multi-region environment,
you don't need this feature at all. all you need to do is provide
- `region_name` under `context` property for `OS::Heat::Stack`_.
+ `region_name` under `context` property for :ref:`OS::Heat::Stack`.
If you like to see information on how to provide SSL support for
your multi-region environment, you can jump to `Use CA
- cert(Optional)`_ .
+ cert (Optional)`_ .
Requirements
~~~~~~~~~~~~
@@ -58,17 +58,23 @@ Gathering credential information
Before we start generating secret, let's talk about what credential format we
need. credential is a JSON format string contains two keys ``auth_type``, and
``auth``. ``auth_type``, and ``auth`` following auth plugin loader rules from
-Keystone. You can find `plugin options`_ and `authentication plugins`_ in
-keystoneauth documents.
+Keystone. You can find :keystoneauth-doc:`plugin options
+<plugin-options.html>` and :keystoneauth-doc:`authentication plugins
+<authentication-plugins.html#loading-plugins-by-name>` in keystoneauth
+documents.
* **auth_type** - ``auth_type`` is a string for plugin name. Allows value like
`v3applicationcredential`, `password`, `v3oidcclientcredentials`, etc. You
- need to provide `available plugins`_.
+  need to provide a value from the :keystoneauth-doc:`available plugins
+  <plugin-options.html#available-plugins>`.
* **auth** - auth is a dictionary contains all parameters for plugins to
perform authentication. You can find all valid parameter references from
- `available plugins`_ or get to all class path from `plugin names`_ for more
- detail allowed value or trace plugin class from there.
+  :keystoneauth-doc:`available plugins
+  <plugin-options.html#available-plugins>`, or look up the plugin class path
+  via :keystoneauth-doc:`plugin names
+  <authentication-plugins.html#loading-plugins-by-name>` and trace the
+  allowed values from the plugin class itself.
As you can tell, all allowed authentication plugins for credentials follows
plugins keystoneauth rules. So once new change in keystoneauth, it will also
@@ -81,15 +87,18 @@ Validate your credential
------------------------
Now you have all your credential information ready, try to validate first if
-you can. You can either directly test them `via config`_, `via CLI`_, or
-`via keystoneauth sessions`_.
+you can. You can either directly test them :keystoneauth-doc:`via config
+<plugin-options.html#using-plugins-via-config-file>`,
+:keystoneauth-doc:`via CLI <plugin-options.html#using-plugins-via-cli>`,
+or :keystoneauth-doc:`via keystoneauth sessions <using-sessions.html>`.
build credential secret
-----------------------
-Once you're sure it's valid, we can start building the secret out.
-To build a secret you just have to follow standard Barbican CLI_ or API to
-store your secret.
+Once you're sure it's valid, you can start building the secret. To build a
+secret you just have to follow the standard
+:python-barbicanclient-doc:`Barbican CLI <cli/cli_usage.html#secret-create>` or
+API to store your secret.
The local site will read this secret to perform stack actions in remote site.
Let's give a quick example here:
@@ -111,9 +120,8 @@ Create remote stacks
Now, you have a secret id generated for your Barbican secret. Use that id as
input for template.
-To create a remote stack, you can simply use `OS::Heat::Stack`_ resource, as
-child stack in your template (we also referring this structure as
-`nested stack`).
+To create a remote stack, you can simply use an :ref:`OS::Heat::Stack` resource
+in your template.
In resource properties, provide `credential_secret_id` (Barbican secret ID
from the secret we just builded for credential) under `context` property.
@@ -146,8 +154,8 @@ won't affect resources/stacks in remote site. So do such actions with super
care.
-Use CA cert(Optional)
----------------------
+Use CA cert (Optional)
+----------------------
For production clouds, it's very important to have SSL support. Here we
provide CA cert method for your SSL access. If you wish to use that, use
@@ -174,13 +182,3 @@ Here is an example for you:
template: { get_file: "remote-app.yaml" }
.. note:: If insecure flag is on, ca_cert will be ignored.
-
-.. _`plugin options`: https://docs.openstack.org/keystoneauth/latest/plugin-options.html
-.. _`authentication plugins`: https://docs.openstack.org/keystoneauth/latest/authentication-plugins.html#loading-plugins-by-name
-.. _`plugin names`: https://docs.openstack.org/keystoneauth/latest/authentication-plugins.html#loading-plugins-by-name
-.. _`available plugins`: https://docs.openstack.org/keystoneauth/latest/plugin-options.html#available-plugins
-.. _`via keystoneauth sessions`: https://docs.openstack.org/keystoneauth/latest/using-sessions.html
-.. _`via config`: https://docs.openstack.org/keystoneauth/latest/plugin-options.html#using-plugins-via-config-file
-.. _`via CLI`: https://docs.openstack.org/keystoneauth/latest/plugin-options.html#using-plugins-via-cli
-.. _CLI: https://docs.openstack.org/python-barbicanclient/latest/cli/cli_usage.html#secret-create
-.. _`OS::Heat::Stack`: https://docs.openstack.org/heat/rocky/template_guide/openstack.html#OS::Heat::Stack
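
For the credential format described above, here is a hypothetical sketch of
what actually gets stored in the Barbican secret; the auth_url, user and
password values are placeholders for the remote cloud:

    import json

    credential = {
        'auth_type': 'password',              # a keystoneauth plugin name
        'auth': {                             # that plugin's parameters
            'auth_url': 'https://remote-cloud:5000/v3',
            'username': 'demo',
            'password': 'REMOTE_PASS',
            'project_name': 'demo',
            'user_domain_name': 'Default',
            'project_domain_name': 'Default',
        },
    }
    # This JSON string is the payload handed to ``openstack secret store``.
    print(json.dumps(credential))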
diff --git a/doc/source/template_guide/software_deployment.rst b/doc/source/template_guide/software_deployment.rst
index 33f525014..166e396eb 100644
--- a/doc/source/template_guide/software_deployment.rst
+++ b/doc/source/template_guide/software_deployment.rst
@@ -39,18 +39,21 @@ you might want to do this, including:
A number of tools are available for building custom images, including:
-* diskimage-builder_ image building tools for OpenStack
+* :diskimage-builder-doc:`diskimage-builder <>` image building tools for OpenStack
* imagefactory_ builds images for a variety of operating system/cloud
combinations
-Examples in this guide which require custom images will use diskimage-builder_.
+Examples in this guide that require custom images will use
+:diskimage-builder-doc:`diskimage-builder <>`.
User-data boot scripts and cloud-init
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
When booting a server it is possible to specify the contents of the user-data
to be passed to that server. This user-data is made available either from
-configured config-drive or from the `Metadata service`_.
+configured config-drive or from the :nova-doc:`Metadata service
+<user/metadata.html#metadata-service>`.
How this user-data is consumed depends on the image being booted, but the most
commonly used tool for default cloud images is cloud-init_.
@@ -779,14 +782,11 @@ contents of the file ``example-puppet-manifest.pp``, containing:
}
-
.. _`AWS::CloudFormation::Init`: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-init.html
-.. _diskimage-builder: https://docs.openstack.org/diskimage-builder/latest/
.. _imagefactory: https://imgfac.org/
-.. _`Metadata service`: https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service
.. _cloud-init: https://cloudinit.readthedocs.io/
.. _curl: https://curl.haxx.se/
-.. _`Orchestration API`: https://developer.openstack.org/api-ref/orchestration/v1/
+.. _`Orchestration API`: https://docs.openstack.org/api-ref/orchestration/v1/
.. _os-refresh-config: https://opendev.org/openstack/os-refresh-config
.. _os-apply-config: https://opendev.org/openstack/os-apply-config
.. _tripleo-heat-templates: https://opendev.org/openstack/tripleo-heat-templates
diff --git a/etc/heat/api-paste.ini b/etc/heat/api-paste.ini
index 883f36dfc..09b4d82f8 100644
--- a/etc/heat/api-paste.ini
+++ b/etc/heat/api-paste.ini
@@ -1,7 +1,7 @@
# heat-api pipeline
[pipeline:heat-api]
-pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authtoken context osprofiler apiv1app
+pipeline = healthcheck cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authtoken context osprofiler apiv1app
# heat-api pipeline for standalone heat
# ie. uses alternative auth backend that authenticates users against keystone
@@ -12,7 +12,7 @@ pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authu
# flavor = standalone
#
[pipeline:heat-api-standalone]
-pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app
+pipeline = healthcheck cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app
# heat-api pipeline for custom cloud backends
# i.e. in heat.conf:
@@ -20,23 +20,23 @@ pipeline = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authu
# flavor = custombackend
#
[pipeline:heat-api-custombackend]
-pipeline = cors request_id context faultwrap versionnegotiation custombackendauth apiv1app
+pipeline = healthcheck cors request_id context faultwrap versionnegotiation custombackendauth apiv1app
# To enable, in heat.conf:
# [paste_deploy]
# flavor = noauth
#
[pipeline:heat-api-noauth]
-pipeline = cors request_id faultwrap noauth context http_proxy_to_wsgi versionnegotiation apiv1app
+pipeline = healthcheck cors request_id faultwrap noauth context http_proxy_to_wsgi versionnegotiation apiv1app
# heat-api-cfn pipeline
[pipeline:heat-api-cfn]
-pipeline = cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken authtoken context osprofiler apicfnv1app
+pipeline = healthcheck cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken authtoken context osprofiler apicfnv1app
# heat-api-cfn pipeline for standalone heat
# relies exclusively on authenticating with ec2 signed requests
[pipeline:heat-api-cfn-standalone]
-pipeline = cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app
+pipeline = healthcheck cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app
[app:apiv1app]
paste.app_factory = heat.common.wsgi:app_factory
@@ -100,3 +100,6 @@ paste.filter_factory = oslo_middleware.request_id:RequestId.factory
[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+
+[filter:healthcheck]
+paste.filter_factory = oslo_middleware:Healthcheck.factory
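
With the new ``healthcheck`` filter from oslo.middleware placed at the front
of each pipeline, the API answers a lightweight liveness probe on
``/healthcheck`` (the middleware's default path) before any authentication
middleware runs. A minimal probe sketch, assuming heat-api listens on its
default port 8004:

    import urllib.request

    # Expect HTTP 200 with the body "OK" when the WSGI pipeline is up.
    with urllib.request.urlopen('http://controller:8004/healthcheck') as resp:
        print(resp.status, resp.read().decode())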
diff --git a/heat/api/aws/exception.py b/heat/api/aws/exception.py
index d92c24478..90366f861 100644
--- a/heat/api/aws/exception.py
+++ b/heat/api/aws/exception.py
@@ -17,7 +17,6 @@
"""Heat API exception subclasses - maps API response errors to AWS Errors."""
from oslo_utils import reflection
-import six
import webob.exc
from heat.common.i18n import _
@@ -322,7 +321,7 @@ def map_remote_error(ex):
ex_type = ex_type[:-len('_Remote')]
safe = getattr(ex, 'safe', False)
- detail = six.text_type(ex) if safe else None
+ detail = str(ex) if safe else None
if ex_type in inval_param_errors:
return HeatInvalidParameterValueError(detail=detail)
diff --git a/heat/api/middleware/fault.py b/heat/api/middleware/fault.py
index a190ff84d..67418a77a 100644
--- a/heat/api/middleware/fault.py
+++ b/heat/api/middleware/fault.py
@@ -24,7 +24,6 @@ import traceback
from oslo_config import cfg
from oslo_utils import reflection
-import six
import webob
from heat.common import exception
@@ -127,7 +126,7 @@ class FaultWrapper(wsgi.Middleware):
if is_remote:
ex_type = ex_type[:-len('_Remote')]
- full_message = six.text_type(ex)
+ full_message = str(ex)
if '\n' in full_message and is_remote:
message, msg_trace = full_message.split('\n', 1)
elif traceback_marker in full_message:
diff --git a/heat/api/openstack/v1/__init__.py b/heat/api/openstack/v1/__init__.py
index 7a782f1f0..7f625c878 100644
--- a/heat/api/openstack/v1/__init__.py
+++ b/heat/api/openstack/v1/__init__.py
@@ -12,7 +12,6 @@
# under the License.
import routes
-import six
from heat.api.openstack.v1 import actions
from heat.api.openstack.v1 import build_info
@@ -51,7 +50,7 @@ class API(wsgi.Router):
for r in routes:
url = path_prefix + r['url']
methods = r['method']
- if isinstance(methods, six.string_types):
+ if isinstance(methods, str):
methods = [methods]
methods_str = ','.join(methods)
mapper.connect(r['name'], url, controller=controller,
diff --git a/heat/api/openstack/v1/actions.py b/heat/api/openstack/v1/actions.py
index 2b058e72c..3a99a445e 100644
--- a/heat/api/openstack/v1/actions.py
+++ b/heat/api/openstack/v1/actions.py
@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from webob import exc
from heat.api.openstack.v1 import util
@@ -42,8 +41,9 @@ class ActionController(object):
self.options = options
self.rpc_client = rpc_client.EngineClient()
- @util.registered_identified_stack
- def action(self, req, identity, body=None):
+ # Don't enforce policy on this API, as potentially differing policies
+ # will be enforced on individual actions.
+ def action(self, req, tenant_id, stack_name, stack_id, body=None):
"""Performs a specified action on a stack.
The body is expecting to contain exactly one item whose key specifies
@@ -56,25 +56,40 @@ class ActionController(object):
if len(body) > 1:
raise exc.HTTPBadRequest(_("Multiple actions specified"))
- ac = next(six.iterkeys(body))
+ ac = next(iter(body.keys()))
if ac not in self.ACTIONS:
raise exc.HTTPBadRequest(_("Invalid action %s specified") % ac)
- if ac == self.SUSPEND:
- self.rpc_client.stack_suspend(req.context, identity)
- elif ac == self.RESUME:
- self.rpc_client.stack_resume(req.context, identity)
- elif ac == self.CHECK:
- self.rpc_client.stack_check(req.context, identity)
- elif ac == self.CANCEL_UPDATE:
- self.rpc_client.stack_cancel_update(req.context, identity,
- cancel_with_rollback=True)
- elif ac == self.CANCEL_WITHOUT_ROLLBACK:
- self.rpc_client.stack_cancel_update(req.context, identity,
- cancel_with_rollback=False)
- else:
+ do_action = getattr(self, ac, None)
+ if do_action is None:
raise exc.HTTPInternalServerError(_("Unexpected action %s") % ac)
+ do_action(req, tenant_id=tenant_id,
+ stack_name=stack_name, stack_id=stack_id,
+ body=body)
+
+ @util.registered_identified_stack
+ def suspend(self, req, identity, body=None):
+ self.rpc_client.stack_suspend(req.context, identity)
+
+ @util.registered_identified_stack
+ def resume(self, req, identity, body=None):
+ self.rpc_client.stack_resume(req.context, identity)
+
+ @util.registered_identified_stack
+ def check(self, req, identity, body=None):
+ self.rpc_client.stack_check(req.context, identity)
+
+ @util.registered_identified_stack
+ def cancel_update(self, req, identity, body=None):
+ self.rpc_client.stack_cancel_update(req.context, identity,
+ cancel_with_rollback=True)
+
+ @util.registered_identified_stack
+ def cancel_without_rollback(self, req, identity, body=None):
+ self.rpc_client.stack_cancel_update(req.context, identity,
+ cancel_with_rollback=False)
+
def create_resource(options):
"""Actions action factory method."""
diff --git a/heat/api/openstack/v1/events.py b/heat/api/openstack/v1/events.py
index 465d0ab45..87aa000a8 100644
--- a/heat/api/openstack/v1/events.py
+++ b/heat/api/openstack/v1/events.py
@@ -13,7 +13,6 @@
import itertools
-import six
from webob import exc
from heat.api.openstack.v1 import util
@@ -110,21 +109,21 @@ class EventController(object):
@util.registered_identified_stack
def index(self, req, identity, resource_name=None):
"""Lists summary information for all events."""
- whitelist = {
+ param_types = {
'limit': util.PARAM_TYPE_SINGLE,
'marker': util.PARAM_TYPE_SINGLE,
'sort_dir': util.PARAM_TYPE_SINGLE,
'sort_keys': util.PARAM_TYPE_MULTI,
'nested_depth': util.PARAM_TYPE_SINGLE,
}
- filter_whitelist = {
+ filter_param_types = {
'resource_status': util.PARAM_TYPE_MIXED,
'resource_action': util.PARAM_TYPE_MIXED,
'resource_name': util.PARAM_TYPE_MIXED,
'resource_type': util.PARAM_TYPE_MIXED,
}
- params = util.get_allowed_params(req.params, whitelist)
- filter_params = util.get_allowed_params(req.params, filter_whitelist)
+ params = util.get_allowed_params(req.params, param_types)
+ filter_params = util.get_allowed_params(req.params, filter_param_types)
int_params = (rpc_api.PARAM_LIMIT, rpc_api.PARAM_NESTED_DEPTH)
try:
@@ -133,7 +132,7 @@ class EventController(object):
params[key] = param_utils.extract_int(
key, params[key], allow_zero=True)
except ValueError as e:
- raise exc.HTTPBadRequest(six.text_type(e))
+ raise exc.HTTPBadRequest(str(e))
if resource_name is None:
if not filter_params:
diff --git a/heat/api/openstack/v1/resources.py b/heat/api/openstack/v1/resources.py
index dc01e5378..199f778e4 100644
--- a/heat/api/openstack/v1/resources.py
+++ b/heat/api/openstack/v1/resources.py
@@ -13,7 +13,6 @@
import itertools
-import six
from webob import exc
from heat.api.openstack.v1 import util
@@ -89,7 +88,7 @@ class ResourceController(object):
try:
return extractor(key, req.params[key])
except ValueError as e:
- raise exc.HTTPBadRequest(six.text_type(e))
+ raise exc.HTTPBadRequest(str(e))
else:
return default
@@ -97,21 +96,21 @@ class ResourceController(object):
def index(self, req, identity):
"""Lists information for all resources."""
- whitelist = {
- 'type': 'mixed',
- 'status': 'mixed',
- 'name': 'mixed',
- 'action': 'mixed',
- 'id': 'mixed',
- 'physical_resource_id': 'mixed'
+ param_types = {
+ 'type': util.PARAM_TYPE_MIXED,
+ 'status': util.PARAM_TYPE_MIXED,
+ 'name': util.PARAM_TYPE_MIXED,
+ 'action': util.PARAM_TYPE_MIXED,
+ 'id': util.PARAM_TYPE_MIXED,
+ 'physical_resource_id': util.PARAM_TYPE_MIXED,
}
invalid_keys = (set(req.params.keys()) -
- set(list(whitelist) + [rpc_api.PARAM_NESTED_DEPTH,
- rpc_api.PARAM_WITH_DETAIL]))
+ set(list(param_types) + [rpc_api.PARAM_NESTED_DEPTH,
+ rpc_api.PARAM_WITH_DETAIL]))
if invalid_keys:
raise exc.HTTPBadRequest(_('Invalid filter parameters %s') %
- six.text_type(list(invalid_keys)))
+ str(list(invalid_keys)))
nested_depth = self._extract_to_param(req,
rpc_api.PARAM_NESTED_DEPTH,
@@ -122,7 +121,7 @@ class ResourceController(object):
param_utils.extract_bool,
default=False)
- params = util.get_allowed_params(req.params, whitelist)
+ params = util.get_allowed_params(req.params, param_types)
res_list = self.rpc_client.list_stack_resources(req.context,
identity,
@@ -136,8 +135,8 @@ class ResourceController(object):
def show(self, req, identity, resource_name):
"""Gets detailed information for a resource."""
- whitelist = {'with_attr': util.PARAM_TYPE_MULTI}
- params = util.get_allowed_params(req.params, whitelist)
+ param_types = {'with_attr': util.PARAM_TYPE_MULTI}
+ params = util.get_allowed_params(req.params, param_types)
if 'with_attr' not in params:
params['with_attr'] = None
res = self.rpc_client.describe_stack_resource(req.context,
@@ -186,7 +185,7 @@ class ResourceController(object):
RES_UPDATE_MARK_UNHEALTHY,
body[RES_UPDATE_MARK_UNHEALTHY])
except ValueError as e:
- raise exc.HTTPBadRequest(six.text_type(e))
+ raise exc.HTTPBadRequest(str(e))
data[RES_UPDATE_STATUS_REASON] = body.get(RES_UPDATE_STATUS_REASON, "")
self.rpc_client.resource_mark_unhealthy(req.context,
diff --git a/heat/api/openstack/v1/software_configs.py b/heat/api/openstack/v1/software_configs.py
index 9724f0248..41f9486e4 100644
--- a/heat/api/openstack/v1/software_configs.py
+++ b/heat/api/openstack/v1/software_configs.py
@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from webob import exc
from heat.api.openstack.v1 import util
@@ -42,14 +41,14 @@ class SoftwareConfigController(object):
try:
return param_utils.extract_bool(name, value)
except ValueError as e:
- raise exc.HTTPBadRequest(six.text_type(e))
+ raise exc.HTTPBadRequest(str(e))
def _index(self, req, use_admin_cnxt=False):
- whitelist = {
+ param_types = {
'limit': util.PARAM_TYPE_SINGLE,
'marker': util.PARAM_TYPE_SINGLE
}
- params = util.get_allowed_params(req.params, whitelist)
+ params = util.get_allowed_params(req.params, param_types)
if use_admin_cnxt:
cnxt = context.get_admin_context()
diff --git a/heat/api/openstack/v1/software_deployments.py b/heat/api/openstack/v1/software_deployments.py
index 99aca5785..725b4ef1e 100644
--- a/heat/api/openstack/v1/software_deployments.py
+++ b/heat/api/openstack/v1/software_deployments.py
@@ -37,10 +37,10 @@ class SoftwareDeploymentController(object):
@util.registered_policy_enforce
def index(self, req):
"""List software deployments."""
- whitelist = {
+ param_types = {
'server_id': util.PARAM_TYPE_SINGLE,
}
- params = util.get_allowed_params(req.params, whitelist)
+ params = util.get_allowed_params(req.params, param_types)
sds = self.rpc_client.list_software_deployments(req.context, **params)
return {'software_deployments': sds}
diff --git a/heat/api/openstack/v1/stacks.py b/heat/api/openstack/v1/stacks.py
index d144ea197..d84adbc92 100644
--- a/heat/api/openstack/v1/stacks.py
+++ b/heat/api/openstack/v1/stacks.py
@@ -15,8 +15,7 @@
import contextlib
from oslo_log import log as logging
-import six
-from six.moves.urllib import parse
+from urllib import parse
from webob import exc
from heat.api.openstack.v1 import util
@@ -79,7 +78,7 @@ class InstantiationData(object):
try:
yield
except ValueError as parse_ex:
- mdict = {'type': data_type, 'error': six.text_type(parse_ex)}
+ mdict = {'type': data_type, 'error': str(parse_ex)}
msg = _("%(type)s not in valid format: %(error)s") % mdict
raise exc.HTTPBadRequest(msg)
@@ -101,7 +100,7 @@ class InstantiationData(object):
try:
adopt_data = template_format.simple_parse(adopt_data)
template_format.validate_template_limit(
- six.text_type(adopt_data['template']))
+ str(adopt_data['template']))
return adopt_data['template']
except (ValueError, KeyError) as ex:
err_reason = _('Invalid adopt data: %s') % ex
@@ -109,7 +108,7 @@ class InstantiationData(object):
elif self.PARAM_TEMPLATE in self.data:
template_data = self.data[self.PARAM_TEMPLATE]
if isinstance(template_data, dict):
- template_format.validate_template_limit(six.text_type(
+ template_format.validate_template_limit(str(
template_data))
return template_data
@@ -188,7 +187,7 @@ class StackController(object):
try:
return param_utils.extract_bool(name, value)
except ValueError as e:
- raise exc.HTTPBadRequest(six.text_type(e))
+ raise exc.HTTPBadRequest(str(e))
def _extract_int_param(self, name, value,
allow_zero=True, allow_negative=False):
@@ -196,16 +195,16 @@ class StackController(object):
return param_utils.extract_int(name, value,
allow_zero, allow_negative)
except ValueError as e:
- raise exc.HTTPBadRequest(six.text_type(e))
+ raise exc.HTTPBadRequest(str(e))
def _extract_tags_param(self, tags):
try:
return param_utils.extract_tags(tags)
except ValueError as e:
- raise exc.HTTPBadRequest(six.text_type(e))
+ raise exc.HTTPBadRequest(str(e))
def _index(self, req, use_admin_cnxt=False):
- filter_whitelist = {
+ filter_param_types = {
# usage of keys in this list are not encouraged, please use
# rpc_api.STACK_KEYS instead
'id': util.PARAM_TYPE_MIXED,
@@ -216,7 +215,7 @@ class StackController(object):
'username': util.PARAM_TYPE_MIXED,
'owner_id': util.PARAM_TYPE_MIXED,
}
- whitelist = {
+ param_types = {
'limit': util.PARAM_TYPE_SINGLE,
'marker': util.PARAM_TYPE_SINGLE,
'sort_dir': util.PARAM_TYPE_SINGLE,
@@ -229,7 +228,7 @@ class StackController(object):
'not_tags': util.PARAM_TYPE_SINGLE,
'not_tags_any': util.PARAM_TYPE_SINGLE,
}
- params = util.get_allowed_params(req.params, whitelist)
+ params = util.get_allowed_params(req.params, param_types)
stack_keys = dict.fromkeys(rpc_api.STACK_KEYS, util.PARAM_TYPE_MIXED)
unsupported = (
rpc_api.STACK_ID, # not user visible
@@ -247,7 +246,7 @@ class StackController(object):
for key in unsupported:
stack_keys.pop(key)
# downward compatibility
- stack_keys.update(filter_whitelist)
+ stack_keys.update(filter_param_types)
filter_params = util.get_allowed_params(req.params, stack_keys)
show_deleted = False
@@ -392,7 +391,7 @@ class StackController(object):
if not is_update and key in args:
msg = _("%s flag only supported in stack update (or update "
"preview) request.") % key
- raise exc.HTTPBadRequest(six.text_type(msg))
+ raise exc.HTTPBadRequest(str(msg))
return args
@util.registered_policy_enforce
@@ -520,8 +519,8 @@ class StackController(object):
raise exc.HTTPAccepted()
def _param_show_nested(self, req):
- whitelist = {'show_nested': 'single'}
- params = util.get_allowed_params(req.params, whitelist)
+ param_types = {'show_nested': util.PARAM_TYPE_SINGLE}
+ params = util.get_allowed_params(req.params, param_types)
p_name = 'show_nested'
if p_name in params:
@@ -605,9 +604,9 @@ class StackController(object):
data = InstantiationData(body)
- whitelist = {'show_nested': util.PARAM_TYPE_SINGLE,
- 'ignore_errors': util.PARAM_TYPE_SINGLE}
- params = util.get_allowed_params(req.params, whitelist)
+ param_types = {'show_nested': util.PARAM_TYPE_SINGLE,
+ 'ignore_errors': util.PARAM_TYPE_SINGLE}
+ params = util.get_allowed_params(req.params, param_types)
show_nested = False
p_name = rpc_api.PARAM_SHOW_NESTED
@@ -700,7 +699,7 @@ class StackController(object):
req.params.get(rpc_api.TEMPLATE_TYPE))
except ValueError as ex:
msg = _("Template type is not supported: %s") % ex
- raise exc.HTTPBadRequest(six.text_type(msg))
+ raise exc.HTTPBadRequest(str(msg))
return self.rpc_client.generate_template(req.context,
type_name,
@@ -753,10 +752,7 @@ class StackSerializer(serializers.JSONResponseSerializer):
def _populate_response_header(self, response, location, status):
response.status = status
- if six.PY2:
- response.headers['Location'] = location.encode('utf-8')
- else:
- response.headers['Location'] = location
+ response.headers['Location'] = location
response.headers['Content-Type'] = 'application/json'
return response
@@ -764,7 +760,7 @@ class StackSerializer(serializers.JSONResponseSerializer):
self._populate_response_header(response,
result['stack']['links'][0]['href'],
201)
- response.body = six.b(self.to_json(result))
+ response.body = self.to_json(result).encode('latin-1')
return response
diff --git a/heat/api/openstack/v1/util.py b/heat/api/openstack/v1/util.py
index 3bf6dab13..70a22d420 100644
--- a/heat/api/openstack/v1/util.py
+++ b/heat/api/openstack/v1/util.py
@@ -11,24 +11,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
+import functools
+
from webob import exc
from heat.common.i18n import _
from heat.common import identifier
-def policy_enforce(handler):
- """Decorator that enforces policies.
-
- Checks the path matches the request context and enforce policy defined in
- policy.json or in policies.
-
- This is a handler method decorator.
- """
- return _policy_enforce(handler)
-
-
def registered_policy_enforce(handler):
"""Decorator that enforces policies.
@@ -37,19 +27,20 @@ def registered_policy_enforce(handler):
This is a handler method decorator.
"""
- return _policy_enforce(handler, is_registered_policy=True)
-
-
-def _policy_enforce(handler, is_registered_policy=False):
- @six.wraps(handler)
+ @functools.wraps(handler)
def handle_stack_method(controller, req, tenant_id, **kwargs):
- if req.context.tenant_id != tenant_id and not req.context.is_admin:
+ _target = {"project_id": tenant_id}
+
+ if req.context.tenant_id != tenant_id and not (
+ req.context.is_admin or
+                req.context.system_scope == 'all'):
raise exc.HTTPForbidden()
allowed = req.context.policy.enforce(
context=req.context,
action=handler.__name__,
scope=controller.REQUEST_SCOPE,
- is_registered_policy=is_registered_policy)
+ target=_target,
+ is_registered_policy=True)
if not allowed:
raise exc.HTTPForbidden()
return handler(controller, req, **kwargs)
@@ -57,35 +48,24 @@ def _policy_enforce(handler, is_registered_policy=False):
return handle_stack_method
-def identified_stack(handler):
- """Decorator that passes a stack identifier instead of path components.
-
- This is a handler method decorator.
- """
-
- return _identified_stack(handler)
-
-
def registered_identified_stack(handler):
"""Decorator that passes a stack identifier instead of path components.
- This is a handler method decorator.
+ This is a handler method decorator. Policy is enforced using a registered
+ policy name.
"""
-
- return _identified_stack(handler, is_registered_policy=True)
+ return registered_policy_enforce(_identified_stack(handler))
-def _identified_stack(handler, is_registered_policy=False):
-
- @six.wraps(handler)
+def _identified_stack(handler):
+ @functools.wraps(handler)
def handle_stack_method(controller, req, stack_name, stack_id, **kwargs):
stack_identity = identifier.HeatIdentifier(req.context.tenant_id,
stack_name,
stack_id)
return handler(controller, req, dict(stack_identity), **kwargs)
- return _policy_enforce(handle_stack_method,
- is_registered_policy=is_registered_policy)
+ return handle_stack_method
def make_url(req, identity):
@@ -111,22 +91,22 @@ PARAM_TYPES = (
)
-def get_allowed_params(params, whitelist):
- """Extract from ``params`` all entries listed in ``whitelist``.
+def get_allowed_params(params, param_types):
+ """Extract from ``params`` all entries listed in ``param_types``.
The returning dict will contain an entry for a key if, and only if,
- there's an entry in ``whitelist`` for that key and at least one entry in
+ there's an entry in ``param_types`` for that key and at least one entry in
``params``. If ``params`` contains multiple entries for the same key, it
will yield an array of values: ``{key: [v1, v2,...]}``
:param params: a NestedMultiDict from webob.Request.params
- :param whitelist: an array of strings to whitelist
+    :param param_types: a dict of allowed parameters and their types
:returns: a dict with {key: value} pairs
"""
allowed_params = {}
- for key, get_type in six.iteritems(whitelist):
+ for key, get_type in param_types.items():
assert get_type in PARAM_TYPES
value = None
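
The behaviour documented in the updated docstring can be illustrated with a
standalone sketch (not Heat's code) that works on a plain dict of
query-string lists:

    from urllib.parse import parse_qs

    PARAM_TYPE_SINGLE = 'single'
    PARAM_TYPE_MULTI = 'multi'
    PARAM_TYPE_MIXED = 'mixed'

    def get_allowed_params(params, param_types):
        allowed = {}
        for key, get_type in param_types.items():
            values = params.get(key)
            if not values:
                continue
            if get_type == PARAM_TYPE_SINGLE:
                allowed[key] = values[0]
            elif get_type == PARAM_TYPE_MULTI:
                allowed[key] = values
            else:  # mixed: scalar when one value was given, list otherwise
                allowed[key] = values[0] if len(values) == 1 else values
        return allowed

    params = parse_qs('limit=10&tags=a&tags=b')
    print(get_allowed_params(params, {'limit': PARAM_TYPE_SINGLE,
                                      'tags': PARAM_TYPE_MULTI}))
    # -> {'limit': '10', 'tags': ['a', 'b']}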
diff --git a/heat/api/openstack/v1/views/views_common.py b/heat/api/openstack/v1/views/views_common.py
index 030980066..c5514e4ba 100644
--- a/heat/api/openstack/v1/views/views_common.py
+++ b/heat/api/openstack/v1/views/views_common.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
def get_collection_links(request, items):
diff --git a/heat/api/versions.py b/heat/api/versions.py
index 41cbf2699..c49b0ec76 100644
--- a/heat/api/versions.py
+++ b/heat/api/versions.py
@@ -13,9 +13,9 @@
"""Controller that returns information on the heat API versions."""
+import http.client
+
from oslo_serialization import jsonutils
-import six
-from six.moves import http_client
import webob.dec
@@ -43,11 +43,11 @@ class Controller(object):
body = jsonutils.dumps(dict(versions=version_objs))
response = webob.Response(request=req,
- status=http_client.MULTIPLE_CHOICES,
+ status=http.client.MULTIPLE_CHOICES,
content_type='application/json')
# NOTE(pas-ha) in WebOb, Response.body accepts only bytes,
# and Response.text accepts only unicode.
- response.text = six.text_type(body)
+ response.text = str(body)
return response
diff --git a/heat/cmd/all.py b/heat/cmd/all.py
index 9521af06e..d9c7dd0d6 100644
--- a/heat/cmd/all.py
+++ b/heat/cmd/all.py
@@ -15,27 +15,34 @@
An OpenStack Heat server that can run all services.
"""
+
+# flake8: noqa: E402
+
import eventlet
eventlet.monkey_patch(os=False)
-
-import six
+# Monkey patch the original current_thread to use the up-to-date _active
+# global variable. See https://bugs.launchpad.net/bugs/1863021 and
+# https://github.com/eventlet/eventlet/issues/592
+import __original_module_threading as orig_threading
+import threading # noqa
+orig_threading.current_thread.__globals__['_active'] = threading._active
import sys
+from oslo_config import cfg
+import oslo_i18n as i18n
+from oslo_log import log as logging
+from oslo_service import systemd
+
from heat.cmd import api
from heat.cmd import api_cfn
from heat.cmd import engine
from heat.common import config
from heat.common import messaging
from heat import version
-from oslo_config import cfg
-import oslo_i18n as i18n
-from oslo_log import log as logging
-from oslo_service import systemd
-i18n.enable_lazy()
-LOG = logging.getLogger('heat.all')
+i18n.enable_lazy()
API_LAUNCH_OPTS = {'setup_logging': False}
@@ -66,15 +73,15 @@ def _start_service_threads(services):
def launch_all(setup_logging=True):
- if setup_logging:
- logging.register_options(cfg.CONF)
- cfg.CONF(project='heat', prog='heat-all',
- version=version.version_info.version_string())
- if setup_logging:
- logging.setup(cfg.CONF, 'heat-all')
- config.set_config_defaults()
- messaging.setup()
- return _start_service_threads(set(cfg.CONF.heat_all.enabled_services))
+ if setup_logging:
+ logging.register_options(cfg.CONF)
+ cfg.CONF(project='heat', prog='heat-all',
+ version=version.version_info.version_string())
+ if setup_logging:
+ logging.setup(cfg.CONF, 'heat-all')
+ config.set_config_defaults()
+ messaging.setup()
+ return _start_service_threads(set(cfg.CONF.heat_all.enabled_services))
def main():
@@ -84,5 +91,5 @@ def main():
systemd.notify_once()
[service.wait() for service in services]
except RuntimeError as e:
- msg = six.text_type(e)
+ msg = str(e)
sys.exit("ERROR: %s" % msg)
diff --git a/heat/cmd/api.py b/heat/cmd/api.py
index 5a03682fb..4ab22b0d6 100644
--- a/heat/cmd/api.py
+++ b/heat/cmd/api.py
@@ -16,8 +16,16 @@
An OpenStack ReST API to Heat.
"""
+# flake8: noqa: E402
+
import eventlet
eventlet.monkey_patch(os=False)
+# Monkey patch the original current_thread to use the up-to-date _active
+# global variable. See https://bugs.launchpad.net/bugs/1863021 and
+# https://github.com/eventlet/eventlet/issues/592
+import __original_module_threading as orig_threading
+import threading # noqa
+orig_threading.current_thread.__globals__['_active'] = threading._active
import sys
@@ -26,7 +34,6 @@ import oslo_i18n as i18n
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_service import systemd
-import six
from heat.common import config
from heat.common import messaging
@@ -34,30 +41,32 @@ from heat.common import profiler
from heat.common import wsgi
from heat import version
+
i18n.enable_lazy()
-LOG = logging.getLogger('heat.api')
+CONF = cfg.CONF
def launch_api(setup_logging=True):
if setup_logging:
- logging.register_options(cfg.CONF)
- cfg.CONF(project='heat', prog='heat-api',
- version=version.version_info.version_string())
+ logging.register_options(CONF)
+ CONF(project='heat', prog='heat-api',
+ version=version.version_info.version_string())
if setup_logging:
- logging.setup(cfg.CONF, 'heat-api')
+ logging.setup(CONF, CONF.prog)
+ LOG = logging.getLogger(CONF.prog)
config.set_config_defaults()
messaging.setup()
app = config.load_paste_app()
- port = cfg.CONF.heat_api.bind_port
- host = cfg.CONF.heat_api.bind_host
+ port = CONF.heat_api.bind_port
+ host = CONF.heat_api.bind_host
LOG.info('Starting Heat REST API on %(host)s:%(port)s',
{'host': host, 'port': port})
- profiler.setup('heat-api', host)
+ profiler.setup(CONF.prog, host)
gmr.TextGuruMeditation.setup_autorun(version)
- server = wsgi.Server('heat-api', cfg.CONF.heat_api)
+ server = wsgi.Server(CONF.prog, CONF.heat_api)
server.start(app, default_port=port)
return server
@@ -68,5 +77,5 @@ def main():
systemd.notify_once()
server.wait()
except RuntimeError as e:
- msg = six.text_type(e)
+ msg = str(e)
sys.exit("ERROR: %s" % msg)
diff --git a/heat/cmd/api_cfn.py b/heat/cmd/api_cfn.py
index b74e304a9..2a44605e5 100644
--- a/heat/cmd/api_cfn.py
+++ b/heat/cmd/api_cfn.py
@@ -18,8 +18,16 @@ translates it into a native representation. It then calls the heat-engine via
AMQP RPC to implement them.
"""
+# flake8: noqa: E402
+
import eventlet
eventlet.monkey_patch(os=False)
+# Monkey patch the original current_thread to use the up-to-date _active
+# global variable. See https://bugs.launchpad.net/bugs/1863021 and
+# https://github.com/eventlet/eventlet/issues/592
+import __original_module_threading as orig_threading
+import threading # noqa
+orig_threading.current_thread.__globals__['_active'] = threading._active
import sys
@@ -28,7 +36,6 @@ import oslo_i18n as i18n
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_service import systemd
-import six
from heat.common import config
from heat.common import messaging
@@ -36,32 +43,34 @@ from heat.common import profiler
from heat.common import wsgi
from heat import version
+
i18n.enable_lazy()
-LOG = logging.getLogger('heat.api.cfn')
+CONF = cfg.CONF
def launch_cfn_api(setup_logging=True):
if setup_logging:
- logging.register_options(cfg.CONF)
- cfg.CONF(project='heat',
- prog='heat-api-cfn',
- version=version.version_info.version_string())
+ logging.register_options(CONF)
+ CONF(project='heat',
+ prog='heat-api-cfn',
+ version=version.version_info.version_string())
if setup_logging:
- logging.setup(cfg.CONF, 'heat-api-cfn')
+ logging.setup(CONF, CONF.prog)
logging.set_defaults()
+ LOG = logging.getLogger(CONF.prog)
config.set_config_defaults()
messaging.setup()
app = config.load_paste_app()
- port = cfg.CONF.heat_api_cfn.bind_port
- host = cfg.CONF.heat_api_cfn.bind_host
+ port = CONF.heat_api_cfn.bind_port
+ host = CONF.heat_api_cfn.bind_host
LOG.info('Starting Heat API on %(host)s:%(port)s',
{'host': host, 'port': port})
- profiler.setup('heat-api-cfn', host)
+ profiler.setup(CONF.prog, host)
gmr.TextGuruMeditation.setup_autorun(version)
- server = wsgi.Server('heat-api-cfn', cfg.CONF.heat_api_cfn)
+ server = wsgi.Server(CONF.prog, CONF.heat_api_cfn)
server.start(app, default_port=port)
return server
@@ -72,5 +81,5 @@ def main():
systemd.notify_once()
server.wait()
except RuntimeError as e:
- msg = six.text_type(e)
+ msg = str(e)
sys.exit("ERROR: %s" % msg)
diff --git a/heat/cmd/engine.py b/heat/cmd/engine.py
index d5af64b56..b969a4c22 100644
--- a/heat/cmd/engine.py
+++ b/heat/cmd/engine.py
@@ -18,8 +18,16 @@ Normal communications is done via the heat API which then calls into this
engine.
"""
+# flake8: noqa: E402
+
import eventlet
eventlet.monkey_patch()
+# Monkey patch the original current_thread to use the up-to-date _active
+# global variable. See https://bugs.launchpad.net/bugs/1863021 and
+# https://github.com/eventlet/eventlet/issues/592
+import __original_module_threading as orig_threading
+import threading # noqa
+orig_threading.current_thread.__globals__['_active'] = threading._active
import sys
@@ -37,19 +45,21 @@ from heat.engine import template
from heat.rpc import api as rpc_api
from heat import version
+
i18n.enable_lazy()
-LOG = logging.getLogger('heat.engine')
+CONF = cfg.CONF
def launch_engine(setup_logging=True):
if setup_logging:
- logging.register_options(cfg.CONF)
- cfg.CONF(project='heat', prog='heat-engine',
- version=version.version_info.version_string())
+ logging.register_options(CONF)
+ CONF(project='heat', prog='heat-engine',
+ version=version.version_info.version_string())
if setup_logging:
- logging.setup(cfg.CONF, 'heat-engine')
+ logging.setup(CONF, CONF.prog)
logging.set_defaults()
+ LOG = logging.getLogger(CONF.prog)
messaging.setup()
config.startup_sanity_check()
@@ -64,14 +74,14 @@ def launch_engine(setup_logging=True):
from heat.engine import service as engine # noqa
- profiler.setup('heat-engine', cfg.CONF.host)
+ profiler.setup(CONF.prog, CONF.host)
gmr.TextGuruMeditation.setup_autorun(version)
- srv = engine.EngineService(cfg.CONF.host, rpc_api.ENGINE_TOPIC)
- workers = cfg.CONF.num_engine_workers
+ srv = engine.EngineService(CONF.host, rpc_api.ENGINE_TOPIC)
+ workers = CONF.num_engine_workers
if not workers:
workers = max(4, processutils.get_worker_count())
- launcher = service.launch(cfg.CONF, srv, workers=workers,
+ launcher = service.launch(CONF, srv, workers=workers,
restart_method='mutate')
return launcher
diff --git a/heat/cmd/manage.py b/heat/cmd/manage.py
index 468132d80..c853d9ac2 100644
--- a/heat/cmd/manage.py
+++ b/heat/cmd/manage.py
@@ -19,7 +19,6 @@ import sys
from oslo_config import cfg
from oslo_log import log
-from six import moves
from heat.common import context
from heat.common import exception
@@ -106,7 +105,7 @@ def do_reset_stack_status():
"intended to recover from specific crashes."))
print(_("It is advised to shutdown all Heat engines beforehand."))
print(_("Continue ? [y/N]"))
- data = moves.input()
+ data = input()
if not data.lower().startswith('y'):
return
ctxt = context.get_admin_context()
@@ -186,7 +185,7 @@ def add_command_parsers(subparsers):
parser.add_argument(
'-b', '--batch_size', default='20',
help=_('Number of stacks to delete at a time (per transaction). '
- 'Note that a single stack may have many db rows '
+ 'Note that a single stack may have many DB rows '
'(events, etc.) associated with it.'))
# update_params parser
@@ -224,6 +223,7 @@ def add_command_parsers(subparsers):
ServiceManageCommand.add_service_parsers(subparsers)
+
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help=_('Show available commands.'),
diff --git a/heat/cmd/status.py b/heat/cmd/status.py
index 9fb60efc2..3281176f1 100644
--- a/heat/cmd/status.py
+++ b/heat/cmd/status.py
@@ -50,5 +50,6 @@ def main():
return upgradecheck.main(
cfg.CONF, project='heat', upgrade_command=Checks())
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/heat/common/config.py b/heat/common/config.py
index 1e38247a5..1c03b6f1b 100644
--- a/heat/common/config.py
+++ b/heat/common/config.py
@@ -16,6 +16,7 @@ import os
from eventlet.green import socket
from oslo_config import cfg
+from oslo_db import options as oslo_db_ops
from oslo_log import log as logging
from oslo_middleware import cors
from osprofiler import opts as profiler
@@ -60,6 +61,13 @@ service_opts = [
'SSL is used.')),
cfg.StrOpt('region_name_for_services',
help=_('Default region name used to get services endpoints.')),
+ cfg.StrOpt('region_name_for_shared_services',
+ help=_('Region name for shared services endpoints.')),
+ cfg.ListOpt('shared_services_types',
+ default=['image', 'volume', 'volumev2'],
+                help=_('The shared services located in the other region. '
+                       'Needs region_name_for_shared_services option to '
+                       'be set for this to take effect.')),
cfg.StrOpt('heat_stack_user_role',
default="heat_stack_user",
help=_('Keystone role for heat template-defined users.')),
@@ -88,7 +96,14 @@ service_opts = [
cfg.IntOpt('num_engine_workers',
help=_('Number of heat-engine processes to fork and run. '
'Will default to either to 4 or number of CPUs on '
- 'the host, whichever is greater.'))]
+ 'the host, whichever is greater.')),
+ cfg.StrOpt('server_keystone_endpoint_type',
+ choices=['', 'public', 'internal', 'admin'],
+ default='',
+ help=_('If set, is used to control which authentication '
+ 'endpoint is used by user-controlled servers to make '
+ 'calls back to Heat. '
+ 'If unset www_authenticate_uri is used.'))]
engine_opts = [
cfg.ListOpt('plugin_dirs',
@@ -169,6 +184,11 @@ engine_opts = [
'this limitation, any nova feature supported with '
'microversion number above max_nova_api_microversion '
'will not be available.')),
+ cfg.FloatOpt('max_ironic_api_microversion',
+ help=_('Maximum ironic API version for client plugin. With '
+ 'this limitation, any ironic feature supported with '
+ 'microversion number above '
+ 'max_ironic_api_microversion will not be available.')),
cfg.IntOpt('event_purge_batch_size',
min=1,
default=200,
@@ -441,7 +461,7 @@ def list_opts():
for client in ('aodh', 'barbican', 'cinder', 'designate',
'glance', 'heat', 'keystone', 'magnum', 'manila', 'mistral',
'monasca', 'neutron', 'nova', 'octavia', 'sahara', 'senlin',
- 'swift', 'trove', 'zaqar'
+ 'swift', 'trove', 'vitrage', 'zaqar'
):
client_specific_group = 'clients_' + client
yield client_specific_group, clients_opts
@@ -450,6 +470,7 @@ def list_opts():
yield 'clients_keystone', keystone_client_opts
yield 'clients_nova', client_http_log_debug_opts
yield 'clients_cinder', client_http_log_debug_opts
+ yield oslo_db_ops.list_opts()[0]
cfg.CONF.register_group(paste_deploy_group)
diff --git a/heat/common/context.py b/heat/common/context.py
index 55025e918..64eef397a 100644
--- a/heat/common/context.py
+++ b/heat/common/context.py
@@ -11,6 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+
from keystoneauth1 import access
from keystoneauth1.identity import access as access_plugin
from keystoneauth1.identity import generic
@@ -23,7 +25,6 @@ from oslo_log import log as logging
import oslo_messaging
from oslo_middleware import request_id as oslo_request_id
from oslo_utils import importutils
-import six
from heat.common import config
from heat.common import endpoint_utils
@@ -36,6 +37,8 @@ from heat.engine import clients
LOG = logging.getLogger(__name__)
+cfg.CONF.import_opt('client_retry_limit', 'heat.common.config')
+
# Note, we yield the options via list_opts to enable generation of the
# sample heat.conf, but we don't register these options directly via
# cfg.CONF.register*, it's done via ks_loading.register_auth_conf_options
@@ -103,6 +106,8 @@ class RequestContext(context.RequestContext):
self.username = username
self.password = password
+ if username is None and password is None:
+ self.username = self.user_name
self.region_name = region_name
self.aws_creds = aws_creds
self.project_name = project_name
@@ -111,6 +116,7 @@ class RequestContext(context.RequestContext):
self._session = None
self._clients = None
self._keystone_session = session.Session(
+ connect_retries=cfg.CONF.client_retry_limit,
**config.get_ssl_options('keystone'))
self.trust_id = trust_id
self.trustor_user_id = trustor_user_id
@@ -404,7 +410,7 @@ def ContextMiddleware_filter_factory(global_conf, **local_conf):
def request_context(func):
- @six.wraps(func)
+ @functools.wraps(func)
def wrapped(self, ctx, *args, **kwargs):
try:
return func(self, ctx, *args, **kwargs)
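
On the six.wraps to functools.wraps change above, an illustrative sketch (function names are invented) showing that the stdlib decorator preserves the wrapped function's metadata in the same way:

import functools

def request_logger(func):
    @functools.wraps(func)  # keeps func.__name__ and func.__doc__
    def wrapped(*args, **kwargs):
        print('calling %s' % func.__name__)
        return func(*args, **kwargs)
    return wrapped

@request_logger
def list_stacks():
    """Dummy RPC-style method."""
    return []

assert list_stacks.__name__ == 'list_stacks'
assert list_stacks.__doc__ == 'Dummy RPC-style method.'
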
diff --git a/heat/common/crypt.py b/heat/common/crypt.py
index b3afc7761..ef12dc9f7 100644
--- a/heat/common/crypt.py
+++ b/heat/common/crypt.py
@@ -46,7 +46,7 @@ class SymmetricCrypto(object):
Note: This is a reimplementation of the decryption algorithm from
oslo-incubator, and is provided for backward compatibility. Once we have a
- db migration script available to re-encrypt using new encryption method as
+ DB migration script available to re-encrypt using new encryption method as
part of upgrade, this can be removed.
:param enctype: Encryption Cipher name (default: AES)
@@ -125,7 +125,12 @@ def decrypted_dict(data, encryption_key=None):
return return_data
for prop_name, prop_value in data.items():
method, value = prop_value
- decrypted_value = decrypt(method, value, encryption_key)
+ try:
+ decrypted_value = decrypt(method, value, encryption_key)
+ except UnicodeDecodeError:
+ # The dict contained valid JSON on the way in, so if what comes
+ # out is garbage then the key was incorrect.
+ raise exception.InvalidEncryptionKey()
prop_string = jsonutils.loads(decrypted_value)
return_data[prop_name] = prop_string
return return_data
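
A rough illustration of the failure mode the new except clause above handles (sketch only, no Heat code involved): data decrypted with the wrong key is effectively random bytes, which will not decode as UTF-8, so a UnicodeDecodeError surfaces before jsonutils.loads() is ever reached.

import os

garbage = b'\xff' + os.urandom(63)  # 0xff never occurs in valid UTF-8
try:
    garbage.decode('utf-8')
except UnicodeDecodeError:
    print('undecodable output -> treat as InvalidEncryptionKey')
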
diff --git a/heat/common/environment_util.py b/heat/common/environment_util.py
index f060ca019..8a6ed4393 100644
--- a/heat/common/environment_util.py
+++ b/heat/common/environment_util.py
@@ -13,7 +13,6 @@
import collections
from oslo_serialization import jsonutils
-import six
from heat.common import environment_format as env_fmt
from heat.common import exception
@@ -58,14 +57,14 @@ def merge_map(old, new, deep_merge=False):
if v is not None:
if not deep_merge:
old[k] = v
- elif isinstance(v, collections.Mapping):
+ elif isinstance(v, collections.abc.Mapping):
old_v = old.get(k)
old[k] = merge_map(old_v, v, deep_merge) if old_v else v
- elif (isinstance(v, collections.Sequence) and
- not isinstance(v, six.string_types)):
+ elif (isinstance(v, collections.abc.Sequence) and
+ not isinstance(v, str)):
old_v = old.get(k)
old[k] = merge_list(old_v, v) if old_v else v
- elif isinstance(v, six.string_types):
+ elif isinstance(v, str):
old[k] = ''.join([old.get(k, ''), v])
else:
old[k] = v
@@ -76,14 +75,14 @@ def merge_map(old, new, deep_merge=False):
def parse_param(p_val, p_schema):
try:
if p_schema.type == p_schema.MAP:
- if not isinstance(p_val, six.string_types):
+ if not isinstance(p_val, str):
p_val = jsonutils.dumps(p_val)
if p_val:
return jsonutils.loads(p_val)
- elif not isinstance(p_val, collections.Sequence):
+ elif not isinstance(p_val, collections.abc.Sequence):
raise ValueError()
except (ValueError, TypeError) as err:
- msg = _("Invalid parameter in environment %s.") % six.text_type(err)
+ msg = _("Invalid parameter in environment %s.") % str(err)
raise ValueError(msg)
return p_val
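
A side note on the collections.abc checks introduced above (standalone sketch, nothing Heat-specific): str is itself a Sequence, which is why the merge and parse logic always tests for str explicitly instead of relying on the Sequence check alone.

import collections.abc

for value in ('abc', [1, 2], {'k': 'v'}, 3):
    print(type(value).__name__,
          isinstance(value, collections.abc.Sequence),
          isinstance(value, collections.abc.Mapping))
# str  True  False   <- strings are Sequences too
# list True  False
# dict False True
# int  False False
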
diff --git a/heat/common/exception.py b/heat/common/exception.py
index 48faa10ba..ecb809eb5 100644
--- a/heat/common/exception.py
+++ b/heat/common/exception.py
@@ -21,8 +21,6 @@ import sys
from oslo_log import log as logging
from oslo_utils import excutils
-import six
-
from heat.common.i18n import _
_FATAL_EXCEPTION_FORMAT_ERRORS = False
@@ -38,7 +36,6 @@ ERROR_CODE_MAP = {
}
-@six.python_2_unicode_compatible
class HeatException(Exception):
"""Base Heat Exception.
@@ -71,7 +68,7 @@ class HeatException(Exception):
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
- for name, value in six.iteritems(kwargs):
+ for name, value in kwargs.items():
LOG.error("%(name)s: %(value)s",
{'name': name, 'value': value}) # noqa
@@ -222,7 +219,7 @@ class HeatExceptionWithPath(HeatException):
if path is not None:
if isinstance(path, list):
self.path = path
- elif isinstance(path, six.string_types):
+ elif isinstance(path, str):
self.path = [path]
result_path = ''
@@ -247,7 +244,7 @@ class StackValidationFailed(HeatExceptionWithPath):
resource=None):
if path is None:
path = []
- elif isinstance(path, six.string_types):
+ elif isinstance(path, str):
path = [path]
if resource is not None and not path:
@@ -263,8 +260,8 @@ class StackValidationFailed(HeatExceptionWithPath):
# oslo.messaging.
self.args = error.args
else:
- str_error = six.text_type(type(error).__name__)
- message = six.text_type(error)
+ str_error = str(type(error).__name__)
+ message = str(error)
else:
str_error = error
@@ -331,12 +328,12 @@ class ResourceFailure(HeatExceptionWithPath):
path = exception_or_error.path
else:
self.exc = exception_or_error
- error = six.text_type(type(self.exc).__name__)
- message = six.text_type(self.exc)
+ error = str(type(self.exc).__name__)
+ message = str(self.exc)
path = res_path
else:
self.exc = None
- res_failed = 'Resource %s failed: ' % action.upper()
+ res_failed = 'Resource %s failed: ' % self.action.upper()
if res_failed in exception_or_error:
(error, message, new_path) = self._from_status_reason(
exception_or_error)
@@ -418,7 +415,7 @@ class UpdateReplace(Exception):
"""Raised when resource update requires replacement."""
def __init__(self, resource_name='Unknown'):
msg = _("The Resource %s requires replacement.") % resource_name
- super(Exception, self).__init__(six.text_type(msg))
+ super(Exception, self).__init__(str(msg))
class ResourceUnknownStatus(HeatException):
@@ -443,7 +440,7 @@ class ResourceInError(HeatException):
class UpdateInProgress(Exception):
def __init__(self, resource_name='Unknown'):
msg = _("The resource %s is already being updated.") % resource_name
- super(Exception, self).__init__(six.text_type(msg))
+ super(Exception, self).__init__(str(msg))
class HTTPExceptionDisguise(Exception):
diff --git a/heat/common/grouputils.py b/heat/common/grouputils.py
index d62a20c82..69e3c53fd 100644
--- a/heat/common/grouputils.py
+++ b/heat/common/grouputils.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import status
@@ -122,7 +120,7 @@ def get_members(group, include_failed=False):
"""
resources = []
if group.nested():
- resources = [r for r in six.itervalues(group.nested())
+ resources = [r for r in group.nested().values()
if include_failed or r.status != r.FAILED]
return sorted(resources,
diff --git a/heat/common/i18n.py b/heat/common/i18n.py
index e2c423cb0..2b3cc178e 100644
--- a/heat/common/i18n.py
+++ b/heat/common/i18n.py
@@ -17,29 +17,10 @@
# recommendations from
# https://docs.openstack.org/oslo.i18n/latest/user/usage.html
-import six
-
import oslo_i18n as i18n
-from oslo_utils import encodeutils
_translators = i18n.TranslatorFactory(domain='heat')
# The primary translation function using the well-known name "_"
_ = _translators.primary
-
-
-def repr_wrapper(klass):
- """A decorator that defines __repr__ method under Python 2.
-
- Under Python 2 it will encode repr return value to str type.
- Under Python 3 it does nothing.
- """
- if six.PY2:
- if '__repr__' not in klass.__dict__:
- raise ValueError("@repr_wrapper cannot be applied "
- "to %s because it doesn't define __repr__()." %
- klass.__name__)
- klass._repr = klass.__repr__
- klass.__repr__ = lambda self: encodeutils.safe_encode(self._repr())
- return klass
diff --git a/heat/common/identifier.py b/heat/common/identifier.py
index 6d39569a6..02628b478 100644
--- a/heat/common/identifier.py
+++ b/heat/common/identifier.py
@@ -15,12 +15,12 @@ import collections
import re
from oslo_utils import encodeutils
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
from heat.common.i18n import _
-class HeatIdentifier(collections.Mapping):
+class HeatIdentifier(collections.abc.Mapping):
FIELDS = (
TENANT, STACK_NAME, STACK_ID, PATH
diff --git a/heat/common/lifecycle_plugin_utils.py b/heat/common/lifecycle_plugin_utils.py
index c1a4bd007..48745889e 100644
--- a/heat/common/lifecycle_plugin_utils.py
+++ b/heat/common/lifecycle_plugin_utils.py
@@ -30,7 +30,7 @@ def get_plug_point_class_instances():
The list of class instances is sorted using get_ordinal methods
on the plug point classes. If class1.ordinal() < class2.ordinal(),
- then class1 will be before before class2 in the list.
+ then class1 will be before class2 in the list.
"""
global pp_class_instances
if pp_class_instances is None:
diff --git a/heat/common/password_gen.py b/heat/common/password_gen.py
index 52d5a3a24..0a5c33384 100644
--- a/heat/common/password_gen.py
+++ b/heat/common/password_gen.py
@@ -12,11 +12,10 @@
# under the License.
import collections
+import io
import random as random_module
import string
-import six
-
# NOTE(pas-ha) Heat officially supports only POSIX::Linux platform
# where os.urandom() and random.SystemRandom() are available
@@ -88,19 +87,19 @@ def generate_password(length, char_classes):
:param char_classes: Iterable over classes of characters from which to
generate a password
"""
- char_buffer = six.StringIO()
+ char_buffer = io.StringIO()
all_allowed_chars = set()
# Add the minimum number of chars from each char class
for char_class in char_classes:
all_allowed_chars |= char_class.allowed_chars
allowed_chars = tuple(char_class.allowed_chars)
- for i in six.moves.xrange(char_class.min_count):
+ for i in range(char_class.min_count):
char_buffer.write(random.choice(allowed_chars))
# Fill up rest with random chars from provided classes
combined_chars = tuple(all_allowed_chars)
- for i in six.moves.xrange(max(0, length - char_buffer.tell())):
+ for i in range(max(0, length - char_buffer.tell())):
char_buffer.write(random.choice(combined_chars))
# Shuffle string
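
The generate_password() flow above, restated as a self-contained sketch (character classes and lengths are invented; random.SystemRandom stands in for the module-level random used here):

import io
import random
import string

sysrand = random.SystemRandom()

def sketch_password(length, char_classes):
    """char_classes: iterable of (allowed_chars, min_count) pairs."""
    buf = io.StringIO()
    combined = set()
    for allowed, min_count in char_classes:
        combined |= set(allowed)
        for _ in range(min_count):
            buf.write(sysrand.choice(tuple(allowed)))
    combined = tuple(combined)
    for _ in range(max(0, length - buf.tell())):
        buf.write(sysrand.choice(combined))
    shuffled = list(buf.getvalue())
    sysrand.shuffle(shuffled)
    return ''.join(shuffled)

print(sketch_password(12, [(string.ascii_lowercase, 2), (string.digits, 2)]))
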
diff --git a/heat/common/plugin_loader.py b/heat/common/plugin_loader.py
index 58f3736fc..59f75df15 100644
--- a/heat/common/plugin_loader.py
+++ b/heat/common/plugin_loader.py
@@ -19,12 +19,12 @@ existing package tree, use create_subpackage() to dynamically create a package
for them before loading them.
"""
+import functools
import pkgutil
import sys
import types
from oslo_log import log as logging
-import six
LOG = logging.getLogger(__name__)
@@ -46,7 +46,7 @@ def create_subpackage(path, parent_package_name, subpackage_name="plugins"):
package_name = _module_name(parent_package_name, subpackage_name)
package = types.ModuleType(package_name)
- package.__path__ = ([path] if isinstance(path, six.string_types)
+ package.__path__ = ([path] if isinstance(path, str)
else list(path))
sys.modules[package_name] = package
@@ -75,7 +75,7 @@ def _import_module(importer, module_name, package):
# Make this accessible through the parent package for static imports
local_name = module_name.partition(package.__name__ + '.')[2]
module_components = local_name.split('.')
- parent = six.moves.reduce(getattr, module_components[:-1], package)
+ parent = functools.reduce(getattr, module_components[:-1], package)
setattr(parent, module_components[-1], module)
return module
diff --git a/heat/common/pluginutils.py b/heat/common/pluginutils.py
index 4820a69bb..c4da0ec06 100644
--- a/heat/common/pluginutils.py
+++ b/heat/common/pluginutils.py
@@ -12,7 +12,6 @@
# under the License.
from oslo_log import log as logging
-import six
LOG = logging.getLogger(__name__)
@@ -21,7 +20,7 @@ LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception):
LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.',
- {'module_name': entrypoint.module_name,
+ {'module_name': entrypoint.module,
'message': getattr(exception, 'message',
- six.text_type(exception)),
+ str(exception)),
'name': entrypoint.name})
diff --git a/heat/common/policy.py b/heat/common/policy.py
index 312608c93..38971e001 100644
--- a/heat/common/policy.py
+++ b/heat/common/policy.py
@@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -147,11 +146,11 @@ class ResourceEnforcer(Enforcer):
result = super(ResourceEnforcer, self).enforce(
context, res_type,
scope=scope or 'resource_types',
- target=target, is_registered_policy=is_registered_policy)
+ target=target, is_registered_policy=is_registered_policy)
except policy.PolicyNotRegistered:
result = True
except self.exc as ex:
- LOG.info(six.text_type(ex))
+ LOG.info(str(ex))
raise
if not result:
if self.exc:
diff --git a/heat/common/serializers.py b/heat/common/serializers.py
index 3125ba98b..a9ccd9576 100644
--- a/heat/common/serializers.py
+++ b/heat/common/serializers.py
@@ -23,7 +23,6 @@ import datetime
from lxml import etree
from oslo_log import log as logging
from oslo_serialization import jsonutils
-import six
LOG = logging.getLogger(__name__)
@@ -34,7 +33,7 @@ class JSONResponseSerializer(object):
def sanitizer(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
- return six.text_type(obj)
+ return str(obj)
response = jsonutils.dumps(data, default=sanitizer)
@@ -46,7 +45,7 @@ class JSONResponseSerializer(object):
def default(self, response, result):
response.content_type = 'application/json'
- response.body = six.b(self.to_json(result))
+ response.body = self.to_json(result).encode('latin-1')
# Escape XML serialization for these keys, as the AWS API defines them as
@@ -75,11 +74,11 @@ class XMLResponseSerializer(object):
else:
self.object_to_element(value, subelement)
else:
- element.text = six.text_type(obj)
+ element.text = str(obj)
def to_xml(self, data):
# Assumption : root node is dict with single key
- root = next(six.iterkeys(data))
+ root = next(iter(data.keys()))
eltree = etree.Element(root)
self.object_to_element(data.get(root), eltree)
response = etree.tostring(eltree)
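
The datetime sanitizer pattern used by JSONResponseSerializer above, shown standalone with the stdlib json module instead of oslo_serialization (sketch only):

import datetime
import json

def sanitizer(obj):
    # ISO format for datetimes, str() fallback for anything else.
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    return str(obj)

payload = {'created_at': datetime.datetime(2024, 1, 2, 3, 4, 5)}
print(json.dumps(payload, default=sanitizer))
# {"created_at": "2024-01-02T03:04:05"}
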
diff --git a/heat/common/service_utils.py b/heat/common/service_utils.py
index b5cf00485..a59fef8e8 100644
--- a/heat/common/service_utils.py
+++ b/heat/common/service_utils.py
@@ -51,14 +51,10 @@ def format_service(service):
return
status = 'down'
- if service.updated_at is not None:
- if ((timeutils.utcnow() - service.updated_at).total_seconds()
- <= service.report_interval):
- status = 'up'
- else:
- if ((timeutils.utcnow() - service.created_at).total_seconds()
- <= service.report_interval):
- status = 'up'
+ last_updated = service.updated_at or service.created_at
+ check_interval = (timeutils.utcnow() - last_updated).total_seconds()
+ if check_interval <= 2 * service.report_interval:
+ status = 'up'
result = {
SERVICE_ID: service.id,
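
The simplified liveness rule above as a standalone sketch (function and argument names are mine, not Heat's): a service is 'up' when its last heartbeat, falling back to created_at, is within twice its report interval.

import datetime

def service_status(updated_at, created_at, report_interval, now):
    last = updated_at or created_at
    age = (now - last).total_seconds()
    return 'up' if age <= 2 * report_interval else 'down'

now = datetime.datetime.utcnow()
print(service_status(now - datetime.timedelta(seconds=30), None, 60, now))   # up
print(service_status(now - datetime.timedelta(seconds=150), None, 60, now))  # down
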
diff --git a/heat/common/short_id.py b/heat/common/short_id.py
index ef4acd0bb..54221d336 100644
--- a/heat/common/short_id.py
+++ b/heat/common/short_id.py
@@ -19,8 +19,6 @@ The IDs each comprise 12 (lower-case) alphanumeric characters.
import base64
import uuid
-import six
-
from heat.common.i18n import _
@@ -30,12 +28,12 @@ def _to_byte_string(value, num_bits):
Padding is added at the end (i.e. after the least-significant bit) if
required.
"""
- shifts = six.moves.xrange(num_bits - 8, -8, -8)
+ shifts = range(num_bits - 8, -8, -8)
def byte_at(off):
return (value >> off if off >= 0 else value << -off) & 0xff
- return b''.join(six.int2byte(byte_at(offset)) for offset in shifts)
+ return b''.join(bytes((byte_at(offset),)) for offset in shifts)
def get_id(source_uuid):
@@ -43,7 +41,7 @@ def get_id(source_uuid):
The supplied UUID must be a version 4 UUID object.
"""
- if isinstance(source_uuid, six.string_types):
+ if isinstance(source_uuid, str):
source_uuid = uuid.UUID(source_uuid)
if source_uuid.version != 4:
raise ValueError(_('Invalid UUID version (%d)') % source_uuid.version)
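
A quick check of the six.int2byte -> bytes((x,)) replacement in _to_byte_string() above; this reproduces the helper outside Heat purely to show the idiom:

def to_byte_string(value, num_bits):
    shifts = range(num_bits - 8, -8, -8)

    def byte_at(off):
        return (value >> off if off >= 0 else value << -off) & 0xff

    return b''.join(bytes((byte_at(off),)) for off in shifts)

assert bytes((0xab,)) == b'\xab'            # the idiom replacing six.int2byte
assert to_byte_string(0xabcd, 16) == b'\xab\xcd'
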
diff --git a/heat/common/template_format.py b/heat/common/template_format.py
index 282487889..9978bf0ab 100644
--- a/heat/common/template_format.py
+++ b/heat/common/template_format.py
@@ -15,7 +15,6 @@ import collections
from oslo_config import cfg
from oslo_serialization import jsonutils
-import six
import yaml
from heat.common import exception
@@ -73,7 +72,7 @@ def simple_parse(tmpl_str, tmpl_url=None):
except yaml.YAMLError as yea:
if tmpl_url is None:
tmpl_url = '[root stack]'
- yea = six.text_type(yea)
+ yea = str(yea)
msg = _('Error parsing template %(tmpl)s '
'%(yea)s') % {'tmpl': tmpl_url, 'yea': yea}
raise ValueError(msg)
@@ -111,7 +110,7 @@ def parse(tmpl_str, tmpl_url=None):
# TODO(ricolin): Move this validation to api side.
# Validate nested stack template.
- validate_template_limit(six.text_type(tmpl_str))
+ validate_template_limit(str(tmpl_str))
tpl = simple_parse(tmpl_str, tmpl_url)
# Looking for supported version keys in the loaded template
@@ -136,7 +135,7 @@ def convert_json_to_yaml(json_str):
def top_level_items(tpl):
yield ("HeatTemplateFormatVersion", '2012-12-12')
- for k, v in six.iteritems(tpl):
+ for k, v in tpl.items():
if k != 'AWSTemplateFormatVersion':
yield k, v
diff --git a/heat/common/urlfetch.py b/heat/common/urlfetch.py
index 54cc90027..633c309c0 100644
--- a/heat/common/urlfetch.py
+++ b/heat/common/urlfetch.py
@@ -14,12 +14,14 @@
"""Utility for fetching a resource (e.g. a template) from a URL."""
import socket
+import urllib.error
+import urllib.parse
+import urllib.request
from oslo_config import cfg
from oslo_log import log as logging
import requests
from requests import exceptions
-from six.moves import urllib
from heat.common import exception
from heat.common.i18n import _
diff --git a/heat/common/wsgi.py b/heat/common/wsgi.py
index 15ec9e58e..7e63ee883 100644
--- a/heat/common/wsgi.py
+++ b/heat/common/wsgi.py
@@ -40,7 +40,6 @@ from oslo_utils import encodeutils
from oslo_utils import importutils
from paste.deploy import loadwsgi
from routes import middleware
-import six
import webob.dec
import webob.exc
@@ -264,7 +263,7 @@ def get_socket(conf, default_port):
backlog=conf.backlog,
family=address_family)
except socket.error as err:
- if err.args[0] != errno.EADDRINUSE:
+ if err.errno != errno.EADDRINUSE:
raise
eventlet.sleep(0.1)
if not sock:
@@ -585,7 +584,7 @@ class Server(object):
keepalive=cfg.CONF.eventlet_opts.wsgi_keep_alive,
socket_timeout=socket_timeout)
except socket.error as err:
- if err[0] != errno.EINVAL:
+ if err.errno != errno.EINVAL:
raise
self.pool.waitall()
@@ -648,7 +647,7 @@ class Debug(Middleware):
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
- for (key, value) in six.iteritems(resp.headers):
+ for (key, value) in resp.headers.items():
print(key, "=", value)
print('')
@@ -823,7 +822,7 @@ class JSONRequestDeserializer(object):
raise exception.RequestLimitExceeded(message=msg)
return jsonutils.loads(datastring)
except ValueError as ex:
- raise webob.exc.HTTPBadRequest(six.text_type(ex))
+ raise webob.exc.HTTPBadRequest(str(ex))
def default(self, request):
if self.has_body(request):
@@ -1004,8 +1003,7 @@ def translate_exception(exc, locale):
return exc
-@six.add_metaclass(abc.ABCMeta)
-class BasePasteFactory(object):
+class BasePasteFactory(object, metaclass=abc.ABCMeta):
"""A base class for paste app and filter factories.
Sub-classes must override the KEY class attribute and provide
diff --git a/heat/db/sqlalchemy/api.py b/heat/db/sqlalchemy/api.py
index 1642c33e6..3672ce036 100644
--- a/heat/db/sqlalchemy/api.py
+++ b/heat/db/sqlalchemy/api.py
@@ -13,6 +13,7 @@
"""Implementation of SQLAlchemy backend."""
import datetime
+import functools
import itertools
import random
@@ -24,9 +25,9 @@ from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils
from oslo_log import log as logging
from oslo_utils import encodeutils
+from oslo_utils import excutils
from oslo_utils import timeutils
import osprofiler.sqlalchemy
-import six
import sqlalchemy
from sqlalchemy import and_
from sqlalchemy import func
@@ -48,6 +49,12 @@ CONF = cfg.CONF
CONF.import_opt('hidden_stack_tags', 'heat.common.config')
CONF.import_opt('max_events_per_stack', 'heat.common.config')
CONF.import_group('profiler', 'heat.common.config')
+CONF.import_opt('db_max_retries', 'oslo_db.options', group='database')
+CONF.import_opt('db_retry_interval', 'oslo_db.options', group='database')
+CONF.import_opt(
+ 'db_inc_retry_interval', 'oslo_db.options', group='database')
+CONF.import_opt(
+ 'db_max_retry_interval', 'oslo_db.options', group='database')
options.set_defaults(CONF)
@@ -88,9 +95,33 @@ def get_session():
return get_facade().get_session()
+def retry_on_db_error(func):
+ @functools.wraps(func)
+ def try_func(context, *args, **kwargs):
+ if (context.session.transaction is None or
+ not context.session.autocommit):
+ wrapped = oslo_db_api.wrap_db_retry(
+ max_retries=CONF.database.db_max_retries,
+ retry_on_deadlock=True,
+ retry_on_disconnect=True,
+ retry_interval=CONF.database.db_retry_interval,
+ inc_retry_interval=CONF.database.db_inc_retry_interval,
+ max_retry_interval=CONF.database.db_max_retry_interval)(func)
+ return wrapped(context, *args, **kwargs)
+ else:
+ try:
+ return func(context, *args, **kwargs)
+ except (db_exception.DBDeadlock, db_exception.DBConnectionError):
+ with excutils.save_and_reraise_exception():
+ LOG.debug('Not retrying on DBDeadlock and '
+ 'DBConnectionError because the '
+ 'transaction is not closed')
+ return try_func
+
+
def update_and_save(context, obj, values):
with context.session.begin(subtransactions=True):
- for k, v in six.iteritems(values):
+ for k, v in values.items():
setattr(obj, k, v)
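
The retry_on_db_error decorator defined above builds on oslo.db's wrap_db_retry; a minimal sketch of that underlying decorator applied directly (the retry values and the decorated function are examples only):

from oslo_db import api as oslo_db_api

@oslo_db_api.wrap_db_retry(max_retries=3,
                           retry_on_deadlock=True,
                           retry_on_disconnect=True,
                           retry_interval=0.5,
                           inc_retry_interval=True,
                           max_retry_interval=5)
def create_row(session, values):
    # A DBDeadlock or DBConnectionError raised here triggers a retry with an
    # increasing sleep, up to max_retries attempts.
    with session.begin():
        session.add(values)
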
@@ -142,7 +173,11 @@ def raw_template_update(context, template_id, values):
def raw_template_delete(context, template_id):
- raw_template = raw_template_get(context, template_id)
+ try:
+ raw_template = raw_template_get(context, template_id)
+ except exception.NotFound:
+ # Ignore not found
+ return
raw_tmpl_files_id = raw_template.files_id
session = context.session
with session.begin(subtransactions=True):
@@ -153,7 +188,12 @@ def raw_template_delete(context, template_id):
# delete that too
if session.query(models.RawTemplate).filter_by(
files_id=raw_tmpl_files_id).first() is None:
- raw_tmpl_files = raw_template_files_get(context, raw_tmpl_files_id)
+ try:
+ raw_tmpl_files = raw_template_files_get(
+ context, raw_tmpl_files_id)
+ except exception.NotFound:
+ # Ignore not found
+ return
session.delete(raw_tmpl_files)
@@ -234,14 +274,13 @@ def resource_get_all(context):
return results
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_db_error
def resource_purge_deleted(context, stack_id):
filters = {'stack_id': stack_id, 'action': 'DELETE', 'status': 'COMPLETE'}
query = context.session.query(models.Resource)
result = query.filter_by(**filters)
attr_ids = [r.attr_data_id for r in result if r.attr_data_id is not None]
- with context.session.begin(subtransactions=True):
+ with context.session.begin():
result.delete()
if attr_ids:
context.session.query(models.ResourcePropertiesData).filter(
@@ -256,10 +295,15 @@ def _add_atomic_key_to_values(values, atomic_key):
values['atomic_key'] = atomic_key + 1
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_db_error
def resource_update(context, resource_id, values, atomic_key,
expected_engine_id=None):
+ return _try_resource_update(context, resource_id, values, atomic_key,
+ expected_engine_id)
+
+
+def _try_resource_update(context, resource_id, values, atomic_key,
+ expected_engine_id=None):
session = context.session
with session.begin(subtransactions=True):
_add_atomic_key_to_values(values, atomic_key)
@@ -289,7 +333,7 @@ def resource_delete(context, resource_id):
def resource_attr_id_set(context, resource_id, atomic_key, attr_id):
session = context.session
- with session.begin(subtransactions=True):
+ with session.begin():
values = {'attr_data_id': attr_id}
_add_atomic_key_to_values(values, atomic_key)
rows_updated = session.query(models.Resource).filter(and_(
@@ -304,7 +348,7 @@ def resource_attr_id_set(context, resource_id, atomic_key, attr_id):
else:
# Someone else set the attr_id first and/or we have a stale
# view of the resource based on atomic_key, so delete the
- # resource_properties_data (attr) db row.
+ # resource_properties_data (attr) DB row.
LOG.debug('Not updating res_id %(rid)s with attr_id %(aid)s',
{'rid': resource_id, 'aid': attr_id})
session.query(
@@ -315,7 +359,7 @@ def resource_attr_id_set(context, resource_id, atomic_key, attr_id):
def resource_attr_data_delete(context, resource_id, attr_id):
session = context.session
- with session.begin(subtransactions=True):
+ with session.begin():
resource = session.query(models.Resource).get(resource_id)
attr_prop_data = session.query(
models.ResourcePropertiesData).get(attr_id)
@@ -437,7 +481,7 @@ def resource_exchange_stacks(context, resource_id1, resource_id2):
def resource_data_delete(context, resource_id, key):
result = resource_data_get_by_key(context, resource_id, key)
session = context.session
- with session.begin(subtransactions=True):
+ with session.begin():
session.delete(result)
@@ -448,20 +492,21 @@ def resource_create(context, values):
return resource_ref
+@retry_on_db_error
def resource_create_replacement(context,
existing_res_id, existing_res_values,
new_res_values,
atomic_key, expected_engine_id=None):
session = context.session
try:
- with session.begin(subtransactions=True):
+ with session.begin():
new_res = resource_create(context, new_res_values)
update_data = {'replaced_by': new_res.id}
update_data.update(existing_res_values)
- if not resource_update(context,
- existing_res_id, update_data,
- atomic_key,
- expected_engine_id=expected_engine_id):
+ if not _try_resource_update(context,
+ existing_res_id, update_data,
+ atomic_key,
+ expected_engine_id=expected_engine_id):
data = {}
if 'name' in new_res_values:
data['resource_name'] = new_res_values['name']
@@ -571,7 +616,7 @@ def stack_get_by_name(context, stack_name):
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id)
).filter_by(name=stack_name)
- return query.first()
+ return query.order_by(models.Stack.created_at).first()
def stack_get(context, stack_id, show_deleted=False, eager_load=True):
@@ -621,13 +666,13 @@ def stack_get_all_by_root_owner_id(context, owner_id):
def _get_sort_keys(sort_keys, mapping):
- """Returns an array containing only whitelisted keys
+ """Returns an array containing only allowed keys
:param sort_keys: an array of strings
:param mapping: a mapping from keys to DB column names
:returns: filtered list of sort keys
"""
- if isinstance(sort_keys, six.string_types):
+ if isinstance(sort_keys, str):
sort_keys = [sort_keys]
return [mapping[key] for key in sort_keys or [] if key in mapping]
@@ -656,7 +701,7 @@ def _paginate_query(context, query, model, limit=None, sort_keys=None,
return query
-def _query_stack_get_all(context, show_deleted=False,
+def _query_stack_get_all(context, show_deleted=False,
show_nested=False, show_hidden=False, tags=None,
tags_any=None, not_tags=None, not_tags_any=None):
if show_nested:
@@ -733,11 +778,11 @@ def _filter_and_page_query(context, query, limit=None, sort_keys=None,
rpc_api.STACK_STATUS: models.Stack.status.key,
rpc_api.STACK_CREATION_TIME: models.Stack.created_at.key,
rpc_api.STACK_UPDATED_TIME: models.Stack.updated_at.key}
- whitelisted_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
+ valid_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
query = db_filters.exact_filter(query, models.Stack, filters)
return _paginate_query(context, query, models.Stack, limit,
- whitelisted_sort_keys, marker, sort_dir)
+ valid_sort_keys, marker, sort_dir)
def stack_count_all(context, filters=None,
@@ -757,12 +802,21 @@ def stack_count_all(context, filters=None,
def stack_create(context, values):
stack_ref = models.Stack()
stack_ref.update(values)
+ stack_name = stack_ref.name
stack_ref.save(context.session)
+
+ # Even though we just created a stack with this name, we may not find
+ # it again because some unit tests create stacks with deleted_at set. Also
+ # some backup stacks may not be found, for reasons that are unclear.
+ earliest = stack_get_by_name(context, stack_name)
+ if earliest is not None and earliest.id != stack_ref.id:
+ context.session.query(models.Stack).filter_by(id=stack_ref.id).delete()
+ raise exception.StackExists(stack_name=stack_name)
+
return stack_ref
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_db_error
def stack_update(context, stack_id, values, exp_trvsl=None):
session = context.session
with session.begin(subtransactions=True):
@@ -820,7 +874,9 @@ def _is_duplicate_error(exc):
@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True,
+ retry_on_disconnect=True,
+ retry_interval=0.5,
+ inc_retry_interval=True,
exception_checker=_is_duplicate_error)
def stack_lock_create(context, stack_id, engine_id):
with db_context.writer.independent.using(context) as session:
@@ -906,7 +962,7 @@ def user_creds_create(context):
else:
user_creds_ref.update(values)
method, password = crypt.encrypt(values['password'])
- if len(six.text_type(password)) > 255:
+ if len(str(password)) > 255:
raise exception.Error(_("Length of OS_PASSWORD after encryption"
" exceeds Heat limit (255 chars)"))
user_creds_ref.password = password
@@ -926,7 +982,7 @@ def user_creds_get(context, user_creds_id):
db_result = context.session.query(models.UserCreds).get(user_creds_id)
if db_result is None:
return None
- # Return a dict copy of db results, do not decrypt details into db_result
+ # Return a dict copy of DB results, do not decrypt details into db_result
# or it can be committed back to the DB in decrypted form
result = dict(db_result)
del result['decrypt_method']
@@ -1011,12 +1067,12 @@ def _events_filter_and_page_query(context, query,
sort_key_map = {rpc_api.EVENT_TIMESTAMP: models.Event.created_at.key,
rpc_api.EVENT_RES_TYPE: models.Event.resource_type.key}
- whitelisted_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
+ valid_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
query = db_filters.exact_filter(query, models.Event, filters)
return _events_paginate_query(context, query, models.Event, limit,
- whitelisted_sort_keys, marker, sort_dir)
+ valid_sort_keys, marker, sort_dir)
def event_count_all_by_stack(context, stack_id):
@@ -1118,8 +1174,7 @@ def _delete_event_rows(context, stack_id, limit):
return retval
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_db_error
def event_create(context, values):
if 'stack_id' in values and cfg.CONF.max_events_per_stack:
# only count events and purge on average
@@ -1133,7 +1188,7 @@ def event_create(context, values):
_delete_event_rows(context, values['stack_id'],
cfg.CONF.event_purge_batch_size)
except db_exception.DBError as exc:
- LOG.error('Failed to purge events: %s', six.text_type(exc))
+ LOG.error('Failed to purge events: %s', str(exc))
event_ref = models.Event()
event_ref.update(values)
event_ref.save(context.session)
@@ -1227,7 +1282,7 @@ def software_deployment_update(context, deployment_id, values):
def software_deployment_delete(context, deployment_id):
deployment = software_deployment_get(context, deployment_id)
session = context.session
- with session.begin(subtransactions=True):
+ with session.begin():
session.delete(deployment)
@@ -1531,8 +1586,7 @@ def sync_point_delete_all_by_stack_and_traversal(context, stack_id,
return rows_deleted
-@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
- retry_interval=0.5, inc_retry_interval=True)
+@retry_on_db_error
def sync_point_create(context, values):
values['entity_id'] = str(values['entity_id'])
sync_point_ref = models.SyncPoint()
@@ -1548,6 +1602,7 @@ def sync_point_get(context, entity_id, traversal_id, is_update):
)
+@retry_on_db_error
def sync_point_update_input_data(context, entity_id,
traversal_id, is_update, atomic_key,
input_data):
@@ -1591,7 +1646,7 @@ def _db_encrypt_or_decrypt_template_params(
batch_size=batch_size)
next_batch = list(itertools.islice(template_batches, batch_size))
while next_batch:
- with session.begin(subtransactions=True):
+ with session.begin():
for raw_template in next_batch:
try:
if verbose:
@@ -1622,7 +1677,7 @@ def _db_encrypt_or_decrypt_template_params(
not param_schemata[param_name].hidden):
continue
encrypted_val = crypt.encrypt(
- six.text_type(param_val), encryption_key)
+ str(param_val), encryption_key)
env['parameters'][param_name] = encrypted_val
encrypted_params.append(param_name)
needs_update = True
@@ -1674,7 +1729,7 @@ def _db_encrypt_or_decrypt_resource_prop_data_legacy(
batch_size=batch_size)
next_batch = list(itertools.islice(resource_batches, batch_size))
while next_batch:
- with session.begin(subtransactions=True):
+ with session.begin():
for resource in next_batch:
if not resource.properties_data:
continue
@@ -1721,7 +1776,7 @@ def _db_encrypt_or_decrypt_resource_prop_data(
model=models.ResourcePropertiesData, batch_size=batch_size)
next_batch = list(itertools.islice(rpd_batches, batch_size))
while next_batch:
- with session.begin(subtransactions=True):
+ with session.begin():
for rpd in next_batch:
if not rpd.data:
continue
@@ -1761,7 +1816,7 @@ def db_encrypt_parameters_and_properties(ctxt, encryption_key, batch_size=50,
:param ctxt: RPC context
:param encryption_key: key that will be used for parameter and property
encryption
- :param batch_size: number of templates requested from db in each iteration.
+ :param batch_size: number of templates requested from DB in each iteration.
50 means that heat requests 50 templates, encrypt them
and proceed with next 50 items.
:param verbose: log an INFO message when processing of each raw_template or
@@ -1785,7 +1840,7 @@ def db_decrypt_parameters_and_properties(ctxt, encryption_key, batch_size=50,
:param ctxt: RPC context
:param encryption_key: key that will be used for parameter and property
decryption
- :param batch_size: number of templates requested from db in each iteration.
+ :param batch_size: number of templates requested from DB in each iteration.
50 means that heat requests 50 templates, encrypt them
and proceed with next 50 items.
:param verbose: log an INFO message when processing of each raw_template or
@@ -1806,7 +1861,7 @@ def db_properties_data_migrate(ctxt, batch_size=50):
"""Migrate properties data from legacy columns to new location in db.
:param ctxt: RPC context
- :param batch_size: number of templates requested from db in each iteration.
+ :param batch_size: number of templates requested from DB in each iteration.
50 means that heat requests 50 templates, encrypt them
and proceed with next 50 items.
"""
diff --git a/heat/db/sqlalchemy/filters.py b/heat/db/sqlalchemy/filters.py
index b4e856448..6c7b8cf24 100644
--- a/heat/db/sqlalchemy/filters.py
+++ b/heat/db/sqlalchemy/filters.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
def exact_filter(query, model, filters):
"""Applies exact match filtering to a query.
@@ -33,7 +31,7 @@ def exact_filter(query, model, filters):
if filters is None:
filters = {}
- for key, value in six.iteritems(filters):
+ for key, value in filters.items():
if isinstance(value, (list, tuple, set, frozenset)):
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/072_raw_template_files.py b/heat/db/sqlalchemy/migrate_repo/versions/072_raw_template_files.py
deleted file mode 100644
index 649ac0c65..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/072_raw_template_files.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-from heat.db.sqlalchemy import types
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
- raw_template_files = sqlalchemy.Table(
- 'raw_template_files', meta,
- sqlalchemy.Column('id', sqlalchemy.Integer,
- primary_key=True,
- nullable=False),
- sqlalchemy.Column('files', types.Json),
- sqlalchemy.Column('created_at', sqlalchemy.DateTime),
- sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
-
- )
- raw_template_files.create()
-
- raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
- files_id = sqlalchemy.Column(
- 'files_id', sqlalchemy.Integer(),
- sqlalchemy.ForeignKey('raw_template_files.id',
- name='raw_tmpl_files_fkey_ref'))
- files_id.create(raw_template)
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/071_mitaka.py b/heat/db/sqlalchemy/migrate_repo/versions/073_newton.py
index 22d69da49..3293b91ce 100644
--- a/heat/db/sqlalchemy/migrate_repo/versions/071_mitaka.py
+++ b/heat/db/sqlalchemy/migrate_repo/versions/073_newton.py
@@ -22,6 +22,18 @@ def upgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
+ raw_template_files = sqlalchemy.Table(
+ 'raw_template_files', meta,
+ sqlalchemy.Column('id', sqlalchemy.Integer,
+ primary_key=True,
+ nullable=False),
+ sqlalchemy.Column('files', types.Json),
+ sqlalchemy.Column('created_at', sqlalchemy.DateTime),
+ sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
raw_template = sqlalchemy.Table(
'raw_template', meta,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
@@ -31,7 +43,10 @@ def upgrade(migrate_engine):
sqlalchemy.Column('template', types.LongText),
sqlalchemy.Column('files', types.Json),
sqlalchemy.Column('environment', types.Json),
-
+ sqlalchemy.Column('files_id', sqlalchemy.Integer(),
+ sqlalchemy.ForeignKey(
+ 'raw_template_files.id',
+ name='raw_tmpl_files_fkey_ref')),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
@@ -76,7 +91,7 @@ def upgrade(migrate_engine):
sqlalchemy.Column('owner_id', sqlalchemy.String(36)),
sqlalchemy.Column('action', sqlalchemy.String(255)),
sqlalchemy.Column('status', sqlalchemy.String(255)),
- sqlalchemy.Column('status_reason', types.LongText),
+ sqlalchemy.Column('status_reason', sqlalchemy.Text),
sqlalchemy.Column('timeout', sqlalchemy.Integer),
sqlalchemy.Column('tenant', sqlalchemy.String(256)),
sqlalchemy.Column('disable_rollback', sqlalchemy.Boolean,
@@ -109,7 +124,7 @@ def upgrade(migrate_engine):
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
sqlalchemy.Column('action', sqlalchemy.String(255)),
sqlalchemy.Column('status', sqlalchemy.String(255)),
- sqlalchemy.Column('status_reason', types.LongText),
+ sqlalchemy.Column('status_reason', sqlalchemy.Text),
sqlalchemy.Column('stack_id', sqlalchemy.String(36),
sqlalchemy.ForeignKey('stack.id'), nullable=False),
sqlalchemy.Column('rsrc_metadata', types.LongText),
@@ -145,7 +160,9 @@ def upgrade(migrate_engine):
sqlalchemy.Column('decrypt_method', sqlalchemy.String(length=64)),
sqlalchemy.Column('resource_id',
sqlalchemy.Integer,
- sqlalchemy.ForeignKey('resource.id'),
+ sqlalchemy.ForeignKey('resource.id',
+ name='fk_resource_id',
+ ondelete='CASCADE'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
@@ -251,7 +268,7 @@ def upgrade(migrate_engine):
sqlalchemy.Column('output_values', types.Json),
sqlalchemy.Column('action', sqlalchemy.String(255)),
sqlalchemy.Column('status', sqlalchemy.String(255)),
- sqlalchemy.Column('status_reason', types.LongText),
+ sqlalchemy.Column('status_reason', sqlalchemy.Text),
sqlalchemy.Column('tenant', sqlalchemy.String(64),
nullable=False,
index=True),
@@ -342,6 +359,7 @@ def upgrade(migrate_engine):
)
tables = (
+ raw_template_files,
raw_template,
user_creds,
stack,
diff --git a/heat/db/sqlalchemy/migrate_repo/versions/073_resource_data_fk_ondelete_cascade.py b/heat/db/sqlalchemy/migrate_repo/versions/073_resource_data_fk_ondelete_cascade.py
deleted file mode 100644
index f532ac75f..000000000
--- a/heat/db/sqlalchemy/migrate_repo/versions/073_resource_data_fk_ondelete_cascade.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports.
-# Do not use this number for new Mitaka work. New Mitaka work starts after
-# all the placeholders.
-
-
-import sqlalchemy
-
-from migrate import ForeignKeyConstraint
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
-
- resource_data = sqlalchemy.Table('resource_data', meta, autoload=True)
- resource = sqlalchemy.Table('resource', meta, autoload=True)
-
- for fk in resource_data.foreign_keys:
- if fk.column == resource.c.id:
- # delete the existing fk
- # and create with ondelete cascade and a proper name
- existing_fkey = ForeignKeyConstraint(
- columns=[resource_data.c.resource_id],
- refcolumns=[resource.c.id], name=fk.name)
- existing_fkey.drop()
- fkey = ForeignKeyConstraint(
- columns=[resource_data.c.resource_id],
- refcolumns=[resource.c.id],
- name="fk_resource_id", ondelete='CASCADE')
- fkey.create()
- break
diff --git a/heat/db/sqlalchemy/migration.py b/heat/db/sqlalchemy/migration.py
index 051d6af16..7f030d90a 100644
--- a/heat/db/sqlalchemy/migration.py
+++ b/heat/db/sqlalchemy/migration.py
@@ -16,7 +16,7 @@ import os
from oslo_db.sqlalchemy import migration as oslo_migration
-INIT_VERSION = 70
+INIT_VERSION = 72
def db_sync(engine, version=None):
diff --git a/heat/db/sqlalchemy/utils.py b/heat/db/sqlalchemy/utils.py
index aca88a5e5..5fff4f763 100644
--- a/heat/db/sqlalchemy/utils.py
+++ b/heat/db/sqlalchemy/utils.py
@@ -53,7 +53,7 @@ def clone_table(name, parent, meta, newcols=None, ignorecols=None,
return False
- constraints = [c.copy() for c in parent.constraints
+ constraints = [c.copy(target_table=new_table) for c in parent.constraints
if c.name not in ignorecons
if not _is_ignorable(c)]
diff --git a/heat/engine/api.py b/heat/engine/api.py
index 2c5840683..35edaff6e 100644
--- a/heat/engine/api.py
+++ b/heat/engine/api.py
@@ -15,7 +15,6 @@ import collections
from oslo_log import log as logging
from oslo_utils import timeutils
-import six
from heat.common.i18n import _
from heat.common import param_utils
@@ -60,12 +59,12 @@ def extract_args(params):
kwargs[rpc_api.PARAM_ADOPT_STACK_DATA] = adopt_data
tags = params.get(rpc_api.PARAM_TAGS)
- if tags:
+ if tags is not None:
if not isinstance(tags, list):
raise ValueError(_('Invalid tags, not a list: %s') % tags)
for tag in tags:
- if not isinstance(tag, six.string_types):
+ if not isinstance(tag, str):
raise ValueError(_('Invalid tag, "%s" is not a string') % tag)
if len(tag) > 80:
@@ -188,7 +187,7 @@ def format_stack_output(output_defn, resolve_value=True):
except Exception as ex:
# We don't need error raising, just adding output_error to
# resulting dict.
- result.update({rpc_api.OUTPUT_ERROR: six.text_type(ex)})
+ result.update({rpc_api.OUTPUT_ERROR: str(ex)})
finally:
result.update({rpc_api.OUTPUT_VALUE: value})
@@ -212,7 +211,7 @@ def format_stack(stack, preview=False, resolve_outputs=True):
rpc_api.STACK_UPDATED_TIME: updated_time,
rpc_api.STACK_DELETION_TIME: deleted_time,
rpc_api.STACK_NOTIFICATION_TOPICS: [], # TODO(therve) Not implemented
- rpc_api.STACK_PARAMETERS: stack.parameters.map(six.text_type),
+ rpc_api.STACK_PARAMETERS: stack.parameters.map(str),
rpc_api.STACK_DESCRIPTION: stack.t[stack.t.DESCRIPTION],
rpc_api.STACK_TMPL_DESCRIPTION: stack.t[stack.t.DESCRIPTION],
rpc_api.STACK_CAPABILITIES: [], # TODO(?) Not implemented yet
@@ -244,7 +243,7 @@ def format_stack(stack, preview=False, resolve_outputs=True):
def format_stack_db_object(stack):
"""Return a summary representation of the given stack.
- Given a stack versioned db object, return a representation of the given
+ Given a stack versioned DB object, return a representation of the given
stack for a stack listing.
"""
updated_time = heat_timeutils.isotime(stack.updated_at)
@@ -293,7 +292,7 @@ def format_resource_attributes(resource, with_attr=None):
if 'show' in resolver:
show_attr = resolve('show', resolver)
# check if 'show' resolved to dictionary. so it's not None
- if isinstance(show_attr, collections.Mapping):
+ if isinstance(show_attr, collections.abc.Mapping):
for a in with_attr:
if a not in show_attr:
show_attr[a] = resolve(a, resolver)
@@ -312,6 +311,8 @@ def format_resource_properties(resource):
try:
return resource.properties[prop]
except (KeyError, ValueError):
+ LOG.exception("Error in fetching property %s of resource %s" %
+ (prop, resource.name))
return None
return dict((prop, get_property(prop))
diff --git a/heat/engine/attributes.py b/heat/engine/attributes.py
index f5fcdf265..eb2c5667f 100644
--- a/heat/engine/attributes.py
+++ b/heat/engine/attributes.py
@@ -12,12 +12,11 @@
# under the License.
import collections
+import functools
from oslo_utils import strutils
-import six
from heat.common.i18n import _
-from heat.common.i18n import repr_wrapper
from heat.engine import constraints as constr
from heat.engine import support
@@ -139,8 +138,7 @@ BASE_ATTRIBUTES = (SHOW_ATTR, ) = ('show', )
ALL_ATTRIBUTES = '*'
-@repr_wrapper
-class Attributes(collections.Mapping):
+class Attributes(collections.abc.Mapping):
"""Models a collection of Resource Attributes."""
def __init__(self, res_name, schema, resolver):
@@ -208,20 +206,20 @@ class Attributes(collections.Mapping):
def _validate_type(self, attrib, value):
if attrib.schema.type == attrib.schema.STRING:
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s",
{'name': attrib.name,
'att_type': attrib.schema.STRING})
elif attrib.schema.type == attrib.schema.LIST:
- if (not isinstance(value, collections.Sequence)
- or isinstance(value, six.string_types)):
+ if (not isinstance(value, collections.abc.Sequence)
+ or isinstance(value, str)):
LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s",
{'name': attrib.name,
'att_type': attrib.schema.LIST})
elif attrib.schema.type == attrib.schema.MAP:
- if not isinstance(value, collections.Mapping):
+ if not isinstance(value, collections.abc.Mapping):
LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s",
{'name': attrib.name,
@@ -298,7 +296,7 @@ class Attributes(collections.Mapping):
def __repr__(self):
return ("Attributes for %s:\n\t" % self._resource_name +
- '\n\t'.join(six.itervalues(self)))
+ '\n\t'.join(self.values()))
def select_from_attribute(attribute_value, path):
@@ -309,16 +307,16 @@ def select_from_attribute(attribute_value, path):
:returns: the selected attribute component value.
"""
def get_path_component(collection, key):
- if not isinstance(collection, (collections.Mapping,
- collections.Sequence)):
+ if not isinstance(collection, (collections.abc.Mapping,
+ collections.abc.Sequence)):
raise TypeError(_("Can't traverse attribute path"))
- if not isinstance(key, (six.string_types, int)):
+ if not isinstance(key, (str, int)):
raise TypeError(_('Path components in attributes must be strings'))
return collection[key]
try:
- return six.moves.reduce(get_path_component, path, attribute_value)
+ return functools.reduce(get_path_component, path, attribute_value)
except (KeyError, IndexError, TypeError):
return None
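
The functools.reduce path walk used by select_from_attribute() above, illustrated on made-up data:

import functools

def get_path_component(collection, key):
    return collection[key]

attr_value = {'addresses': [{'ip': '10.0.0.5'}, {'ip': '10.0.0.6'}]}
print(functools.reduce(get_path_component, ['addresses', 1, 'ip'], attr_value))
# 10.0.0.6
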
diff --git a/heat/engine/cfn/functions.py b/heat/engine/cfn/functions.py
index c0a138524..f037eec0c 100644
--- a/heat/engine/cfn/functions.py
+++ b/heat/engine/cfn/functions.py
@@ -14,7 +14,6 @@
import collections
from oslo_serialization import jsonutils
-import six
from heat.api.aws import utils as aws_utils
from heat.common import exception
@@ -39,7 +38,7 @@ class FindInMap(function.Function):
try:
self._mapname, self._mapkey, self._mapvalue = self.args
except ValueError as ex:
- raise KeyError(six.text_type(ex))
+ raise KeyError(str(ex))
def result(self):
mapping = self.stack.t.maps[function.resolve(self._mapname)]
@@ -160,7 +159,7 @@ class Select(function.Function):
# Handle by returning an empty string
return ''
- if isinstance(strings, six.string_types):
+ if isinstance(strings, str):
# might be serialized json.
try:
strings = jsonutils.loads(strings)
@@ -169,8 +168,8 @@ class Select(function.Function):
'err': json_ex}
raise ValueError(_('"%(fn_name)s": %(err)s') % fmt_data)
- if isinstance(strings, collections.Mapping):
- if not isinstance(index, six.string_types):
+ if isinstance(strings, collections.abc.Mapping):
+ if not isinstance(index, str):
raise TypeError(_('Index to "%s" must be a string') %
self.fn_name)
return strings.get(index, '')
@@ -180,9 +179,9 @@ class Select(function.Function):
except (ValueError, TypeError):
pass
- if (isinstance(strings, collections.Sequence) and
- not isinstance(strings, six.string_types)):
- if not isinstance(index, six.integer_types):
+ if (isinstance(strings, collections.abc.Sequence) and
+ not isinstance(strings, str)):
+ if not isinstance(index, int):
raise TypeError(_('Index to "%s" must be an integer') %
self.fn_name)
@@ -230,7 +229,7 @@ class Split(function.Function):
fmt_data = {'fn_name': self.fn_name,
'example': example}
- if isinstance(self.args, (six.string_types, collections.Mapping)):
+ if isinstance(self.args, (str, collections.abc.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
@@ -243,10 +242,10 @@ class Split(function.Function):
def result(self):
strings = function.resolve(self._strings)
- if not isinstance(self._delim, six.string_types):
+ if not isinstance(self._delim, str):
raise TypeError(_("Delimiter for %s must be string") %
self.fn_name)
- if not isinstance(strings, six.string_types):
+ if not isinstance(strings, str):
raise TypeError(_("String to split must be string; got %s") %
type(strings))
@@ -279,7 +278,7 @@ class Replace(hot_funcs.Replace):
fmt_data = {'fn_name': self.fn_name,
'example': example}
- if isinstance(self.args, (six.string_types, collections.Mapping)):
+ if isinstance(self.args, (str, collections.abc.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
@@ -306,7 +305,7 @@ class Base64(function.Function):
def result(self):
resolved = function.resolve(self.args)
- if not isinstance(resolved, six.string_types):
+ if not isinstance(resolved, str):
raise TypeError(_('"%s" argument must be a string') % self.fn_name)
return resolved
@@ -342,20 +341,20 @@ class MemberListToMap(function.Function):
'''
raise TypeError(_('Wrong Arguments try: "%s"') % correct)
- if not isinstance(self._keyname, six.string_types):
+ if not isinstance(self._keyname, str):
raise TypeError(_('%s Key Name must be a string') % self.fn_name)
- if not isinstance(self._valuename, six.string_types):
+ if not isinstance(self._valuename, str):
raise TypeError(_('%s Value Name must be a string') % self.fn_name)
def result(self):
member_list = function.resolve(self._list)
- if not isinstance(member_list, collections.Iterable):
+ if not isinstance(member_list, collections.abc.Iterable):
raise TypeError(_('Member list must be a list'))
def item(s):
- if not isinstance(s, six.string_types):
+ if not isinstance(s, str):
raise TypeError(_("Member list items must be strings"))
return s.split('=', 1)
@@ -429,8 +428,8 @@ class Not(hot_funcs.Not):
msg = _('Arguments to "%s" must be of the form: '
'[condition]') % self.fn_name
if (not self.args or
- not isinstance(self.args, collections.Sequence) or
- isinstance(self.args, six.string_types)):
+ not isinstance(self.args, collections.abc.Sequence) or
+ isinstance(self.args, str)):
raise ValueError(msg)
if len(self.args) != 1:
raise ValueError(msg)
diff --git a/heat/engine/cfn/template.py b/heat/engine/cfn/template.py
index 22ec1b6fe..09bf06554 100644
--- a/heat/engine/cfn/template.py
+++ b/heat/engine/cfn/template.py
@@ -13,8 +13,6 @@
import functools
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine.cfn import functions as cfn_funcs
@@ -98,12 +96,12 @@ class CfnTemplateBase(template_common.CommonTemplate):
def param_schemata(self, param_defaults=None):
params = self.t.get(self.PARAMETERS) or {}
pdefaults = param_defaults or {}
- for name, schema in six.iteritems(params):
+ for name, schema in params.items():
if name in pdefaults:
params[name][parameters.DEFAULT] = pdefaults[name]
return dict((name, parameters.Schema.from_dict(name, schema))
- for name, schema in six.iteritems(params))
+ for name, schema in params.items())
def get_section_name(self, section):
return section
@@ -124,7 +122,7 @@ class CfnTemplateBase(template_common.CommonTemplate):
defn_data = dict(self._rsrc_defn_args(stack, name,
snippet))
except (TypeError, ValueError, KeyError) as ex:
- msg = six.text_type(ex)
+ msg = str(ex)
raise exception.StackValidationFailed(message=msg)
defn = rsrc_defn.ResourceDefinition(name, **defn_data)
@@ -135,7 +133,7 @@ class CfnTemplateBase(template_common.CommonTemplate):
enabled = conditions.is_enabled(cond_name)
except ValueError as exc:
path = [self.RESOURCES, name, self.RES_CONDITION]
- message = six.text_type(exc)
+ message = str(exc)
raise exception.StackValidationFailed(path=path,
message=message)
if not enabled:
@@ -234,7 +232,7 @@ class CfnTemplate(CfnTemplateBase):
yield ('condition',
self._parse_resource_field(self.RES_CONDITION,
- (six.string_types, bool,
+ (str, bool,
function.Function),
'string or boolean',
name, data, parse_cond))
diff --git a/heat/engine/check_resource.py b/heat/engine/check_resource.py
index a2f6d842c..4bbdb5890 100644
--- a/heat/engine/check_resource.py
+++ b/heat/engine/check_resource.py
@@ -13,8 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import six
-
import eventlet.queue
import functools
@@ -83,7 +81,7 @@ class CheckResource(object):
'during resource %s' % rsrc.action)
rsrc.state_set(rsrc.action,
rsrc.FAILED,
- six.text_type(status_reason))
+ str(status_reason))
return True
elif (rs_obj.engine_id is None and
rs_obj.current_template_id == prev_template_id):
@@ -105,11 +103,10 @@ class CheckResource(object):
def _retrigger_new_traversal(self, cnxt, current_traversal, is_update,
stack_id, rsrc_id):
- latest_stack = parser.Stack.load(cnxt, stack_id=stack_id,
- force_reload=True)
- if current_traversal != latest_stack.current_traversal:
- self.retrigger_check_resource(cnxt, is_update, rsrc_id,
- latest_stack)
+ latest_stack = parser.Stack.load(cnxt, stack_id=stack_id,
+ force_reload=True)
+ if current_traversal != latest_stack.current_traversal:
+ self.retrigger_check_resource(cnxt, rsrc_id, latest_stack)
def _handle_stack_timeout(self, cnxt, stack):
failure_reason = u'Timed out'
@@ -163,6 +160,8 @@ class CheckResource(object):
return True
except exception.UpdateInProgress:
+ LOG.debug('Waiting for existing update to unlock resource %s',
+ rsrc.id)
if self._stale_resource_needs_retry(cnxt, rsrc, prev_template_id):
rpc_data = sync_point.serialize_input_data(self.input_data)
self._rpc_client.check_resource(cnxt,
@@ -170,16 +169,18 @@ class CheckResource(object):
current_traversal,
rpc_data, is_update,
adopt_stack_data)
+ else:
+ rsrc.handle_preempt()
except exception.ResourceFailure as ex:
action = ex.action or rsrc.action
reason = 'Resource %s failed: %s' % (action,
- six.text_type(ex))
+ str(ex))
self._handle_resource_failure(cnxt, is_update, rsrc.id,
stack, reason)
except scheduler.Timeout:
self._handle_resource_failure(cnxt, is_update, rsrc.id,
stack, u'Timed out')
- except CancelOperation as ex:
+ except CancelOperation:
# Stack is already marked FAILED, so we just need to retrigger
# in case a new traversal has started and is waiting on us.
self._retrigger_new_traversal(cnxt, current_traversal, is_update,
@@ -187,31 +188,25 @@ class CheckResource(object):
return False
- def retrigger_check_resource(self, cnxt, is_update, resource_id, stack):
+ def retrigger_check_resource(self, cnxt, resource_id, stack):
current_traversal = stack.current_traversal
graph = stack.convergence_dependencies.graph()
- key = (resource_id, is_update)
- if is_update:
- # When re-trigger received for update in latest traversal, first
- # check if update key is available in graph.
- # if No, then latest traversal is waiting for delete.
- if (resource_id, is_update) not in graph:
- key = (resource_id, not is_update)
- else:
- # When re-trigger received for delete in latest traversal, first
- # check if update key is available in graph,
- # if yes, then latest traversal is waiting for update.
- if (resource_id, True) in graph:
- # not is_update evaluates to True below, which means update
- key = (resource_id, not is_update)
- LOG.info('Re-trigger resource: (%(key1)s, %(key2)s)',
- {'key1': key[0], 'key2': key[1]})
+
+        # When a re-trigger is received for the latest traversal, first check
+        # whether the update key is present in the graph. If it is, the latest
+        # traversal is waiting for an update; otherwise it is waiting for a
+        # delete. This holds regardless of which action (update or cleanup)
+        # from the previous traversal was blocking it.
+ update_key = parser.ConvergenceNode(resource_id, True)
+ key = parser.ConvergenceNode(resource_id, update_key in graph)
+
+ LOG.info('Re-trigger resource: %s', key)
predecessors = set(graph[key])
try:
propagate_check_resource(cnxt, self._rpc_client, resource_id,
current_traversal, predecessors, key,
- None, key[1], None)
+ None, key.is_update, None)
except exception.EntityNotFound as e:
if e.entity != "Sync Point":
raise
@@ -283,8 +278,7 @@ class CheckResource(object):
current_traversal)
return
- self.retrigger_check_resource(cnxt, is_update,
- resource_id, stack)
+ self.retrigger_check_resource(cnxt, resource_id, stack)
else:
raise
@@ -319,7 +313,7 @@ class CheckResource(object):
rsrc, stack)
except BaseException as exc:
with excutils.save_and_reraise_exception():
- msg = six.text_type(exc)
+ msg = str(exc)
LOG.exception("Unexpected exception in resource check.")
self._handle_resource_failure(cnxt, is_update, rsrc.id,
stack, msg)
@@ -350,7 +344,7 @@ def check_stack_complete(cnxt, stack, current_traversal, sender_id, deps,
def mark_complete(stack_id, data):
stack.mark_complete()
- sender_key = (sender_id, is_update)
+ sender_key = parser.ConvergenceNode(sender_id, is_update)
sync_point.sync(cnxt, stack.id, current_traversal, True,
mark_complete, roots, {sender_key: None})
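retrigger_check_resource no longer needs the caller's is_update flag: the node to re-trigger is derived from whether the update node for the resource is present in the latest traversal's convergence graph. A sketch under the assumption that parser.ConvergenceNode is a (rsrc_id, is_update) named tuple; the toy graph below is illustrative:

    from collections import namedtuple

    # Assumed shape of parser.ConvergenceNode: a (resource id, is_update) pair.
    ConvergenceNode = namedtuple('ConvergenceNode', ['rsrc_id', 'is_update'])

    def retrigger_key(resource_id, graph):
        # If the update node exists in the graph, the latest traversal is
        # waiting on an update; otherwise it is waiting on a cleanup/delete.
        update_node = ConvergenceNode(resource_id, True)
        return ConvergenceNode(resource_id, update_node in graph)

    # Toy graph: resource 42 only has a cleanup node in the latest traversal.
    graph = {ConvergenceNode(42, False): set()}
    assert retrigger_key(42, graph) == ConvergenceNode(42, False)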
diff --git a/heat/engine/clients/__init__.py b/heat/engine/clients/__init__.py
index 690719c29..f80ac532e 100644
--- a/heat/engine/clients/__init__.py
+++ b/heat/engine/clients/__init__.py
@@ -16,7 +16,6 @@ import weakref
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
-import six
from stevedore import enabled
from heat.common import exception
@@ -95,7 +94,7 @@ class ClientBackend(object):
context)
except (ImportError, RuntimeError, cfg.NoSuchOptError) as err:
msg = _('Invalid cloud_backend setting in heat.conf '
- 'detected - %s') % six.text_type(err)
+ 'detected - %s') % str(err)
LOG.error(msg)
raise exception.Invalid(reason=msg)
diff --git a/heat/engine/clients/client_plugin.py b/heat/engine/clients/client_plugin.py
index ab737da6f..286778901 100644
--- a/heat/engine/clients/client_plugin.py
+++ b/heat/engine/clients/client_plugin.py
@@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_utils import excutils
import requests
-import six
from heat.common import config
from heat.common import exception as heat_exception
@@ -29,8 +28,7 @@ from heat.common import exception as heat_exception
cfg.CONF.import_opt('client_retry_limit', 'heat.common.config')
-@six.add_metaclass(abc.ABCMeta)
-class ClientPlugin(object):
+class ClientPlugin(object, metaclass=abc.ABCMeta):
# Module which contains all exceptions classes which the client
# may emit
@@ -49,6 +47,7 @@ class ClientPlugin(object):
self._context = weakref.ref(context)
self._clients = weakref.ref(context.clients)
self._client_instances = {}
+ self._endpoint_existence = {}
@property
def context(self):
@@ -88,7 +87,14 @@ class ClientPlugin(object):
pass
def _get_region_name(self):
- return self.context.region_name or cfg.CONF.region_name_for_services
+ reg = self.context.region_name or cfg.CONF.region_name_for_services
+ # If Shared Services configured, override region for image/volumes
+ shared_services_region_name = cfg.CONF.region_name_for_shared_services
+ shared_services_types = cfg.CONF.shared_services_types
+ if shared_services_region_name:
+ if set(self.service_types) & set(shared_services_types):
+ reg = shared_services_region_name
+ return reg
def url_for(self, **kwargs):
keystone_session = self.context.keystone_session
@@ -131,11 +137,10 @@ class ClientPlugin(object):
if self.exceptions_module:
if isinstance(self.exceptions_module, list):
for m in self.exceptions_module:
- if type(ex) in six.itervalues(m.__dict__):
+ if type(ex) in m.__dict__.values():
return True
else:
- return type(ex) in six.itervalues(
- self.exceptions_module.__dict__)
+ return type(ex) in self.exceptions_module.__dict__.values()
return False
def is_not_found(self, ex):
@@ -163,14 +168,18 @@ class ClientPlugin(object):
def does_endpoint_exist(self,
service_type,
service_name):
- endpoint_type = self._get_client_option(service_name,
- 'endpoint_type')
- try:
- self.url_for(service_type=service_type,
- endpoint_type=endpoint_type)
- return True
- except exceptions.EndpointNotFound:
- return False
+ endpoint_key = (service_type, service_name)
+ if endpoint_key not in self._endpoint_existence:
+ endpoint_type = self._get_client_option(service_name,
+ 'endpoint_type')
+ try:
+ self.url_for(service_type=service_type,
+ endpoint_type=endpoint_type)
+ self._endpoint_existence[endpoint_key] = True
+ except exceptions.EndpointNotFound:
+ self._endpoint_existence[endpoint_key] = False
+
+ return self._endpoint_existence[endpoint_key]
def retry_if_connection_err(exception):
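does_endpoint_exist() now memoizes its answer per (service_type, service_name), so repeated validations do not re-query the service catalog. A sketch of the caching pattern with the catalog lookup stubbed out; EndpointNotFound, EndpointCache and fake_url_for are stand-ins, not keystoneauth or Heat code:

    class EndpointNotFound(Exception):
        pass

    class EndpointCache(object):
        """Sketch of the memoization added to does_endpoint_exist()."""

        def __init__(self, lookup):
            self._lookup = lookup                 # stands in for url_for()
            self._endpoint_existence = {}

        def does_endpoint_exist(self, service_type, service_name):
            key = (service_type, service_name)
            if key not in self._endpoint_existence:
                try:
                    self._lookup(service_type)
                    self._endpoint_existence[key] = True
                except EndpointNotFound:
                    self._endpoint_existence[key] = False
            return self._endpoint_existence[key]

    calls = []

    def fake_url_for(service_type):
        calls.append(service_type)
        return 'http://heat.example.com'

    cache = EndpointCache(fake_url_for)
    assert cache.does_endpoint_exist('orchestration', 'heat') is True
    assert cache.does_endpoint_exist('orchestration', 'heat') is True
    assert calls == ['orchestration']   # second call answered from the cache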
diff --git a/heat/engine/clients/microversion_mixin.py b/heat/engine/clients/microversion_mixin.py
index 84e6634ee..4e303eac9 100644
--- a/heat/engine/clients/microversion_mixin.py
+++ b/heat/engine/clients/microversion_mixin.py
@@ -13,13 +13,10 @@
import abc
-import six
-
from heat.common import exception
-@six.add_metaclass(abc.ABCMeta)
-class MicroversionMixin(object):
+class MicroversionMixin(object, metaclass=abc.ABCMeta):
"""Mixin For microversion support."""
def client(self, version=None):
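The @six.add_metaclass decorator is replaced by the Python 3 metaclass keyword in the class header. A minimal sketch of the equivalent declaration (MixinSketch is an illustrative name):

    import abc

    # Python 3 form used in this patch instead of @six.add_metaclass(abc.ABCMeta).
    class MixinSketch(object, metaclass=abc.ABCMeta):

        @abc.abstractmethod
        def client(self, version=None):
            raise NotImplementedError

    # Abstract classes still refuse direct instantiation.
    try:
        MixinSketch()
    except TypeError:
        pass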
diff --git a/heat/engine/clients/os/__init__.py b/heat/engine/clients/os/__init__.py
index 8047b236a..fedfdd482 100644
--- a/heat/engine/clients/os/__init__.py
+++ b/heat/engine/clients/os/__init__.py
@@ -11,6 +11,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import abc
+
+import six
+
from oslo_cache import core
from oslo_config import cfg
@@ -25,3 +29,20 @@ MEMOIZE_FINDER = core.get_memoization_decorator(
conf=cfg.CONF,
region=cache.get_cache_region(),
group="resource_finder_cache")
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ExtensionMixin(object):
+ def __init__(self, *args, **kwargs):
+ super(ExtensionMixin, self).__init__(*args, **kwargs)
+ self._extensions = None
+
+ @abc.abstractmethod
+ def _list_extensions(self):
+ return []
+
+ def has_extension(self, alias):
+ """Check if specific extension is present."""
+ if self._extensions is None:
+ self._extensions = set(self._list_extensions())
+ return alias in self._extensions
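ExtensionMixin centralizes the extension caching previously duplicated in the cinder, neutron and nova plugins: _list_extensions() runs once per plugin instance and has_extension() answers from the cached set afterwards. A standalone sketch of the intended usage; ExtensionMixinSketch and FakeVolumePlugin are simplified stand-ins, not Heat classes:

    class ExtensionMixinSketch(object):
        """Simplified stand-in for heat.engine.clients.os.ExtensionMixin."""

        def __init__(self, *args, **kwargs):
            super(ExtensionMixinSketch, self).__init__(*args, **kwargs)
            self._extensions = None

        def _list_extensions(self):
            raise NotImplementedError

        def has_extension(self, alias):
            # Extensions are listed once, then answered from the cached set.
            if self._extensions is None:
                self._extensions = set(self._list_extensions())
            return alias in self._extensions

    class FakeVolumePlugin(ExtensionMixinSketch):
        calls = 0

        def _list_extensions(self):
            FakeVolumePlugin.calls += 1
            return ['os-volume-transfer', 'os-extended-snapshots']

    plugin = FakeVolumePlugin()
    assert plugin.has_extension('os-volume-transfer')
    assert not plugin.has_extension('does-not-exist')
    assert FakeVolumePlugin.calls == 1    # listed only once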
diff --git a/heat/engine/clients/os/aodh.py b/heat/engine/clients/os/aodh.py
index f734f8778..e8aab0957 100644
--- a/heat/engine/clients/os/aodh.py
+++ b/heat/engine/clients/os/aodh.py
@@ -13,6 +13,7 @@
from aodhclient import client as ac
from aodhclient import exceptions
+from oslo_config import cfg
from heat.engine.clients import client_plugin
@@ -37,6 +38,7 @@ class AodhClientPlugin(client_plugin.ClientPlugin):
session=self.context.keystone_session,
interface=interface,
service_type=self.ALARMING,
+ connect_retries=cfg.CONF.client_retry_limit,
region_name=self._get_region_name())
def is_not_found(self, ex):
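The aodh plugin, and most of the plugins below, now forward cfg.CONF.client_retry_limit into the client constructor (as connect_retries for most clients, retries for trove) so connection failures are retried. A toy sketch of the wiring only; FakeConf and FakeClient are illustrative stand-ins, not oslo.config or a real python-*client:

    class FakeConf(object):
        client_retry_limit = 2        # heat.conf option forwarded to clients

    CONF = FakeConf()

    class FakeClient(object):
        def __init__(self, session=None, connect_retries=0, **kwargs):
            self.connect_retries = connect_retries

    client = FakeClient(session=object(),
                        connect_retries=CONF.client_retry_limit)
    assert client.connect_retries == 2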
diff --git a/heat/engine/clients/os/barbican.py b/heat/engine/clients/os/barbican.py
index 9e4b66e3b..0ddbe966f 100644
--- a/heat/engine/clients/os/barbican.py
+++ b/heat/engine/clients/os/barbican.py
@@ -14,6 +14,7 @@
from barbicanclient import exceptions
from barbicanclient.v1 import client as barbican_client
from barbicanclient.v1 import containers
+from oslo_config import cfg
from oslo_log import log as logging
from heat.common import exception
@@ -35,6 +36,7 @@ class BarbicanClientPlugin(client_plugin.ClientPlugin):
session=self.context.keystone_session,
service_type=self.KEY_MANAGER,
interface=interface,
+ connect_retries=cfg.CONF.client_retry_limit,
region_name=self._get_region_name())
return client
diff --git a/heat/engine/clients/os/blazar.py b/heat/engine/clients/os/blazar.py
index 099c6af5b..9d1d650bf 100644
--- a/heat/engine/clients/os/blazar.py
+++ b/heat/engine/clients/os/blazar.py
@@ -13,8 +13,11 @@
from blazarclient import client as blazar_client
from blazarclient import exception as client_exception
+from oslo_config import cfg
+from heat.common import exception
from heat.engine.clients import client_plugin
+from heat.engine import constraints
CLIENT_NAME = 'blazar'
@@ -30,6 +33,7 @@ class BlazarClientPlugin(client_plugin.ClientPlugin):
'service_type': self.RESERVATION,
'interface': interface,
'region_name': self._get_region_name(),
+ 'connect_retries': cfg.CONF.client_retry_limit
}
client = blazar_client.Client(**args)
@@ -56,3 +60,16 @@ class BlazarClientPlugin(client_plugin.ClientPlugin):
def get_host(self, id):
return self.client().host.get(id)
+
+
+class BlazarBaseConstraint(constraints.BaseCustomConstraint):
+
+ resource_client_name = CLIENT_NAME
+
+
+class ReservationConstraint(BlazarBaseConstraint):
+ expected_exceptions = (
+ exception.EntityNotFound,
+ client_exception.BlazarClientException,)
+
+ resource_getter_name = 'get_lease'
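The new Blazar constraints follow Heat's custom-constraint pattern: a subclass names the client plugin and a getter method, and validation succeeds when the getter returns without raising one of the expected exceptions. A sketch of the pattern with a toy base class; BaseCustomConstraintSketch and FakeBlazarPlugin are illustrative, not heat.engine.constraints:

    class EntityNotFound(Exception):
        pass

    class BaseCustomConstraintSketch(object):
        """Toy stand-in for heat.engine.constraints.BaseCustomConstraint."""
        expected_exceptions = (EntityNotFound,)
        resource_client_name = None
        resource_getter_name = None

        def validate(self, value, client_plugin):
            getter = getattr(client_plugin, self.resource_getter_name)
            try:
                getter(value)                 # e.g. get_lease('my-lease')
                return True
            except self.expected_exceptions:
                return False

    class ReservationConstraintSketch(BaseCustomConstraintSketch):
        resource_client_name = 'blazar'
        resource_getter_name = 'get_lease'

    class FakeBlazarPlugin(object):
        def get_lease(self, name):
            if name != 'lease-1':
                raise EntityNotFound(name)
            return {'id': name}

    constraint = ReservationConstraintSketch()
    assert constraint.validate('lease-1', FakeBlazarPlugin())
    assert not constraint.validate('missing', FakeBlazarPlugin())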
diff --git a/heat/engine/clients/os/cinder.py b/heat/engine/clients/os/cinder.py
index 02903c9ce..7d4c37a46 100644
--- a/heat/engine/clients/os/cinder.py
+++ b/heat/engine/clients/os/cinder.py
@@ -14,6 +14,7 @@
from cinderclient import client as cc
from cinderclient import exceptions
from keystoneauth1 import exceptions as ks_exceptions
+from oslo_config import cfg
from oslo_log import log as logging
from heat.common import exception
@@ -28,7 +29,8 @@ LOG = logging.getLogger(__name__)
CLIENT_NAME = 'cinder'
-class CinderClientPlugin(client_plugin.ClientPlugin):
+class CinderClientPlugin(os_client.ExtensionMixin,
+ client_plugin.ClientPlugin):
exceptions_module = exceptions
@@ -62,6 +64,7 @@ class CinderClientPlugin(client_plugin.ClientPlugin):
'interface': self.interface,
'service_type': self.service_type,
'region_name': self._get_region_name(),
+ 'connect_retries': cfg.CONF.client_retry_limit,
'http_log_debug': self._get_client_option(CLIENT_NAME,
'http_log_debug')
}
@@ -74,10 +77,6 @@ class CinderClientPlugin(client_plugin.ClientPlugin):
extensions = self.client().list_extensions.show_all()
return set(extension.alias for extension in extensions)
- def has_extension(self, alias):
- """Check if specific extension is present."""
- return alias in self._list_extensions()
-
def get_volume(self, volume):
try:
return self.client().volumes.get(volume)
diff --git a/heat/engine/clients/os/designate.py b/heat/engine/clients/os/designate.py
index 3989e98b5..05c4c7da4 100644
--- a/heat/engine/clients/os/designate.py
+++ b/heat/engine/clients/os/designate.py
@@ -13,8 +13,6 @@
from designateclient import client
from designateclient import exceptions
-from designateclient.v1 import domains
-from designateclient.v1 import records
from heat.common import exception as heat_exception
from heat.engine.clients import client_plugin
@@ -29,13 +27,9 @@ class DesignateClientPlugin(client_plugin.ClientPlugin):
service_types = [DNS] = ['dns']
- supported_versions = [V1, V2] = ['1', '2']
-
- default_version = V1
-
- def _create(self, version=default_version):
+ def _create(self):
endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
- return client.Client(version=version,
+ return client.Client(version='2',
session=self.context.keystone_session,
endpoint_type=endpoint_type,
service_type=self.DNS,
@@ -44,20 +38,8 @@ class DesignateClientPlugin(client_plugin.ClientPlugin):
def is_not_found(self, ex):
return isinstance(ex, exceptions.NotFound)
- def get_domain_id(self, domain_id_or_name):
- try:
- domain_obj = self.client().domains.get(domain_id_or_name)
- return domain_obj.id
- except exceptions.NotFound:
- for domain in self.client().domains.list():
- if domain.name == domain_id_or_name:
- return domain.id
-
- raise heat_exception.EntityNotFound(entity='Designate Domain',
- name=domain_id_or_name)
-
def get_zone_id(self, zone_id_or_name):
- client = self.client(version=self.V2)
+ client = self.client()
try:
zone_obj = client.zones.get(zone_id_or_name)
return zone_obj['id']
@@ -69,51 +51,6 @@ class DesignateClientPlugin(client_plugin.ClientPlugin):
raise heat_exception.EntityNotFound(entity='Designate Zone',
name=zone_id_or_name)
- def domain_create(self, **kwargs):
- domain = domains.Domain(**kwargs)
- return self.client().domains.create(domain)
-
- def domain_update(self, **kwargs):
- # Designate mandates to pass the Domain object with updated properties
- domain = self.client().domains.get(kwargs['id'])
- for key in kwargs.keys():
- setattr(domain, key, kwargs[key])
-
- return self.client().domains.update(domain)
-
- def record_create(self, **kwargs):
- domain_id = self.get_domain_id(kwargs.pop('domain'))
- record = records.Record(**kwargs)
- return self.client().records.create(domain_id, record)
-
- def record_update(self, **kwargs):
- # Designate mandates to pass the Record object with updated properties
- domain_id = self.get_domain_id(kwargs.pop('domain'))
- record = self.client().records.get(domain_id, kwargs['id'])
-
- for key in kwargs.keys():
- setattr(record, key, kwargs[key])
-
- return self.client().records.update(record.domain_id, record)
-
- def record_delete(self, **kwargs):
- try:
- domain_id = self.get_domain_id(kwargs.pop('domain'))
- except heat_exception.EntityNotFound:
- return
- return self.client().records.delete(domain_id,
- kwargs.pop('id'))
-
- def record_show(self, **kwargs):
- domain_id = self.get_domain_id(kwargs.pop('domain'))
- return self.client().records.get(domain_id,
- kwargs.pop('id'))
-
-
-class DesignateDomainConstraint(constraints.BaseCustomConstraint):
- resource_client_name = CLIENT_NAME
- resource_getter_name = 'get_domain_id'
-
class DesignateZoneConstraint(constraints.BaseCustomConstraint):
resource_client_name = CLIENT_NAME
diff --git a/heat/engine/clients/os/glance.py b/heat/engine/clients/os/glance.py
index 59970b4cc..caa306d4f 100644
--- a/heat/engine/clients/os/glance.py
+++ b/heat/engine/clients/os/glance.py
@@ -11,6 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_config import cfg
from oslo_utils import uuidutils
from glanceclient import client as gc
@@ -41,6 +42,7 @@ class GlanceClientPlugin(client_plugin.ClientPlugin):
return gc.Client(version, session=con.keystone_session,
interface=interface,
service_type=self.IMAGE,
+ connect_retries=cfg.CONF.client_retry_limit,
region_name=self._get_region_name())
def _find_with_attr(self, entity, **kwargs):
diff --git a/heat/engine/clients/os/heat_plugin.py b/heat/engine/clients/os/heat_plugin.py
index fb5132893..76cdf990d 100644
--- a/heat/engine/clients/os/heat_plugin.py
+++ b/heat/engine/clients/os/heat_plugin.py
@@ -38,6 +38,7 @@ class HeatClientPlugin(client_plugin.ClientPlugin):
args['username'] = self.context.username
args['password'] = self.context.password
+ args['connect_retries'] = cfg.CONF.client_retry_limit
return hc.Client('1', endpoint_override=endpoint,
session=self.context.keystone_session,
**args)
diff --git a/heat/engine/clients/os/ironic.py b/heat/engine/clients/os/ironic.py
new file mode 100644
index 000000000..59ade7b4b
--- /dev/null
+++ b/heat/engine/clients/os/ironic.py
@@ -0,0 +1,82 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironicclient.common.apiclient import exceptions as ic_exc
+from ironicclient.v1 import client as ironic_client
+from oslo_config import cfg
+
+from heat.common import exception
+from heat.engine.clients import client_plugin
+from heat.engine import constraints
+
+CLIENT_NAME = 'ironic'
+
+
+class IronicClientPlugin(client_plugin.ClientPlugin):
+
+ service_types = [BAREMETAL] = ['baremetal']
+ IRONIC_API_VERSION = 1.58
+ max_ironic_api_microversion = cfg.CONF.max_ironic_api_microversion
+ max_microversion = max_ironic_api_microversion if (
+ max_ironic_api_microversion is not None and (
+ IRONIC_API_VERSION > max_ironic_api_microversion)
+ ) else IRONIC_API_VERSION
+
+ def _create(self):
+ interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
+ args = {
+ 'interface': interface,
+ 'service_type': self.BAREMETAL,
+ 'session': self.context.keystone_session,
+ 'region_name': self._get_region_name(),
+ 'os_ironic_api_version': self.max_microversion
+ }
+ client = ironic_client.Client(**args)
+ return client
+
+ def is_not_found(self, ex):
+ return isinstance(ex, ic_exc.NotFound)
+
+ def is_over_limit(self, ex):
+ return isinstance(ex, ic_exc.RequestEntityTooLarge)
+
+ def is_conflict(self, ex):
+ return isinstance(ex, ic_exc.Conflict)
+
+ def _get_rsrc_name_or_id(self, value, entity, entity_msg):
+ entity_client = getattr(self.client(), entity)
+ try:
+ return entity_client.get(value).uuid
+ except ic_exc.NotFound:
+            # The ironic client resolves the value whether it is a name or
+            # an ID, so there is no need to call list() here.
+ raise exception.EntityNotFound(entity=entity_msg,
+ name=value)
+
+ def get_portgroup(self, value):
+ return self._get_rsrc_name_or_id(value, entity='portgroup',
+ entity_msg='PortGroup')
+
+ def get_node(self, value):
+ return self._get_rsrc_name_or_id(value, entity='node',
+ entity_msg='Node')
+
+
+class PortGroupConstraint(constraints.BaseCustomConstraint):
+ resource_client_name = CLIENT_NAME
+ resource_getter_name = 'get_portgroup'
+
+
+class NodeConstraint(constraints.BaseCustomConstraint):
+ resource_client_name = CLIENT_NAME
+ resource_getter_name = 'get_node'
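The new ironic plugin caps the microversion it requests: the built-in 1.58 is used unless max_ironic_api_microversion is set to something lower. A sketch of the clamping logic only (the function name is illustrative):

    IRONIC_API_VERSION = 1.58

    def effective_microversion(max_ironic_api_microversion):
        # Mirror of the class-level expression in IronicClientPlugin.
        if (max_ironic_api_microversion is not None and
                IRONIC_API_VERSION > max_ironic_api_microversion):
            return max_ironic_api_microversion
        return IRONIC_API_VERSION

    assert effective_microversion(None) == 1.58   # option unset: use default
    assert effective_microversion(1.38) == 1.38   # operator caps the version
    assert effective_microversion(1.70) == 1.58   # never raised above default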
diff --git a/heat/engine/clients/os/keystone/__init__.py b/heat/engine/clients/os/keystone/__init__.py
index 184705338..076a8142a 100644
--- a/heat/engine/clients/os/keystone/__init__.py
+++ b/heat/engine/clients/os/keystone/__init__.py
@@ -11,6 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import re
+
from keystoneauth1 import exceptions as ks_exceptions
from heat.common import exception
@@ -25,7 +27,8 @@ class KeystoneClientPlugin(client_plugin.ClientPlugin):
service_types = [IDENTITY] = ['identity']
def _create(self):
- return hkc.KeystoneClient(self.context)
+ region_name = self._get_region_name()
+ return hkc.KeystoneClient(self.context, region_name)
def is_not_found(self, ex):
return isinstance(ex, (ks_exceptions.NotFound,
@@ -37,26 +40,59 @@ class KeystoneClientPlugin(client_plugin.ClientPlugin):
def is_conflict(self, ex):
return isinstance(ex, ks_exceptions.Conflict)
- def get_role_id(self, role):
+ def parse_entity_with_domain(self, entity_with_domain, entity_type):
+ """Parse keystone entity user/role/project with domain.
+
+ entity_with_domain should be in entity{domain} format.
+
+ Returns a tuple of (entity, domain).
+ """
+ try:
+ match = re.search(r"\{(.*?)\}$", entity_with_domain)
+ if match:
+ entity = entity_with_domain[:match.start()]
+ domain = match.group(1)
+ domain = self.get_domain_id(domain)
+ return (entity, domain)
+ else:
+ return (entity_with_domain, None)
+ except Exception:
+ raise exception.EntityNotFound(entity=entity_type,
+ name=entity_with_domain)
+
+ def get_role_id(self, role, domain=None):
+ if role is None:
+ return None
+
+ if not domain:
+ role, domain = self.parse_entity_with_domain(role, 'KeystoneRole')
+
try:
role_obj = self.client().client.roles.get(role)
return role_obj.id
except ks_exceptions.NotFound:
- role_list = self.client().client.roles.list(name=role)
+ role_list = self.client().client.roles.list(name=role,
+ domain=domain)
for role_obj in role_list:
if role_obj.name == role:
return role_obj.id
raise exception.EntityNotFound(entity='KeystoneRole', name=role)
- def get_project_id(self, project):
+ def get_project_id(self, project, domain=None):
if project is None:
return None
+
+ if not domain:
+ project, domain = self.parse_entity_with_domain(project,
+ 'KeystoneProject')
+
try:
project_obj = self.client().client.projects.get(project)
return project_obj.id
except ks_exceptions.NotFound:
- project_list = self.client().client.projects.list(name=project)
+ project_list = self.client().client.projects.list(name=project,
+ domain=domain)
for project_obj in project_list:
if project_obj.name == project:
return project_obj.id
@@ -78,14 +114,20 @@ class KeystoneClientPlugin(client_plugin.ClientPlugin):
raise exception.EntityNotFound(entity='KeystoneDomain', name=domain)
- def get_group_id(self, group):
+ def get_group_id(self, group, domain=None):
if group is None:
return None
+
+ if not domain:
+ group, domain = self.parse_entity_with_domain(group,
+ 'KeystoneGroup')
+
try:
group_obj = self.client().client.groups.get(group)
return group_obj.id
except ks_exceptions.NotFound:
- group_list = self.client().client.groups.list(name=group)
+ group_list = self.client().client.groups.list(name=group,
+ domain=domain)
for group_obj in group_list:
if group_obj.name == group:
return group_obj.id
@@ -109,18 +151,23 @@ class KeystoneClientPlugin(client_plugin.ClientPlugin):
raise exception.EntityNotFound(entity='KeystoneService',
name=service)
- def get_user_id(self, user):
+ def get_user_id(self, user, domain=None):
if user is None:
return None
+
+ if not domain:
+ user, domain = self.parse_entity_with_domain(user,
+ 'KeystoneUser')
try:
user_obj = self.client().client.users.get(user)
return user_obj.id
except ks_exceptions.NotFound:
- user_list = self.client().client.users.list(name=user)
- for user_obj in user_list:
- if user_obj.name == user:
- return user_obj.id
-
+ try:
+ user_obj = self.client().client.users.find(name=user,
+ domain_id=domain)
+ return user_obj.id
+ except ks_exceptions.NotFound:
+ pass
raise exception.EntityNotFound(entity='KeystoneUser', name=user)
def get_region_id(self, region):
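Keystone entities can now be written as name{domain}; parse_entity_with_domain() splits the two with a regex and resolves the domain before the usual by-ID/by-name lookup. A standalone sketch of the parsing, with the domain lookup stubbed by a pass-through (get_domain_id here is a placeholder, not the plugin method):

    import re

    def parse_entity_with_domain(entity_with_domain, get_domain_id=lambda d: d):
        # Split 'entity{domain}' into (entity, domain); plain names pass
        # through with domain=None.
        match = re.search(r"\{(.*?)\}$", entity_with_domain)
        if match:
            entity = entity_with_domain[:match.start()]
            domain = get_domain_id(match.group(1))
            return entity, domain
        return entity_with_domain, None

    assert parse_entity_with_domain('demo_user{Default}') == ('demo_user',
                                                              'Default')
    assert parse_entity_with_domain('demo_user') == ('demo_user', None)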
diff --git a/heat/engine/clients/os/keystone/fake_keystoneclient.py b/heat/engine/clients/os/keystone/fake_keystoneclient.py
index 6e594ecfb..9715c952f 100644
--- a/heat/engine/clients/os/keystone/fake_keystoneclient.py
+++ b/heat/engine/clients/os/keystone/fake_keystoneclient.py
@@ -91,6 +91,13 @@ class FakeKeystoneClient(object):
trust_id='atrust',
trustor_user_id=self.user_id)
+ def regenerate_trust_context(self):
+ return context.RequestContext(username=self.username,
+ password=self.password,
+ is_admin=False,
+ trust_id='atrust',
+ trustor_user_id=self.user_id)
+
def delete_trust(self, trust_id):
pass
@@ -121,3 +128,6 @@ class FakeKeystoneClient(object):
def stack_domain_user_token(self, user_id, project_id, password):
return 'adomainusertoken'
+
+ def server_keystone_endpoint_url(self, fallback_endpoint):
+ return fallback_endpoint
diff --git a/heat/engine/clients/os/keystone/heat_keystoneclient.py b/heat/engine/clients/os/keystone/heat_keystoneclient.py
index 56e263d9f..9754491a9 100644
--- a/heat/engine/clients/os/keystone/heat_keystoneclient.py
+++ b/heat/engine/clients/os/keystone/heat_keystoneclient.py
@@ -31,7 +31,7 @@ from heat.common import exception
from heat.common.i18n import _
from heat.common import password_gen
-LOG = logging.getLogger('heat.engine.clients.keystoneclient')
+LOG = logging.getLogger(__name__)
AccessKey = collections.namedtuple('AccessKey', ['id', 'access', 'secret'])
@@ -58,7 +58,7 @@ class KsClientWrapper(object):
directly instantiate instances of this class inside resources themselves.
"""
- def __init__(self, context):
+ def __init__(self, context, region_name):
# If a trust_id is specified in the context, we immediately
# authenticate so we can populate the context with a trust token
# otherwise, we delay client authentication until needed to avoid
@@ -75,6 +75,7 @@ class KsClientWrapper(object):
self._admin_auth = None
self._domain_admin_auth = None
self._domain_admin_client = None
+ self._region_name = region_name
self.session = self.context.keystone_session
self.v3_endpoint = self.context.keystone_v3_endpoint
@@ -123,8 +124,7 @@ class KsClientWrapper(object):
importutils.import_module('keystonemiddleware.auth_token')
auth_region = cfg.CONF.keystone_authtoken.region_name
if not auth_region:
- auth_region = (self.context.region_name or
- cfg.CONF.region_name_for_services)
+ auth_region = self._region_name
return auth_region
@property
@@ -157,12 +157,14 @@ class KsClientWrapper(object):
self._domain_admin_client = kc_v3.Client(
session=self.session,
auth=self.domain_admin_auth,
+ connect_retries=cfg.CONF.client_retry_limit,
region_name=self.auth_region_name)
return self._domain_admin_client
def _v3_client_init(self):
client = kc_v3.Client(session=self.session,
+ connect_retries=cfg.CONF.client_retry_limit,
region_name=self.auth_region_name)
if hasattr(self.context.auth_plugin, 'get_access'):
@@ -186,19 +188,7 @@ class KsClientWrapper(object):
return client
- def create_trust_context(self):
- """Create a trust using the trustor identity in the current context.
-
- The trust is created with the trustee as the heat service user.
-
- If the current context already contains a trust_id, we do nothing
- and return the current context.
-
- Returns a context containing the new trust_id.
- """
- if self.context.trust_id:
- return self.context
-
+ def _create_trust_context(self, trustor_user_id, trustor_proj_id):
# We need the service admin user ID (not name), as the trustor user
# can't lookup the ID in keystoneclient unless they're admin
# workaround this by getting the user_id from admin_client
@@ -209,9 +199,6 @@ class KsClientWrapper(object):
LOG.error("Domain admin client authentication failed")
raise exception.AuthorizationFailure()
- trustor_user_id = self.context.auth_plugin.get_user_id(self.session)
- trustor_proj_id = self.context.auth_plugin.get_project_id(self.session)
-
role_kw = {}
# inherit the roles of the trustor, unless set trusts_delegated_roles
if cfg.CONF.trusts_delegated_roles:
@@ -243,6 +230,23 @@ class KsClientWrapper(object):
trust_context.trustor_user_id = trustor_user_id
return trust_context
+ def create_trust_context(self):
+ """Create a trust using the trustor identity in the current context.
+
+ The trust is created with the trustee as the heat service user.
+
+ If the current context already contains a trust_id, we do nothing
+ and return the current context.
+
+ Returns a context containing the new trust_id.
+ """
+ if self.context.trust_id:
+ return self.context
+
+ trustor_user_id = self.context.auth_plugin.get_user_id(self.session)
+ trustor_proj_id = self.context.auth_plugin.get_project_id(self.session)
+ return self._create_trust_context(trustor_user_id, trustor_proj_id)
+
def delete_trust(self, trust_id):
"""Delete the specified trust."""
try:
@@ -250,6 +254,23 @@ class KsClientWrapper(object):
except (ks_exception.NotFound, ks_exception.Unauthorized):
pass
+ def regenerate_trust_context(self):
+ """Regenerate a trust using the trustor identity of current user_id.
+
+ The trust is created with the trustee as the heat service user.
+
+ Returns a context containing the new trust_id.
+ """
+ old_trust_id = self.context.trust_id
+ trustor_user_id = self.context.auth_plugin.get_user_id(self.session)
+ trustor_proj_id = self.context.auth_plugin.get_project_id(self.session)
+ trust_context = self._create_trust_context(trustor_user_id,
+ trustor_proj_id)
+
+ if old_trust_id:
+ self.delete_trust(old_trust_id)
+ return trust_context
+
def _get_username(self, username):
if(len(username) > 255):
LOG.warning("Truncating the username %s to the last 255 "
@@ -563,6 +584,29 @@ class KsClientWrapper(object):
self._check_stack_domain_user(user_id, project_id, 'enable')
self.domain_admin_client.users.update(user=user_id, enabled=True)
+ def server_keystone_endpoint_url(self, fallback_endpoint):
+ ks_endpoint_type = cfg.CONF.server_keystone_endpoint_type
+ if ((ks_endpoint_type == 'public') or (
+ ks_endpoint_type == 'internal') or
+ (ks_endpoint_type == 'admin')):
+ if (hasattr(self.context, 'auth_plugin') and
+ hasattr(self.context.auth_plugin, 'get_access')):
+ try:
+ auth_ref = self.context.auth_plugin.get_access(
+ self.session)
+ if hasattr(auth_ref, "service_catalog"):
+ unversioned_sc_auth_uri = (
+ auth_ref.service_catalog.get_urls(
+ service_type='identity',
+ interface=ks_endpoint_type))
+ if len(unversioned_sc_auth_uri) > 0:
+ sc_auth_uri = (
+ unversioned_sc_auth_uri[0] + "/v3")
+ return sc_auth_uri
+ except ks_exception.Unauthorized:
+ LOG.error("Keystone client authentication failed")
+ return fallback_endpoint
+
class KeystoneClient(object):
"""Keystone Auth Client.
@@ -571,9 +615,9 @@ class KeystoneClient(object):
needs to be initialized.
"""
- def __new__(cls, context):
+ def __new__(cls, context, region_name=None):
if cfg.CONF.keystone_backend == _default_keystone_backend:
- return KsClientWrapper(context)
+ return KsClientWrapper(context, region_name)
else:
return importutils.import_object(
cfg.CONF.keystone_backend,
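regenerate_trust_context() creates the replacement trust for the current trustor first and deletes the old trust only afterwards, so a failure during regeneration never leaves the stack without a usable trust. A sketch of that ordering with a toy in-memory trust store; nothing here is keystoneclient API:

    import itertools

    _ids = itertools.count(1)
    trusts = {}                       # toy stand-in for Keystone's trust store

    def create_trust(trustor):
        trust_id = 'trust-%d' % next(_ids)
        trusts[trust_id] = trustor
        return trust_id

    def delete_trust(trust_id):
        trusts.pop(trust_id, None)

    def regenerate_trust(old_trust_id, trustor):
        new_trust_id = create_trust(trustor)     # create the replacement first
        if old_trust_id:
            delete_trust(old_trust_id)           # then drop the stale trust
        return new_trust_id

    first = regenerate_trust(None, 'alice')
    second = regenerate_trust(first, 'alice')
    assert first not in trusts and second in trusts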
diff --git a/heat/engine/clients/os/magnum.py b/heat/engine/clients/os/magnum.py
index 216f4ece9..310b81c8d 100644
--- a/heat/engine/clients/os/magnum.py
+++ b/heat/engine/clients/os/magnum.py
@@ -13,6 +13,7 @@
from magnumclient import exceptions as mc_exc
from magnumclient.v1 import client as magnum_client
+from oslo_config import cfg
from heat.common import exception
from heat.engine.clients import client_plugin
@@ -31,6 +32,7 @@ class MagnumClientPlugin(client_plugin.ClientPlugin):
'interface': interface,
'service_type': self.CONTAINER,
'session': self.context.keystone_session,
+ 'connect_retries': cfg.CONF.client_retry_limit,
'region_name': self._get_region_name()
}
client = magnum_client.Client(**args)
diff --git a/heat/engine/clients/os/manila.py b/heat/engine/clients/os/manila.py
index 15f612e63..43ff43c24 100644
--- a/heat/engine/clients/os/manila.py
+++ b/heat/engine/clients/os/manila.py
@@ -16,8 +16,9 @@ from heat.engine.clients import client_plugin
from heat.engine import constraints
from manilaclient import client as manila_client
from manilaclient import exceptions
+from oslo_config import cfg
-MANILACLIENT_VERSION = "2"
+MANILACLIENT_VERSION = "2.13"
CLIENT_NAME = 'manila'
@@ -25,7 +26,7 @@ class ManilaClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
- service_types = [SHARE] = ['share']
+ service_types = [SHARE] = ['sharev2']
def _create(self):
endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
@@ -33,6 +34,7 @@ class ManilaClientPlugin(client_plugin.ClientPlugin):
'endpoint_type': endpoint_type,
'service_type': self.SHARE,
'session': self.context.keystone_session,
+ 'connect_retries': cfg.CONF.client_retry_limit,
'region_name': self._get_region_name()
}
client = manila_client.Client(MANILACLIENT_VERSION, **args)
diff --git a/heat/engine/clients/os/monasca.py b/heat/engine/clients/os/monasca.py
index 3fb656d32..6dccdfe90 100644
--- a/heat/engine/clients/os/monasca.py
+++ b/heat/engine/clients/os/monasca.py
@@ -11,8 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from monascaclient import client
from monascaclient import exc as monasca_exc
+from monascaclient.v2_0 import client as monasca_client
from heat.common import exception as heat_exc
from heat.engine.clients import client_plugin
@@ -32,9 +32,12 @@ class MonascaClientPlugin(client_plugin.ClientPlugin):
interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
endpoint = self.url_for(service_type=self.MONITORING,
endpoint_type=interface)
- return client.Client(
- self.VERSION,
+
+        # Use the v2_0 client directly to avoid monascaclient's dynamic import;
+        # switch back once https://review.opendev.org/#/c/700989 is fixed.
+ return monasca_client.Client(
session=self.context.keystone_session,
+ service_type='monitoring',
endpoint=endpoint)
def is_not_found(self, ex):
diff --git a/heat/engine/clients/os/neutron/__init__.py b/heat/engine/clients/os/neutron/__init__.py
index 847552673..beb752afe 100644
--- a/heat/engine/clients/os/neutron/__init__.py
+++ b/heat/engine/clients/os/neutron/__init__.py
@@ -14,6 +14,7 @@
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as nc
+from oslo_config import cfg
from oslo_utils import uuidutils
from heat.common import exception
@@ -22,7 +23,8 @@ from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
-class NeutronClientPlugin(client_plugin.ClientPlugin):
+class NeutronClientPlugin(os_client.ExtensionMixin,
+ client_plugin.ClientPlugin):
exceptions_module = exceptions
@@ -60,7 +62,8 @@ class NeutronClientPlugin(client_plugin.ClientPlugin):
'session': con.keystone_session,
'service_type': self.NETWORK,
'interface': interface,
- 'region_name': self._get_region_name()
+ 'region_name': self._get_region_name(),
+ 'connect_retries': cfg.CONF.client_retry_limit
}
return nc.Client(**args)
@@ -115,10 +118,6 @@ class NeutronClientPlugin(client_plugin.ClientPlugin):
extensions = self.client().list_extensions().get('extensions')
return set(extension.get('alias') for extension in extensions)
- def has_extension(self, alias):
- """Check if specific extension is present."""
- return alias in self._list_extensions()
-
def _resolve(self, props, key, id_key, key_type):
if props.get(key):
props[id_key] = self.find_resourceid_by_name_or_id(key_type,
@@ -168,7 +167,11 @@ class NeutronClientPlugin(client_plugin.ClientPlugin):
seclist.append(sg)
else:
if not all_groups:
- response = self.client().list_security_groups()
+ # filtering by project_id so that if the user
+ # has access to multiple (like admin)
+ # only groups from the token scope are returned
+ response = self.client().list_security_groups(
+ project_id=self.context.project_id)
all_groups = response['security_groups']
same_name_groups = [g for g in all_groups if g['name'] == sg]
groups = [g['id'] for g in same_name_groups]
@@ -177,15 +180,7 @@ class NeutronClientPlugin(client_plugin.ClientPlugin):
elif len(groups) == 1:
seclist.append(groups[0])
else:
- # for admin roles, can get the other users'
- # securityGroups, so we should match the tenant_id with
- # the groups, and return the own one
- own_groups = [g['id'] for g in same_name_groups
- if g['tenant_id'] == self.context.tenant_id]
- if len(own_groups) == 1:
- seclist.append(own_groups[0])
- else:
- raise exception.PhysicalResourceNameAmbiguity(name=sg)
+ raise exception.PhysicalResourceNameAmbiguity(name=sg)
return seclist
def _resolve_resource_path(self, resource):
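Security-group lookups by name are now scoped to the token's project via list_security_groups(project_id=...), so the old admin-only disambiguation by tenant_id is gone and a duplicate name within the project is simply ambiguous. A sketch of the resolution under that assumption; the group list below stands in for the already project-filtered API response:

    class PhysicalResourceNameAmbiguity(Exception):
        pass

    def resolve_security_group(name, groups):
        # `groups` stands in for the response of
        # list_security_groups(project_id=...), already project-scoped.
        matches = [g['id'] for g in groups if g['name'] == name]
        if len(matches) > 1:
            raise PhysicalResourceNameAmbiguity(name)
        return matches[0] if matches else None   # not-found handling elided

    groups = [{'id': 'sg-1', 'name': 'web'}, {'id': 'sg-2', 'name': 'db'}]
    assert resolve_security_group('web', groups) == 'sg-1'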
diff --git a/heat/engine/clients/os/nova.py b/heat/engine/clients/os/nova.py
index d19fa19c3..bb3a8adde 100644
--- a/heat/engine/clients/os/nova.py
+++ b/heat/engine/clients/os/nova.py
@@ -18,6 +18,7 @@ from email.mime import text
import os
import pkgutil
import string
+from urllib import parse as urlparse
from neutronclient.common import exceptions as q_exceptions
from novaclient import api_versions
@@ -27,8 +28,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import netutils
-import six
-from six.moves.urllib import parse as urlparse
import tenacity
from heat.common import exception
@@ -48,7 +47,7 @@ CLIENT_NAME = 'nova'
class NovaClientPlugin(microversion_mixin.MicroversionMixin,
client_plugin.ClientPlugin):
- deferred_server_statuses = ['BUILD',
+ deferred_server_statuses = {'BUILD',
'HARD_REBOOT',
'PASSWORD',
'REBOOT',
@@ -57,7 +56,7 @@ class NovaClientPlugin(microversion_mixin.MicroversionMixin,
'REVERT_RESIZE',
'SHUTOFF',
'SUSPENDED',
- 'VERIFY_RESIZE']
+ 'VERIFY_RESIZE'}
exceptions_module = exceptions
@@ -82,14 +81,13 @@ class NovaClientPlugin(microversion_mixin.MicroversionMixin,
def _get_args(self, version):
endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
- extensions = nc.discover_extensions(version)
return {
'session': self.context.keystone_session,
- 'extensions': extensions,
'endpoint_type': endpoint_type,
'service_type': self.COMPUTE,
'region_name': self._get_region_name(),
+ 'connect_retries': cfg.CONF.client_retry_limit,
'http_log_debug': self._get_client_option(CLIENT_NAME,
'http_log_debug')
}
@@ -168,6 +166,11 @@ class NovaClientPlugin(microversion_mixin.MicroversionMixin,
raise
return server
+ def fetch_server_attr(self, server_id, attr):
+ server = self.fetch_server(server_id)
+ fetched_attr = getattr(server, attr, None)
+ return fetched_attr
+
def refresh_server(self, server):
"""Refresh server's attributes.
@@ -221,7 +224,7 @@ class NovaClientPlugin(microversion_mixin.MicroversionMixin,
"""
# not checking with is_uuid_like as most tests use strings e.g. '1234'
- if isinstance(server, six.string_types):
+ if isinstance(server, str):
server = self.fetch_server(server)
if server is None:
return False
@@ -278,20 +281,15 @@ class NovaClientPlugin(microversion_mixin.MicroversionMixin,
return flavor
- def get_host(self, host_name):
- """Get the host id specified by name.
+ def get_host(self, hypervisor_hostname):
+        """Get the list of hypervisors matching the specified name.
- :param host_name: the name of host to find
- :returns: the list of match hosts
- :raises exception.EntityNotFound:
+ :param hypervisor_hostname: the name of host to find
+ :returns: list of matching hypervisor hosts
+        :raises novaclient.exceptions.NotFound:
"""
- host_list = self.client().hosts.list()
- for host in host_list:
- if host.host_name == host_name and host.service == self.COMPUTE:
- return host
-
- raise exception.EntityNotFound(entity='Host', name=host_name)
+ return self.client().hypervisors.search(hypervisor_hostname)
def get_keypair(self, key_name):
"""Get the public key specified by :key_name:
@@ -307,7 +305,7 @@ class NovaClientPlugin(microversion_mixin.MicroversionMixin,
def build_userdata(self, metadata, userdata=None, instance_user=None,
user_data_format='HEAT_CFNTOOLS'):
- """Build multipart data blob for CloudInit.
+ """Build multipart data blob for CloudInit and Ignition.
Data blob includes user-supplied Metadata, user data, and the required
Heat in-instance configuration.
@@ -329,6 +327,10 @@ class NovaClientPlugin(microversion_mixin.MicroversionMixin,
is_cfntools = user_data_format == 'HEAT_CFNTOOLS'
is_software_config = user_data_format == 'SOFTWARE_CONFIG'
+ if (is_software_config and
+ NovaClientPlugin.is_ignition_format(userdata)):
+ return NovaClientPlugin.build_ignition_data(metadata, userdata)
+
def make_subpart(content, filename, subtype=None):
if subtype is None:
subtype = os.path.splitext(filename)[0]
@@ -427,6 +429,51 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
return mime_blob.as_string()
+ @staticmethod
+ def is_ignition_format(userdata):
+ try:
+ payload = jsonutils.loads(userdata)
+ ig = payload.get("ignition")
+ return True if ig and ig.get("version") else False
+ except Exception:
+ return False
+
+ @staticmethod
+ def build_ignition_data(metadata, userdata):
+ if not metadata:
+ return userdata
+
+ payload = jsonutils.loads(userdata)
+ encoded_metadata = urlparse.quote(jsonutils.dumps(metadata))
+ path_list = ["/var/lib/heat-cfntools/cfn-init-data",
+ "/var/lib/cloud/data/cfn-init-data"]
+ ignition_format_metadata = {
+ "filesystem": "root",
+ "group": {"name": "root"},
+ "path": "",
+ "user": {"name": "root"},
+ "contents": {
+ "source": "data:," + encoded_metadata,
+ "verification": {}},
+ "mode": 0o640
+ }
+
+ for path in path_list:
+ storage = payload.setdefault('storage', {})
+ try:
+ files = storage.setdefault('files', [])
+ except AttributeError:
+ raise ValueError('Ignition "storage" section must be a map')
+ else:
+ try:
+ data = ignition_format_metadata.copy()
+ data["path"] = path
+ files.append(data)
+ except AttributeError:
+ raise ValueError('Ignition "files" section must be a list')
+
+ return jsonutils.dumps(payload)
+
def check_delete_server_complete(self, server_id):
"""Wait for server to disappear from Nova."""
try:
@@ -512,6 +559,10 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
return True
if status == 'VERIFY_RESIZE':
return False
+ task_state_in_nova = getattr(server, 'OS-EXT-STS:task_state', None)
+ # Wait till move out from any resize steps (including resize_finish).
+ if task_state_in_nova is not None and 'resize' in task_state_in_nova:
+ return False
else:
msg = _("Confirm resize for server %s failed") % server_id
raise exception.ResourceUnknownStatus(
@@ -545,12 +596,12 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
def meta_serialize(self, metadata):
"""Serialize non-string metadata values before sending them to Nova."""
- if not isinstance(metadata, collections.Mapping):
+ if not isinstance(metadata, collections.abc.Mapping):
raise exception.StackValidationFailed(message=_(
"nova server metadata needs to be a Map."))
return dict((key, (value if isinstance(value,
- six.string_types)
+ str)
else jsonutils.dumps(value))
) for (key, value) in metadata.items())
@@ -596,7 +647,7 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
"""
nc = self.client
- class ConsoleUrls(collections.Mapping):
+ class ConsoleUrls(collections.abc.Mapping):
def __init__(self, server):
self.console_method = server.get_console_url
self.support_console_types = ['novnc', 'xvpvnc',
@@ -618,7 +669,7 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
except exceptions.UnsupportedConsoleType as ex:
url = ex.message
except Exception as e:
- url = _('Cannot get console url: %s') % six.text_type(e)
+ url = _('Cannot get console url: %s') % str(e)
return url
@@ -637,7 +688,9 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
volume_id=volume_id,
device=device)
except Exception as ex:
- if self.is_client_exception(ex):
+ if self.is_conflict(ex):
+ return False
+ elif self.is_client_exception(ex):
raise exception.Error(_(
"Failed to attach volume %(vol)s to server %(srv)s "
"- %(err)s") % {'vol': volume_id,
@@ -652,12 +705,15 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
try:
self.client().volumes.delete_server_volume(server_id, attach_id)
except Exception as ex:
- if not (self.is_not_found(ex)
- or self.is_bad_request(ex)):
+ if self.is_conflict(ex):
+ return False
+ elif not (self.is_not_found(ex)
+ or self.is_bad_request(ex)):
raise exception.Error(
_("Could not detach attachment %(att)s "
"from server %(srv)s.") % {'srv': server_id,
'att': attach_id})
+ return True
def check_detach_volume_complete(self, server_id, attach_id):
"""Check that nova server lost attachment.
@@ -779,15 +835,6 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
return True
return False
- @os_client.MEMOIZE_EXTENSIONS
- def _list_extensions(self):
- extensions = self.client().list_extensions.show_all()
- return set(extension.alias for extension in extensions)
-
- def has_extension(self, alias):
- """Check if specific extension is present."""
- return alias in self._list_extensions()
-
class NovaBaseConstraint(constraints.BaseCustomConstraint):
@@ -820,4 +867,6 @@ class FlavorConstraint(NovaBaseConstraint):
class HostConstraint(NovaBaseConstraint):
+ expected_exceptions = (exceptions.NotFound,)
+
resource_getter_name = 'get_host'
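build_userdata() now short-circuits for Ignition: when SOFTWARE_CONFIG userdata parses as JSON with an ignition.version key, the Heat metadata is URL-encoded and appended as data: URLs under storage.files instead of being wrapped into a cloud-init MIME multipart. A standalone sketch of the detection and a single-path injection, using the stdlib json module in place of oslo jsonutils; inject_metadata is an illustrative name:

    import json
    from urllib import parse as urlparse

    def is_ignition_format(userdata):
        # Any JSON document with an "ignition" section carrying a "version".
        try:
            payload = json.loads(userdata)
        except (TypeError, ValueError):
            return False
        ig = payload.get("ignition")
        return bool(ig and ig.get("version"))

    def inject_metadata(userdata, metadata,
                        path="/var/lib/heat-cfntools/cfn-init-data"):
        # Minimal single-path version of build_ignition_data().
        payload = json.loads(userdata)
        files = payload.setdefault('storage', {}).setdefault('files', [])
        files.append({
            "filesystem": "root",
            "group": {"name": "root"},
            "user": {"name": "root"},
            "path": path,
            "mode": 0o640,
            "contents": {
                "source": "data:," + urlparse.quote(json.dumps(metadata)),
                "verification": {}},
        })
        return json.dumps(payload)

    userdata = '{"ignition": {"version": "3.0.0"}}'
    assert is_ignition_format(userdata)
    assert not is_ignition_format('#!/bin/sh\necho hello')
    assert '"/var/lib/heat-cfntools/cfn-init-data"' in inject_metadata(
        userdata, {"deployments": []})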
diff --git a/heat/engine/clients/os/octavia.py b/heat/engine/clients/os/octavia.py
index c865e01ca..d9ec6b8c6 100644
--- a/heat/engine/clients/os/octavia.py
+++ b/heat/engine/clients/os/octavia.py
@@ -78,6 +78,17 @@ class OctaviaClientPlugin(client_plugin.ClientPlugin):
value=value, attr=DEFAULT_FIND_ATTR)
return policy['id']
+ def get_flavor(self, value):
+ flavor = self.client().find(path=constants.BASE_FLAVOR_URL,
+ value=value, attr=DEFAULT_FIND_ATTR)
+ return flavor['id']
+
+ def get_flavorprofile(self, value):
+ flavorprofile = self.client().find(
+ path=constants.BASE_FLAVORPROFILE_URL,
+ value=value, attr=DEFAULT_FIND_ATTR)
+ return flavorprofile['id']
+
class OctaviaConstraint(constraints.BaseCustomConstraint):
@@ -105,3 +116,11 @@ class PoolConstraint(OctaviaConstraint):
class L7PolicyConstraint(OctaviaConstraint):
base_url = constants.BASE_L7POLICY_URL
+
+
+class FlavorConstraint(OctaviaConstraint):
+ base_url = constants.BASE_FLAVOR_URL
+
+
+class FlavorProfileConstraint(OctaviaConstraint):
+ base_url = constants.BASE_FLAVORPROFILE_URL
diff --git a/heat/engine/clients/os/sahara.py b/heat/engine/clients/os/sahara.py
index 0718fc877..a0b8fe1c5 100644
--- a/heat/engine/clients/os/sahara.py
+++ b/heat/engine/clients/os/sahara.py
@@ -13,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from oslo_config import cfg
from saharaclient.api import base as sahara_base
from saharaclient import client as sahara_client
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -38,6 +38,7 @@ class SaharaClientPlugin(client_plugin.ClientPlugin):
'endpoint_type': endpoint_type,
'service_type': self.DATA_PROCESSING,
'session': con.keystone_session,
+ 'connect_retries': cfg.CONF.client_retry_limit,
'region_name': self._get_region_name()
}
client = sahara_client.Client('1.1', **args)
@@ -115,7 +116,7 @@ class SaharaClientPlugin(client_plugin.ClientPlugin):
raise exception.Error(
_("Error retrieving %(entity)s list from sahara: "
"%(err)s") % dict(entity=resource_name,
- err=six.text_type(ex)))
+ err=str(ex)))
num_matches = len(obj_list)
if num_matches == 0:
raise exception.EntityNotFound(entity=resource_name or 'entity',
diff --git a/heat/engine/clients/os/senlin.py b/heat/engine/clients/os/senlin.py
index 408cabe29..6b62d01c7 100644
--- a/heat/engine/clients/os/senlin.py
+++ b/heat/engine/clients/os/senlin.py
@@ -50,6 +50,17 @@ class SenlinClientPlugin(sdk_plugin.OpenStackSDKPlugin):
)
return False
+ def cluster_is_active(self, cluster_id):
+ cluster = self.client().get_cluster(cluster_id)
+ if cluster.status == 'ACTIVE':
+ return True
+ elif cluster.status == 'ERROR':
+ raise exception.ResourceInError(
+ status_reason=cluster.status_reason,
+ resource_status=cluster.status,
+ )
+ return False
+
def get_profile_id(self, profile_name):
profile = self.client().get_profile(profile_name)
return profile.id
@@ -72,15 +83,19 @@ class SenlinClientPlugin(sdk_plugin.OpenStackSDKPlugin):
if action['done']:
continue
all_executed = False
- if action['action_id'] is None:
- func = getattr(self.client(), action['func'])
- ret = func(**action['params'])
- if isinstance(ret, dict):
- action['action_id'] = ret['action']
+ if 'action_id' in action:
+ if action['action_id'] is None:
+ func = getattr(self.client(), action['func'])
+ ret = func(**action['params'])
+ if isinstance(ret, dict):
+ action['action_id'] = ret['action']
+ else:
+ action['action_id'] = ret.location.split('/')[-1]
else:
- action['action_id'] = ret.location.split('/')[-1]
+ ret = self.check_action_status(action['action_id'])
+ action['done'] = ret
else:
- ret = self.check_action_status(action['action_id'])
+ ret = self.cluster_is_active(action['cluster_id'])
action['done'] = ret
# Execute these actions one by one.
break
diff --git a/heat/engine/clients/os/swift.py b/heat/engine/clients/os/swift.py
index 1edf378f0..7c05fc7b9 100644
--- a/heat/engine/clients/os/swift.py
+++ b/heat/engine/clients/os/swift.py
@@ -17,10 +17,9 @@ import hashlib
import logging
import random
import time
+from urllib import parse
from oslo_config import cfg
-import six
-from six.moves.urllib import parse
from swiftclient import client as sc
from swiftclient import exceptions
from swiftclient import utils as swiftclient_utils
@@ -98,8 +97,8 @@ class SwiftClientPlugin(client_plugin.ClientPlugin):
if key_header not in self.client().head_account():
self.client().post_account({
key_header: hashlib.sha224(
- six.b(six.text_type(
- random.getrandbits(256)))).hexdigest()[:32]})
+ str(random.getrandbits(256)).encode(
+ "latin-1")).hexdigest()[:32]})
key = self.client().head_account()[key_header]
@@ -173,5 +172,5 @@ class SwiftClientPlugin(client_plugin.ClientPlugin):
'container %(container)s, '
'reason: %(reason)s.') %
{'container': files_container,
- 'reason': six.text_type(cex)})
+ 'reason': str(cex)})
return files
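With six gone, the temp-URL account key is derived by hashing the latin-1-encoded decimal string of 256 random bits, matching what six.b(six.text_type(...)) produced before. A standalone sketch of the key generation:

    import hashlib
    import random

    def make_temp_url_key():
        # Hash the decimal string of 256 random bits; keep 32 hex characters.
        bits = str(random.getrandbits(256)).encode("latin-1")
        return hashlib.sha224(bits).hexdigest()[:32]

    key = make_temp_url_key()
    assert len(key) == 32
    assert int(key, 16) >= 0          # all characters are valid hex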
diff --git a/heat/engine/clients/os/trove.py b/heat/engine/clients/os/trove.py
index c645300b7..f818cab9e 100644
--- a/heat/engine/clients/os/trove.py
+++ b/heat/engine/clients/os/trove.py
@@ -11,6 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_config import cfg
from troveclient import client as tc
from troveclient import exceptions
@@ -36,6 +37,7 @@ class TroveClientPlugin(client_plugin.ClientPlugin):
'endpoint_type': endpoint_type,
'service_type': self.DATABASE,
'session': con.keystone_session,
+ 'retries': cfg.CONF.client_retry_limit,
'region_name': self._get_region_name()
}
diff --git a/heat/engine/clients/os/vitrage.py b/heat/engine/clients/os/vitrage.py
new file mode 100644
index 000000000..c51b5f2f2
--- /dev/null
+++ b/heat/engine/clients/os/vitrage.py
@@ -0,0 +1,30 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.engine.clients import client_plugin
+from oslo_log import log as logging
+from vitrageclient import client as vitrage_client
+
+LOG = logging.getLogger(__name__)
+
+CLIENT_NAME = 'vitrage'
+
+
+class VitrageClientPlugin(client_plugin.ClientPlugin):
+
+ exceptions_module = None
+
+ service_types = [RCA] = ['rca']
+
+ def _create(self):
+ return vitrage_client.Client('1', self.context.keystone_session)
diff --git a/heat/engine/clients/os/zaqar.py b/heat/engine/clients/os/zaqar.py
index a1b01f9a8..53b0a362b 100644
--- a/heat/engine/clients/os/zaqar.py
+++ b/heat/engine/clients/os/zaqar.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from oslo_log import log as logging
from zaqarclient.queues.v2 import client as zaqarclient
from zaqarclient.transport import errors as zaqar_errors
@@ -75,7 +73,7 @@ class ZaqarClientPlugin(client_plugin.ClientPlugin):
return isinstance(ex, zaqar_errors.ResourceNotFound)
def get_queue(self, queue_name):
- if not isinstance(queue_name, six.string_types):
+ if not isinstance(queue_name, str):
raise TypeError(_('Queue name must be a string'))
if not (0 < len(queue_name) <= 64):
raise ValueError(_('Queue name length must be 1-64'))
diff --git a/heat/engine/clients/os/zun.py b/heat/engine/clients/os/zun.py
index 1e1f2ca96..794cea9b8 100644
--- a/heat/engine/clients/os/zun.py
+++ b/heat/engine/clients/os/zun.py
@@ -28,9 +28,9 @@ class ZunClientPlugin(client_plugin.ClientPlugin):
default_version = '1.12'
supported_versions = [
- V1_12, V1_18
+ V1_12, V1_18, V1_36,
] = [
- '1.12', '1.18'
+ '1.12', '1.18', '1.36',
]
def _create(self, version=None):
diff --git a/heat/engine/conditions.py b/heat/engine/conditions.py
index cc18fa9b6..73e51f485 100644
--- a/heat/engine/conditions.py
+++ b/heat/engine/conditions.py
@@ -13,8 +13,6 @@
import collections
-import six
-
from heat.common.i18n import _
from heat.common import exception
@@ -26,12 +24,12 @@ _in_progress = object()
class Conditions(object):
def __init__(self, conditions_dict):
- assert isinstance(conditions_dict, collections.Mapping)
+ assert isinstance(conditions_dict, collections.abc.Mapping)
self._conditions = conditions_dict
self._resolved = {}
def validate(self):
- for name, cond in six.iteritems(self._conditions):
+ for name, cond in self._conditions.items():
self._check_condition_type(name, cond)
function.validate(cond)
@@ -56,7 +54,7 @@ class Conditions(object):
if isinstance(condition_name, bool):
return condition_name
- if not (isinstance(condition_name, six.string_types) and
+ if not (isinstance(condition_name, str) and
condition_name in self._conditions):
raise ValueError(_('Invalid condition "%s"') % condition_name)
diff --git a/heat/engine/constraint/common_constraints.py b/heat/engine/constraint/common_constraints.py
index 653a034c3..cef257141 100644
--- a/heat/engine/constraint/common_constraints.py
+++ b/heat/engine/constraint/common_constraints.py
@@ -15,7 +15,6 @@ import croniter
import eventlet
import netaddr
import pytz
-import six
from neutron_lib.api import validators
from oslo_utils import timeutils
@@ -35,7 +34,7 @@ class IPConstraint(constraints.BaseCustomConstraint):
def validate(self, value, context, template=None):
self._error_message = 'Invalid IP address'
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
return False
msg = validators.validate_ip_address(value)
if msg is not None:
@@ -59,7 +58,7 @@ class DNSNameConstraint(constraints.BaseCustomConstraint):
self._error_message = ("'%(value)s' not in valid format."
" Reason: %(reason)s") % {
'value': value,
- 'reason': six.text_type(ex)}
+ 'reason': str(ex)}
return False
return True
@@ -114,7 +113,7 @@ class CIDRConstraint(constraints.BaseCustomConstraint):
return False
return True
except Exception as ex:
- self._error_message = 'Invalid net cidr %s ' % six.text_type(ex)
+ self._error_message = 'Invalid net cidr %s ' % str(ex)
return False
@@ -158,7 +157,7 @@ class CRONExpressionConstraint(constraints.BaseCustomConstraint):
return True
except Exception as ex:
self._error_message = _(
- 'Invalid CRON expression: %s') % six.text_type(ex)
+ 'Invalid CRON expression: %s') % str(ex)
return False
@@ -172,7 +171,7 @@ class TimezoneConstraint(constraints.BaseCustomConstraint):
return True
except Exception as ex:
self._error_message = _(
- 'Invalid timezone: %s') % six.text_type(ex)
+ 'Invalid timezone: %s') % str(ex)
return False
@@ -190,5 +189,5 @@ class ExpirationConstraint(constraints.BaseCustomConstraint):
except Exception as ex:
self._error_message = (_(
'Expiration {0} is invalid: {1}').format(value,
- six.text_type(ex)))
+ str(ex)))
return False
diff --git a/heat/engine/constraints.py b/heat/engine/constraints.py
index 6e068448d..dfed2c969 100644
--- a/heat/engine/constraints.py
+++ b/heat/engine/constraints.py
@@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_log import log
from oslo_utils import reflection
from oslo_utils import strutils
-import six
from heat.common import cache
from heat.common import exception
@@ -37,7 +36,7 @@ MEMOIZE = core.get_memoization_decorator(conf=cfg.CONF,
LOG = log.getLogger(__name__)
-class Schema(collections.Mapping):
+class Schema(collections.abc.Mapping):
"""Schema base class for validating properties or parameters.
Schema objects are serializable to dictionaries following a superset of
@@ -149,7 +148,7 @@ class Schema(collections.Mapping):
if isinstance(self.schema, AnyIndexDict):
self.schema.value.validate(context)
else:
- for nested_schema in six.itervalues(self.schema):
+ for nested_schema in self.schema.values():
nested_schema.validate(context)
def _validate_default(self, context):
@@ -195,9 +194,9 @@ class Schema(collections.Mapping):
elif self.type == self.NUMBER:
return Schema.str_to_num(value)
elif self.type == self.STRING:
- return six.text_type(value)
+ return str(value)
elif self.type == self.BOOLEAN:
- return strutils.bool_from_string(six.text_type(value),
+ return strutils.bool_from_string(str(value),
strict=True)
except ValueError:
raise ValueError(_('Value "%(val)s" is invalid for data type '
@@ -215,7 +214,7 @@ class Schema(collections.Mapping):
if type(constraint) not in skipped:
constraint.validate(value, self, context)
except ValueError as ex:
- raise exception.StackValidationFailed(message=six.text_type(ex))
+ raise exception.StackValidationFailed(message=str(ex))
def __getitem__(self, key):
if key == self.TYPE:
@@ -252,7 +251,7 @@ class Schema(collections.Mapping):
return self._len
-class AnyIndexDict(collections.Mapping):
+class AnyIndexDict(collections.abc.Mapping):
"""A Mapping that returns the same value for any integer index.
Used for storing the schema for a list. When converted to a dictionary,
@@ -265,7 +264,7 @@ class AnyIndexDict(collections.Mapping):
self.value = value
def __getitem__(self, key):
- if key != self.ANYTHING and not isinstance(key, six.integer_types):
+ if key != self.ANYTHING and not isinstance(key, int):
raise KeyError(_('Invalid key %s') % key)
return self.value
@@ -277,8 +276,7 @@ class AnyIndexDict(collections.Mapping):
return 1
-@six.python_2_unicode_compatible
-class Constraint(collections.Mapping):
+class Constraint(collections.abc.Mapping):
"""Parent class for constraints on allowable values for a Property.
Constraints are serializable to dictionaries following the HOT input
@@ -353,7 +351,7 @@ class Range(Constraint):
self.max = max
for param in (min, max):
- if not isinstance(param, (float, six.integer_types, type(None))):
+ if not isinstance(param, (float, int, type(None))):
raise exception.InvalidSchemaError(
message=_('min/max must be numeric'))
@@ -422,7 +420,7 @@ class Length(Range):
super(Length, self).__init__(min, max, description)
for param in (min, max):
- if not isinstance(param, (six.integer_types, type(None))):
+ if not isinstance(param, (int, type(None))):
msg = _('min/max length must be integral')
raise exception.InvalidSchemaError(message=msg)
@@ -471,7 +469,7 @@ class Modulo(Constraint):
'an offset value specified.'))
for param in (step, offset):
- if not isinstance(param, (float, six.integer_types, type(None))):
+ if not isinstance(param, (float, int, type(None))):
raise exception.InvalidSchemaError(
message=_('step/offset must be numeric'))
@@ -542,8 +540,8 @@ class AllowedValues(Constraint):
def __init__(self, allowed, description=None):
super(AllowedValues, self).__init__(description)
- if (not isinstance(allowed, collections.Sequence) or
- isinstance(allowed, six.string_types)):
+ if (not isinstance(allowed, collections.abc.Sequence) or
+ isinstance(allowed, str)):
raise exception.InvalidSchemaError(
message=_('AllowedValues must be a list'))
self.allowed = tuple(allowed)
@@ -589,7 +587,7 @@ class AllowedPattern(Constraint):
def __init__(self, pattern, description=None):
super(AllowedPattern, self).__init__(description)
- if not isinstance(pattern, six.string_types):
+ if not isinstance(pattern, str):
raise exception.InvalidSchemaError(
message=_('AllowedPattern must be a string'))
self.pattern = pattern
@@ -701,13 +699,13 @@ class BaseCustomConstraint(object):
try:
self.validate_with_client(context.clients, value_to_validate)
except self.expected_exceptions as e:
- self._error_message = six.text_type(e)
+ self._error_message = str(e)
return False
else:
return True
class_name = reflection.get_class_name(self, fully_qualified=False)
cache_value_prefix = "{0}:{1}".format(class_name,
- six.text_type(context.tenant_id))
+ str(context.tenant_id))
validation_result = check_cache_or_validate_value(
cache_value_prefix, value)
# if validation failed we should not store it in cache
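
The switch from collections.Mapping to collections.abc.Mapping above is required because the aliases in the collections module were removed in Python 3.10. A minimal standalone sketch of the pattern Schema, AnyIndexDict and Constraint rely on (hypothetical class, not part of this patch):

    import collections.abc

    class FrozenSchema(collections.abc.Mapping):
        """Read-only mapping: only __getitem__, __iter__ and __len__ are required."""

        def __init__(self, **kwargs):
            self._data = dict(kwargs)

        def __getitem__(self, key):
            return self._data[key]

        def __iter__(self):
            return iter(self._data)

        def __len__(self):
            return len(self._data)

    # keys(), items(), get(), __contains__ etc. are supplied by the ABC mixins.
    s = FrozenSchema(type='string', description='example')
    assert dict(s) == {'type': 'string', 'description': 'example'}
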
diff --git a/heat/engine/dependencies.py b/heat/engine/dependencies.py
index 6015670a2..b61dd578c 100644
--- a/heat/engine/dependencies.py
+++ b/heat/engine/dependencies.py
@@ -14,19 +14,14 @@
import collections
import itertools
-import six
-
from heat.common import exception
from heat.common.i18n import _
-from heat.common.i18n import repr_wrapper
class CircularDependencyException(exception.HeatException):
msg_fmt = _("Circular Dependency Found: %(cycle)s")
-@repr_wrapper
-@six.python_2_unicode_compatible
class Node(object):
"""A node in a dependency graph."""
@@ -94,15 +89,14 @@ class Node(object):
def __str__(self):
"""Return a human-readable string representation of the node."""
- text = '{%s}' % ', '.join(six.text_type(n) for n in self)
- return six.text_type(text)
+ text = '{%s}' % ', '.join(str(n) for n in self)
+ return str(text)
def __repr__(self):
"""Return a string representation of the node."""
return repr(self.require)
-@six.python_2_unicode_compatible
class Graph(collections.defaultdict):
"""A mutable mapping of objects to nodes in a dependency graph."""
@@ -134,7 +128,7 @@ class Graph(collections.defaultdict):
for rqd in node:
yield (rqr, rqd)
return itertools.chain.from_iterable(outgoing_edges(*i)
- for i in six.iteritems(self))
+ for i in self.items())
def __delitem__(self, key):
"""Delete the node given by the specified key from the graph."""
@@ -149,10 +143,10 @@ class Graph(collections.defaultdict):
def __str__(self):
"""Convert the graph to a human-readable string."""
- pairs = ('%s: %s' % (six.text_type(k), six.text_type(v))
- for k, v in six.iteritems(self))
+ pairs = ('%s: %s' % (str(k), str(v))
+ for k, v in self.items())
text = '{%s}' % ', '.join(pairs)
- return six.text_type(text)
+ return str(text)
@staticmethod
def toposort(graph):
@@ -160,8 +154,8 @@ class Graph(collections.defaultdict):
This is a destructive operation for the graph.
"""
- for iteration in six.moves.xrange(len(graph)):
- for key, node in six.iteritems(graph):
+ for iteration in range(len(graph)):
+ for key, node in graph.items():
if not node:
yield key
del graph[key]
@@ -169,11 +163,9 @@ class Graph(collections.defaultdict):
else:
# There are nodes remaining, but none without
# dependencies: a cycle
- raise CircularDependencyException(cycle=six.text_type(graph))
+ raise CircularDependencyException(cycle=str(graph))
-@repr_wrapper
-@six.python_2_unicode_compatible
class Dependencies(object):
"""Helper class for calculating a dependency graph."""
@@ -230,8 +222,8 @@ class Dependencies(object):
return itertools.chain([(rqr, key)], get_edges(rqr))
# Get the edge list for each node that requires the current node
- edge_lists = six.moves.map(requirer_edges,
- self._graph[key].required_by())
+ edge_lists = map(requirer_edges,
+ self._graph[key].required_by())
# Combine the lists into one long list
return itertools.chain.from_iterable(edge_lists)
@@ -266,7 +258,7 @@ class Dependencies(object):
def __str__(self):
"""Return a human-readable string repr of the dependency graph."""
- return six.text_type(self._graph)
+ return str(self._graph)
def __repr__(self):
"""Return a consistent string representation of the object."""
diff --git a/heat/engine/environment.py b/heat/engine/environment.py
index 1efb9f374..7ff4fcca9 100644
--- a/heat/engine/environment.py
+++ b/heat/engine/environment.py
@@ -21,7 +21,6 @@ import weakref
from oslo_config import cfg
from oslo_log import log
from oslo_utils import fnmatch
-import six
from heat.common import environment_format as env_fmt
from heat.common import exception
@@ -54,9 +53,9 @@ def valid_restricted_actions(action):
def is_hook_definition(key, value):
is_valid_hook = False
if key == 'hooks':
- if isinstance(value, six.string_types):
+ if isinstance(value, str):
is_valid_hook = valid_hook_type(value)
- elif isinstance(value, collections.Sequence):
+ elif isinstance(value, collections.abc.Sequence):
is_valid_hook = all(valid_hook_type(hook) for hook in value)
if not is_valid_hook:
@@ -71,9 +70,9 @@ def is_hook_definition(key, value):
def is_valid_restricted_action(key, value):
valid_action = False
if key == 'restricted_actions':
- if isinstance(value, six.string_types):
+ if isinstance(value, str):
valid_action = valid_restricted_actions(value)
- elif isinstance(value, collections.Sequence):
+ elif isinstance(value, collections.abc.Sequence):
valid_action = all(valid_restricted_actions(
action) for action in value)
@@ -101,7 +100,7 @@ class ResourceInfo(object):
if name.endswith(('.yaml', '.template')):
# a template url for the resource "Type"
klass = TemplateResourceInfo
- elif not isinstance(value, six.string_types):
+ elif not isinstance(value, str):
klass = ClassResourceInfo
elif value.endswith(('.yaml', '.template')):
# a registered template
@@ -344,10 +343,8 @@ class ResourceRegistry(object):
if info.value.support_status.message is not None:
details = {
'name': info.name,
- 'status': six.text_type(
- info.value.support_status.status),
- 'message': six.text_type(
- info.value.support_status.message)
+ 'status': str(info.value.support_status.status),
+ 'message': str(info.value.support_status.message)
}
LOG.warning('%(name)s is %(status)s. %(message)s',
details)
@@ -364,7 +361,7 @@ class ResourceRegistry(object):
if show_all or isinstance(registry[name], TemplateResourceInfo):
msg = ('%(p)sRegistered: %(t)s' %
{'p': prefix,
- 't': six.text_type(registry[name])})
+ 't': str(registry[name])})
LOG.info(msg)
def remove_item(self, info):
@@ -394,13 +391,13 @@ class ResourceRegistry(object):
"""
ress = self._registry['resources']
restricted_actions = set()
- for name_pattern, resource in six.iteritems(ress):
+ for name_pattern, resource in ress.items():
if fnmatch.fnmatchcase(resource_name, name_pattern):
if 'restricted_actions' in resource:
actions = resource['restricted_actions']
- if isinstance(actions, six.string_types):
+ if isinstance(actions, str):
restricted_actions.add(actions)
- elif isinstance(actions, collections.Sequence):
+ elif isinstance(actions, collections.abc.Sequence):
restricted_actions |= set(actions)
return restricted_actions
@@ -429,14 +426,14 @@ class ResourceRegistry(object):
everything.
"""
ress = self._registry['resources']
- for name_pattern, resource in six.iteritems(ress):
+ for name_pattern, resource in ress.items():
if fnmatch.fnmatchcase(resource_name, name_pattern):
if 'hooks' in resource:
hooks = resource['hooks']
- if isinstance(hooks, six.string_types):
+ if isinstance(hooks, str):
if hook == hooks:
return True
- elif isinstance(hooks, collections.Sequence):
+ elif isinstance(hooks, collections.abc.Sequence):
if hook in hooks:
return True
return False
@@ -444,7 +441,7 @@ class ResourceRegistry(object):
def remove_resources_except(self, resource_name):
ress = self._registry['resources']
new_resources = {}
- for name, res in six.iteritems(ress):
+ for name, res in ress.items():
if fnmatch.fnmatchcase(resource_name, name):
new_resources.update(res)
if resource_name in ress:
@@ -477,7 +474,7 @@ class ResourceRegistry(object):
# handle: "OS::*" -> "Dreamhost::*"
def is_a_glob(resource_type):
return resource_type.endswith('*')
- globs = six.moves.filter(is_a_glob, iter(self._registry))
+ globs = filter(is_a_glob, iter(self._registry))
for pattern in globs:
if self._registry[pattern].matches(resource_type):
yield self._registry[pattern]
@@ -550,7 +547,7 @@ class ResourceRegistry(object):
msg = _('Non-empty resource type is required '
'for resource "%s"') % resource_name
raise exception.StackValidationFailed(message=msg)
- elif not isinstance(resource_type, six.string_types):
+ elif not isinstance(resource_type, str):
msg = _('Resource "%s" type is not a string') % resource_name
raise exception.StackValidationFailed(message=msg)
@@ -558,7 +555,7 @@ class ResourceRegistry(object):
info = self.get_resource_info(resource_type,
resource_name=resource_name)
except exception.EntityNotFound as exc:
- raise exception.StackValidationFailed(message=six.text_type(exc))
+ raise exception.StackValidationFailed(message=str(exc))
return info.get_class_to_instantiate()
@@ -590,55 +587,63 @@ class ResourceRegistry(object):
if support_status is not None and not support.is_valid_status(
support_status):
msg = (_('Invalid support status and should be one of %s') %
- six.text_type(support.SUPPORT_STATUSES))
+ str(support.SUPPORT_STATUSES))
raise exception.Invalid(reason=msg)
- def is_resource(key):
- return isinstance(self._registry[key], (ClassResourceInfo,
- TemplateResourceInfo))
-
- def status_matches(cls):
- return (support_status is None or
- cls.get_class().support_status.status ==
- support_status)
-
- def is_available(cls):
- if cnxt is None:
- return True
-
+ enforcer = policy.ResourceEnforcer()
+ if type_name is not None:
try:
- return cls.get_class().is_service_available(cnxt)[0]
+ name_exp = re.compile(type_name)
except Exception:
+ return []
+ else:
+ name_exp = None
+
+ def matches(name, info):
+ # Only return actual plugins or template resources, not aliases
+ if not isinstance(info, (ClassResourceInfo, TemplateResourceInfo)):
return False
- def not_hidden_matches(cls):
- return cls.get_class().support_status.status != support.HIDDEN
+ # If filtering by name, check for match
+ if name_exp is not None and not name_exp.match(name):
+ return False
- def is_allowed(enforcer, name):
- if cnxt is None:
- return True
- try:
- enforcer.enforce(cnxt, name, is_registered_policy=True)
- except enforcer.exc:
+ rsrc_cls = info.get_class_to_instantiate()
+
+ # Never match hidden resource types
+ if rsrc_cls.support_status.status == support.HIDDEN:
return False
- else:
- return True
- enforcer = policy.ResourceEnforcer()
+ # If filtering by version, check for match
+ if (version is not None and
+ rsrc_cls.support_status.version != version):
+ return False
- def name_matches(name):
- try:
- return type_name is None or re.match(type_name, name)
- except: # noqa
+ # If filtering by support status, check for match
+ if (support_status is not None and
+ rsrc_cls.support_status.status != support_status):
return False
- def version_matches(cls):
- return (version is None or
- cls.get_class().support_status.version == version)
+ if cnxt is not None:
+ # Check for resource policy
+ try:
+ enforcer.enforce(cnxt, name, is_registered_policy=True)
+ except enforcer.exc:
+ return False
+
+ # Check for service availability
+ try:
+ avail, err = rsrc_cls.is_service_available(cnxt)
+ except Exception:
+ avail = False
+ if not avail:
+ return False
+
+ return True
import heat.engine.resource
- def resource_description(name, info, with_description):
+ def resource_description(name, info):
if not with_description:
return name
rsrc_cls = info.get_class()
@@ -649,15 +654,9 @@ class ResourceRegistry(object):
'description': rsrc_cls.getdoc(),
}
- return [resource_description(name, cls, with_description)
- for name, cls in six.iteritems(self._registry)
- if (is_resource(name) and
- name_matches(name) and
- status_matches(cls) and
- is_available(cls) and
- is_allowed(enforcer, name) and
- not_hidden_matches(cls) and
- version_matches(cls))]
+ return [resource_description(name, info)
+ for name, info in self._registry.items()
+ if matches(name, info)]
class Environment(object):
@@ -693,7 +692,7 @@ class Environment(object):
if env_fmt.PARAMETERS in env:
self.params = env[env_fmt.PARAMETERS]
else:
- self.params = dict((k, v) for (k, v) in six.iteritems(env)
+ self.params = dict((k, v) for (k, v) in env.items()
if k not in (env_fmt.PARAMETER_DEFAULTS,
env_fmt.ENCRYPTED_PARAM_NAMES,
env_fmt.EVENT_SINKS,
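
The registry listing above folds several small per-entry predicates into a single matches(name, info) filter applied in one comprehension. A schematic reduction of that pattern with toy data (hypothetical names, not part of this patch):

    import re

    registry = {
        'OS::Test::Alpha': {'status': 'SUPPORTED', 'hidden': False},
        'OS::Test::Beta': {'status': 'DEPRECATED', 'hidden': False},
        'OS::Test::Gamma': {'status': 'SUPPORTED', 'hidden': True},
    }

    def matches(name, info, name_exp=None, support_status=None):
        # Reject as early as possible: name filter, hidden flag, status filter.
        if name_exp is not None and not name_exp.match(name):
            return False
        if info['hidden']:
            return False
        if support_status is not None and info['status'] != support_status:
            return False
        return True

    name_exp = re.compile('OS::Test::.*')
    print([n for n, i in registry.items()
           if matches(n, i, name_exp=name_exp, support_status='SUPPORTED')])
    # ['OS::Test::Alpha']
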
diff --git a/heat/engine/function.py b/heat/engine/function.py
index 9dfc8d3d5..2169e2194 100644
--- a/heat/engine/function.py
+++ b/heat/engine/function.py
@@ -16,14 +16,11 @@ import collections
import itertools
import weakref
-import six
-
from heat.common import exception
from heat.common.i18n import _
-@six.add_metaclass(abc.ABCMeta)
-class Function(object):
+class Function(metaclass=abc.ABCMeta):
"""Abstract base class for template functions."""
def __init__(self, stack, fn_name, args):
@@ -98,13 +95,13 @@ class Function(object):
return all_dep_attrs(self.args)
def res_dep_attrs(resource_name):
- return six.moves.zip(itertools.repeat(resource_name),
- self.dep_attrs(resource_name))
+ return zip(itertools.repeat(resource_name),
+ self.dep_attrs(resource_name))
resource_names = self.stack.enabled_rsrc_names()
- return itertools.chain.from_iterable(six.moves.map(res_dep_attrs,
- resource_names))
+ return itertools.chain.from_iterable(map(res_dep_attrs,
+ resource_names))
def __reduce__(self):
"""Return a representation of the function suitable for pickling.
@@ -160,8 +157,7 @@ class Function(object):
__hash__ = None
-@six.add_metaclass(abc.ABCMeta)
-class Macro(Function):
+class Macro(Function, metaclass=abc.ABCMeta):
"""Abstract base class for template macros.
A macro differs from a function in that it controls how the template is
@@ -199,7 +195,7 @@ class Macro(Function):
def result(self):
"""Return the resolved result of the macro contents."""
- return resolve(self.parsed)
+ return resolve(self.parsed, nullable=True)
def dependencies(self, path):
return dependencies(self.parsed, '.'.join([path, self.fn_name]))
@@ -254,15 +250,30 @@ class Macro(Function):
return repr(self.parsed)
-def resolve(snippet):
- if isinstance(snippet, Function):
- return snippet.result()
+def _non_null_item(i):
+ k, v = i
+ return v is not Ellipsis
+
- if isinstance(snippet, collections.Mapping):
- return dict((k, resolve(v)) for k, v in snippet.items())
- elif (not isinstance(snippet, six.string_types) and
- isinstance(snippet, collections.Iterable)):
- return [resolve(v) for v in snippet]
+def _non_null_value(v):
+ return v is not Ellipsis
+
+
+def resolve(snippet, nullable=False):
+ if isinstance(snippet, Function):
+ result = snippet.result()
+ if not (nullable or _non_null_value(result)):
+ result = None
+ return result
+
+ if isinstance(snippet, collections.abc.Mapping):
+ return dict(filter(_non_null_item,
+ ((k, resolve(v, nullable=True))
+ for k, v in snippet.items())))
+ elif (not isinstance(snippet, str) and
+ isinstance(snippet, collections.abc.Iterable)):
+ return list(filter(_non_null_value,
+ (resolve(v, nullable=True) for v in snippet)))
return snippet
@@ -270,7 +281,7 @@ def resolve(snippet):
def validate(snippet, path=None):
if path is None:
path = []
- elif isinstance(path, six.string_types):
+ elif isinstance(path, str):
path = [path]
if isinstance(snippet, Function):
@@ -281,12 +292,12 @@ def validate(snippet, path=None):
except Exception as e:
raise exception.StackValidationFailed(
path=path + [snippet.fn_name],
- message=six.text_type(e))
- elif isinstance(snippet, collections.Mapping):
- for k, v in six.iteritems(snippet):
+ message=str(e))
+ elif isinstance(snippet, collections.abc.Mapping):
+ for k, v in snippet.items():
validate(v, path + [k])
- elif (not isinstance(snippet, six.string_types) and
- isinstance(snippet, collections.Iterable)):
+ elif (not isinstance(snippet, str) and
+ isinstance(snippet, collections.abc.Iterable)):
basepath = list(path)
parent = basepath.pop() if basepath else ''
for i, v in enumerate(snippet):
@@ -303,16 +314,16 @@ def dependencies(snippet, path=''):
if isinstance(snippet, Function):
return snippet.dependencies(path)
- elif isinstance(snippet, collections.Mapping):
+ elif isinstance(snippet, collections.abc.Mapping):
def mkpath(key):
- return '.'.join([path, six.text_type(key)])
+ return '.'.join([path, str(key)])
deps = (dependencies(value,
mkpath(key)) for key, value in snippet.items())
return itertools.chain.from_iterable(deps)
- elif (not isinstance(snippet, six.string_types) and
- isinstance(snippet, collections.Iterable)):
+ elif (not isinstance(snippet, str) and
+ isinstance(snippet, collections.abc.Iterable)):
def mkpath(idx):
return ''.join([path, '[%d]' % idx])
@@ -337,11 +348,11 @@ def dep_attrs(snippet, resource_name):
if isinstance(snippet, Function):
return snippet.dep_attrs(resource_name)
- elif isinstance(snippet, collections.Mapping):
+ elif isinstance(snippet, collections.abc.Mapping):
attrs = (dep_attrs(val, resource_name) for val in snippet.values())
return itertools.chain.from_iterable(attrs)
- elif (not isinstance(snippet, six.string_types) and
- isinstance(snippet, collections.Iterable)):
+ elif (not isinstance(snippet, str) and
+ isinstance(snippet, collections.abc.Iterable)):
attrs = (dep_attrs(value, resource_name) for value in snippet)
return itertools.chain.from_iterable(attrs)
return []
@@ -360,11 +371,11 @@ def all_dep_attrs(snippet):
if isinstance(snippet, Function):
return snippet.all_dep_attrs()
- elif isinstance(snippet, collections.Mapping):
+ elif isinstance(snippet, collections.abc.Mapping):
res_attrs = (all_dep_attrs(value) for value in snippet.values())
return itertools.chain.from_iterable(res_attrs)
- elif (not isinstance(snippet, six.string_types) and
- isinstance(snippet, collections.Iterable)):
+ elif (not isinstance(snippet, str) and
+ isinstance(snippet, collections.abc.Iterable)):
res_attrs = (all_dep_attrs(value) for value in snippet)
return itertools.chain.from_iterable(res_attrs)
return []
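
The reworked resolve() above treats Ellipsis as "no value": it is filtered out of maps and lists, and collapsed to None at the top level unless nullable=True. A standalone sketch of that pruning behaviour using plain data instead of Function objects (not part of this patch):

    import collections.abc

    def prune(snippet, nullable=False):
        """Drop Ellipsis placeholders; at the top level return None unless nullable."""
        if snippet is Ellipsis:
            return Ellipsis if nullable else None
        if isinstance(snippet, collections.abc.Mapping):
            items = ((k, prune(v, nullable=True)) for k, v in snippet.items())
            return {k: v for k, v in items if v is not Ellipsis}
        if (not isinstance(snippet, str) and
                isinstance(snippet, collections.abc.Iterable)):
            return [v for v in (prune(v, nullable=True) for v in snippet)
                    if v is not Ellipsis]
        return snippet

    print(prune({'a': 1, 'b': Ellipsis, 'c': [1, Ellipsis, 3]}))
    # {'a': 1, 'c': [1, 3]}
    print(prune(Ellipsis))
    # None
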
diff --git a/heat/engine/hot/functions.py b/heat/engine/hot/functions.py
index 713815e89..33b75f4e4 100644
--- a/heat/engine/hot/functions.py
+++ b/heat/engine/hot/functions.py
@@ -12,14 +12,14 @@
# under the License.
import collections
+import functools
import hashlib
import itertools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
import yaql
from yaql.language import exceptions
@@ -76,17 +76,17 @@ class GetParam(function.Function):
raise ValueError(_('Function "%s" must have arguments') %
self.fn_name)
- if isinstance(args, six.string_types):
+ if isinstance(args, str):
param_name = args
path_components = []
- elif isinstance(args, collections.Sequence):
+ elif isinstance(args, collections.abc.Sequence):
param_name = args[0]
path_components = args[1:]
else:
raise TypeError(_('Argument to "%s" must be string or list') %
self.fn_name)
- if not isinstance(param_name, six.string_types):
+ if not isinstance(param_name, str):
raise TypeError(_('Parameter name in "%s" must be string') %
self.fn_name)
@@ -96,16 +96,16 @@ class GetParam(function.Function):
raise exception.UserParameterMissing(key=param_name)
def get_path_component(collection, key):
- if not isinstance(collection, (collections.Mapping,
- collections.Sequence)):
+ if not isinstance(collection, (collections.abc.Mapping,
+ collections.abc.Sequence)):
raise TypeError(_('"%s" can\'t traverse path') % self.fn_name)
- if not isinstance(key, (six.string_types, int)):
+ if not isinstance(key, (str, int)):
raise TypeError(_('Path components in "%s" '
'must be strings') % self.fn_name)
- if isinstance(collection, collections.Sequence
- ) and isinstance(key, six.string_types):
+ if isinstance(collection, collections.abc.Sequence
+ ) and isinstance(key, str):
try:
key = int(key)
except ValueError:
@@ -116,7 +116,7 @@ class GetParam(function.Function):
return collection[key]
try:
- return six.moves.reduce(get_path_component, path_components,
+ return functools.reduce(get_path_component, path_components,
parameter)
except (KeyError, IndexError, TypeError):
return ''
@@ -167,8 +167,8 @@ class GetAttThenSelect(function.Function):
self._path_components) = self._parse_args()
def _parse_args(self):
- if (not isinstance(self.args, collections.Sequence) or
- isinstance(self.args, six.string_types)):
+ if (not isinstance(self.args, collections.abc.Sequence) or
+ isinstance(self.args, str)):
raise TypeError(_('Argument to "%s" must be a list') %
self.fn_name)
@@ -200,7 +200,7 @@ class GetAttThenSelect(function.Function):
attrs = [self._attr_path()]
except Exception as exc:
LOG.debug("Ignoring exception calculating required attributes"
- ": %s %s", type(exc).__name__, six.text_type(exc))
+ ": %s %s", type(exc).__name__, str(exc))
attrs = []
else:
attrs = []
@@ -314,7 +314,7 @@ class GetAttAllAttributes(GetAtt):
'forms: [resource_name] or '
'[resource_name, attribute, (path), ...]'
) % self.fn_name)
- elif isinstance(self.args, collections.Sequence):
+ elif isinstance(self.args, collections.abc.Sequence):
if len(self.args) > 1:
return super(GetAttAllAttributes, self)._parse_args()
else:
@@ -372,12 +372,12 @@ class Replace(function.Function):
self._mapping, self._string = self._parse_args()
if not isinstance(self._mapping,
- (collections.Mapping, function.Function)):
+ (collections.abc.Mapping, function.Function)):
raise TypeError(_('"%s" parameters must be a mapping') %
self.fn_name)
def _parse_args(self):
- if not isinstance(self.args, collections.Mapping):
+ if not isinstance(self.args, collections.abc.Mapping):
raise TypeError(_('Arguments to "%s" must be a map') %
self.fn_name)
@@ -400,13 +400,13 @@ class Replace(function.Function):
return ''
if not isinstance(value,
- (six.string_types, six.integer_types,
+ (str, int,
float, bool)):
raise TypeError(_('"%(name)s" params must be strings or numbers, '
'param %(param)s is not valid') %
{'name': self.fn_name, 'param': param})
- return six.text_type(value)
+ return str(value)
def result(self):
template = function.resolve(self._string)
@@ -415,10 +415,10 @@ class Replace(function.Function):
if self._strict:
unreplaced_keys = set(mapping)
- if not isinstance(template, six.string_types):
+ if not isinstance(template, str):
raise TypeError(_('"%s" template must be a string') % self.fn_name)
- if not isinstance(mapping, collections.Mapping):
+ if not isinstance(mapping, collections.abc.Mapping):
raise TypeError(_('"%s" params must be a map') % self.fn_name)
def replace(strings, keys):
@@ -426,7 +426,7 @@ class Replace(function.Function):
return strings
placeholder = keys[0]
- if not isinstance(placeholder, six.string_types):
+ if not isinstance(placeholder, str):
raise TypeError(_('"%s" param placeholders must be strings') %
self.fn_name)
@@ -490,9 +490,10 @@ class ReplaceJson(Replace):
else:
_raise_empty_param_value_error()
- if not isinstance(value, (six.string_types, six.integer_types,
- float, bool)):
- if isinstance(value, (collections.Mapping, collections.Sequence)):
+ if not isinstance(value, (str, int, float, bool)):
+ if isinstance(
+ value, (collections.abc.Mapping, collections.abc.Sequence)
+ ):
if not self._allow_empty_value and len(value) == 0:
_raise_empty_param_value_error()
try:
@@ -507,7 +508,7 @@ class ReplaceJson(Replace):
raise TypeError(_('"%s" params must be strings, numbers, '
'list or map.') % self.fn_name)
- ret_value = six.text_type(value)
+ ret_value = str(value)
if not self._allow_empty_value and not ret_value:
_raise_empty_param_value_error()
return ret_value
@@ -553,7 +554,7 @@ class GetFile(function.Function):
assert self.files is not None, "No stack definition in Function"
args = function.resolve(self.args)
- if not (isinstance(args, six.string_types)):
+ if not (isinstance(args, str)):
raise TypeError(_('Argument to "%s" must be a string') %
self.fn_name)
@@ -603,19 +604,19 @@ class Join(function.Function):
strings = function.resolve(self._strings)
if strings is None:
strings = []
- if (isinstance(strings, six.string_types) or
- not isinstance(strings, collections.Sequence)):
+ if (isinstance(strings, str) or
+ not isinstance(strings, collections.abc.Sequence)):
raise TypeError(_('"%s" must operate on a list') % self.fn_name)
delim = function.resolve(self._delim)
- if not isinstance(delim, six.string_types):
+ if not isinstance(delim, str):
raise TypeError(_('"%s" delimiter must be a string') %
self.fn_name)
def ensure_string(s):
if s is None:
return ''
- if not isinstance(s, six.string_types):
+ if not isinstance(s, str):
raise TypeError(
_('Items to join must be strings not %s'
) % (repr(s)[:200]))
@@ -668,15 +669,15 @@ class JoinMultiple(function.Function):
strings = []
for jl in r_joinlists:
if jl:
- if (isinstance(jl, six.string_types) or
- not isinstance(jl, collections.Sequence)):
+ if (isinstance(jl, str) or
+ not isinstance(jl, collections.abc.Sequence)):
raise TypeError(_('"%s" must operate on '
'a list') % self.fn_name)
strings += jl
delim = function.resolve(self._delim)
- if not isinstance(delim, six.string_types):
+ if not isinstance(delim, str):
raise TypeError(_('"%s" delimiter must be a string') %
self.fn_name)
@@ -685,9 +686,11 @@ class JoinMultiple(function.Function):
) % (repr(s)[:200])
if s is None:
return ''
- elif isinstance(s, six.string_types):
+ elif isinstance(s, str):
return s
- elif isinstance(s, (collections.Mapping, collections.Sequence)):
+ elif isinstance(
+ s, (collections.abc.Mapping, collections.abc.Sequence)
+ ):
try:
return jsonutils.dumps(s, default=None, sort_keys=True)
except TypeError:
@@ -725,14 +728,14 @@ class MapMerge(function.Function):
def result(self):
args = function.resolve(self.args)
- if not isinstance(args, collections.Sequence):
+ if not isinstance(args, collections.abc.Sequence):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
def ensure_map(m):
if m is None:
return {}
- elif isinstance(m, collections.Mapping):
+ elif isinstance(m, collections.abc.Mapping):
return m
else:
msg = _('Incorrect arguments: Items to merge must be maps.')
@@ -776,7 +779,7 @@ class MapReplace(function.Function):
def ensure_map(m):
if m is None:
return {}
- elif isinstance(m, collections.Mapping):
+ elif isinstance(m, collections.abc.Mapping):
return m
else:
msg = (_('Incorrect arguments: to "%(fn_name)s", arguments '
@@ -801,7 +804,7 @@ class MapReplace(function.Function):
repl_keys = ensure_map(repl_map.get('keys', {}))
repl_values = ensure_map(repl_map.get('values', {}))
ret_map = {}
- for k, v in six.iteritems(in_map):
+ for k, v in in_map.items():
key = repl_keys.get(k)
if key is None:
key = k
@@ -901,7 +904,7 @@ class Repeat(function.Function):
self._parse_args()
def _parse_args(self):
- if not isinstance(self.args, collections.Mapping):
+ if not isinstance(self.args, collections.abc.Mapping):
raise TypeError(_('Arguments to "%s" must be a map') %
self.fn_name)
@@ -923,26 +926,26 @@ class Repeat(function.Function):
super(Repeat, self).validate()
if not isinstance(self._for_each, function.Function):
- if not isinstance(self._for_each, collections.Mapping):
+ if not isinstance(self._for_each, collections.abc.Mapping):
raise TypeError(_('The "for_each" argument to "%s" must '
'contain a map') % self.fn_name)
def _valid_arg(self, arg):
- if not (isinstance(arg, (collections.Sequence,
+ if not (isinstance(arg, (collections.abc.Sequence,
function.Function)) and
- not isinstance(arg, six.string_types)):
+ not isinstance(arg, str)):
raise TypeError(_('The values of the "for_each" argument to '
'"%s" must be lists') % self.fn_name)
def _do_replacement(self, keys, values, template):
- if isinstance(template, six.string_types):
+ if isinstance(template, str):
for (key, value) in zip(keys, values):
template = template.replace(key, value)
return template
- elif isinstance(template, collections.Sequence):
+ elif isinstance(template, collections.abc.Sequence):
return [self._do_replacement(keys, values, elem)
for elem in template]
- elif isinstance(template, collections.Mapping):
+ elif isinstance(template, collections.abc.Mapping):
return dict((self._do_replacement(keys, values, k),
self._do_replacement(keys, values, v))
for (k, v) in template.items())
@@ -951,7 +954,7 @@ class Repeat(function.Function):
def result(self):
for_each = function.resolve(self._for_each)
- keys, lists = six.moves.zip(*for_each.items())
+ keys, lists = zip(*for_each.items())
# use empty list for references(None) else validation will fail
value_lens = []
@@ -970,7 +973,7 @@ class Repeat(function.Function):
'loop.') % self.fn_name)
template = function.resolve(self._template)
- iter_func = itertools.product if self._nested_loop else six.moves.zip
+ iter_func = itertools.product if self._nested_loop else zip
return [self._do_replacement(keys, replacements, template)
for replacements in iter_func(*values)]
@@ -993,10 +996,10 @@ class RepeatWithMap(Repeat):
"""
def _valid_arg(self, arg):
- if not (isinstance(arg, (collections.Sequence,
- collections.Mapping,
+ if not (isinstance(arg, (collections.abc.Sequence,
+ collections.abc.Mapping,
function.Function)) and
- not isinstance(arg, six.string_types)):
+ not isinstance(arg, str)):
raise TypeError(_('The values of the "for_each" argument to '
'"%s" must be lists or maps') % self.fn_name)
@@ -1067,7 +1070,7 @@ class Digest(function.Function):
def validate_usage(self, args):
if not (isinstance(args, list) and
- all([isinstance(a, six.string_types) for a in args])):
+ all([isinstance(a, str) for a in args])):
msg = _('Argument to function "%s" must be a list of strings')
raise TypeError(msg % self.fn_name)
@@ -1075,18 +1078,15 @@ class Digest(function.Function):
msg = _('Function "%s" usage: ["<algorithm>", "<value>"]')
raise ValueError(msg % self.fn_name)
- if six.PY3:
- algorithms = hashlib.algorithms_available
- else:
- algorithms = hashlib.algorithms
+ algorithms = hashlib.algorithms_available
if args[0].lower() not in algorithms:
msg = _('Algorithm must be one of %s')
- raise ValueError(msg % six.text_type(algorithms))
+ raise ValueError(msg % str(algorithms))
def digest(self, algorithm, value):
_hash = hashlib.new(algorithm)
- _hash.update(six.b(value))
+ _hash.update(value.encode('latin-1'))
return _hash.hexdigest()
@@ -1121,7 +1121,7 @@ class StrSplit(function.Function):
'example': example}
self.fn_name = fn_name
- if isinstance(args, (six.string_types, collections.Mapping)):
+ if isinstance(args, (str, collections.abc.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
@@ -1191,7 +1191,7 @@ class Yaql(function.Function):
def __init__(self, stack, fn_name, args):
super(Yaql, self).__init__(stack, fn_name, args)
- if not isinstance(self.args, collections.Mapping):
+ if not isinstance(self.args, collections.abc.Mapping):
raise TypeError(_('Arguments to "%s" must be a map.') %
self.fn_name)
@@ -1214,7 +1214,7 @@ class Yaql(function.Function):
self._parse(self._expression)
def _parse(self, expression):
- if not isinstance(expression, six.string_types):
+ if not isinstance(expression, str):
raise TypeError(_('The "expression" argument to %s must '
'contain a string.') % self.fn_name)
@@ -1278,13 +1278,16 @@ class If(function.Macro):
evaluates to false.
"""
+ def _read_args(self):
+ return self.args
+
def parse_args(self, parse_func):
try:
if (not self.args or
- not isinstance(self.args, collections.Sequence) or
- isinstance(self.args, six.string_types)):
+ not isinstance(self.args, collections.abc.Sequence) or
+ isinstance(self.args, str)):
raise ValueError()
- condition, value_if_true, value_if_false = self.args
+ condition, value_if_true, value_if_false = self._read_args()
except ValueError:
msg = _('Arguments to "%s" must be of the form: '
'[condition_name, value_if_true, value_if_false]')
@@ -1302,6 +1305,40 @@ class If(function.Macro):
return self.template.conditions(self.stack).is_enabled(cond)
+class IfNullable(If):
+ """A function to return corresponding value based on condition evaluation.
+
+ Takes the form::
+
+ if:
+ - <condition_name>
+ - <value_if_true>
+ - <value_if_false>
+
+    The value_if_true is returned if the specified condition evaluates to
+    true, and the value_if_false is returned if it evaluates to false.
+
+ If the value_if_false is omitted and the condition is false, the enclosing
+ item (list item, dictionary key/value pair, property definition) will be
+ treated as if it were not mentioned in the template::
+
+ if:
+ - <condition_name>
+ - <value_if_true>
+ """
+
+ def _read_args(self):
+ if not (2 <= len(self.args) <= 3):
+ raise ValueError()
+
+ if len(self.args) == 2:
+ condition, value_if_true = self.args
+ return condition, value_if_true, Ellipsis
+
+ return self.args
+
+
class ConditionBoolean(function.Function):
"""Abstract parent class of boolean condition functions."""
@@ -1310,8 +1347,8 @@ class ConditionBoolean(function.Function):
self._check_args()
def _check_args(self):
- if not (isinstance(self.args, collections.Sequence) and
- not isinstance(self.args, six.string_types)):
+ if not (isinstance(self.args, collections.abc.Sequence) and
+ not isinstance(self.args, str)):
msg = _('Arguments to "%s" must be a list of conditions')
raise ValueError(msg % self.fn_name)
if not self.args or len(self.args) < 2:
@@ -1405,8 +1442,8 @@ class Filter(function.Function):
self._values, self._sequence = self._parse_args()
def _parse_args(self):
- if (not isinstance(self.args, collections.Sequence) or
- isinstance(self.args, six.string_types)):
+ if (not isinstance(self.args, collections.abc.Sequence) or
+ isinstance(self.args, str)):
raise TypeError(_('Argument to "%s" must be a list') %
self.fn_name)
@@ -1467,7 +1504,7 @@ class MakeURL(function.Function):
if arg in args:
if arg == self.QUERY:
if not isinstance(args[arg], (function.Function,
- collections.Mapping)):
+ collections.abc.Mapping)):
raise TypeError(_('The "%(arg)s" argument to '
'"%(fn_name)s" must be a map') %
{'arg': arg,
@@ -1476,7 +1513,7 @@ class MakeURL(function.Function):
elif arg == self.PORT:
port = args[arg]
if not isinstance(port, function.Function):
- if not isinstance(port, six.integer_types):
+ if not isinstance(port, int):
try:
port = int(port)
except ValueError:
@@ -1493,7 +1530,7 @@ class MakeURL(function.Function):
'must be in range 1-65535') % port)
else:
if not isinstance(args[arg], (function.Function,
- six.string_types)):
+ str)):
raise TypeError(_('The "%(arg)s" argument to '
'"%(fn_name)s" must be a string') %
{'arg': arg,
@@ -1502,7 +1539,7 @@ class MakeURL(function.Function):
def validate(self):
super(MakeURL, self).validate()
- if not isinstance(self.args, collections.Mapping):
+ if not isinstance(self.args, collections.abc.Mapping):
raise TypeError(_('The arguments to "%s" must '
'be a map') % self.fn_name)
@@ -1544,7 +1581,7 @@ class MakeURL(function.Function):
port = args.get(self.PORT, '')
if port:
yield ':'
- yield six.text_type(port)
+ yield str(port)
path = urlparse.quote(args.get(self.PATH, ''))
@@ -1584,16 +1621,16 @@ class ListConcat(function.Function):
def result(self):
args = function.resolve(self.args)
- if (isinstance(args, six.string_types) or
- not isinstance(args, collections.Sequence)):
+ if (isinstance(args, str) or
+ not isinstance(args, collections.abc.Sequence)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
def ensure_list(m):
if m is None:
return []
- elif (isinstance(m, collections.Sequence) and
- not isinstance(m, six.string_types)):
+ elif (isinstance(m, collections.abc.Sequence) and
+ not isinstance(m, str)):
return m
else:
msg = _('Incorrect arguments: Items to concat must be lists. '
@@ -1657,7 +1694,7 @@ class Contains(function.Function):
resolved_value = function.resolve(self.value)
resolved_sequence = function.resolve(self.sequence)
- if not isinstance(resolved_sequence, collections.Sequence):
+ if not isinstance(resolved_sequence, collections.abc.Sequence):
raise TypeError(_('Second argument to "%s" should be '
'a sequence.') % self.fn_name)
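
The digest() change above reflects that Python 3 hashlib only accepts bytes and exposes the available algorithms via hashlib.algorithms_available. A minimal reproduction of the same call sequence (not part of this patch):

    import hashlib

    def digest(algorithm, value):
        if algorithm.lower() not in hashlib.algorithms_available:
            raise ValueError('Algorithm must be one of %s'
                             % sorted(hashlib.algorithms_available))
        h = hashlib.new(algorithm)
        h.update(value.encode('latin-1'))  # hashlib requires bytes on Python 3
        return h.hexdigest()

    print(digest('sha256', 'hello'))
    # 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
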
diff --git a/heat/engine/hot/template.py b/heat/engine/hot/template.py
index f0f46a4ea..ee56a6421 100644
--- a/heat/engine/hot/template.py
+++ b/heat/engine/hot/template.py
@@ -12,8 +12,6 @@
import functools
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine.cfn import functions as cfn_funcs
@@ -159,7 +157,7 @@ class HOTemplate20130523(template_common.CommonTemplate):
if not attrs:
raise exception.StackValidationFailed(message=message)
try:
- for attr, attr_value in six.iteritems(attrs):
+ for attr, attr_value in attrs.items():
if attr not in allowed_keys:
raise KeyError(err_msg % attr)
if sub_section not in attrs:
@@ -183,7 +181,7 @@ class HOTemplate20130523(template_common.CommonTemplate):
for name, attrs in sorted(data.items()):
cfn_object = {}
- for attr, attr_value in six.iteritems(attrs):
+ for attr, attr_value in attrs.items():
cfn_attr = mapping[attr]
if cfn_attr is not None:
cfn_object[cfn_attr] = attr_value
@@ -201,18 +199,18 @@ class HOTemplate20130523(template_common.CommonTemplate):
def get_section_name(self, section):
cfn_to_hot_attrs = dict(
- zip(six.itervalues(self._HOT_TO_CFN_ATTRS),
- six.iterkeys(self._HOT_TO_CFN_ATTRS)))
+ zip(self._HOT_TO_CFN_ATTRS.values(),
+ self._HOT_TO_CFN_ATTRS.keys()))
return cfn_to_hot_attrs.get(section, section)
def param_schemata(self, param_defaults=None):
parameter_section = self.t.get(self.PARAMETERS) or {}
pdefaults = param_defaults or {}
- for name, schema in six.iteritems(parameter_section):
+ for name, schema in parameter_section.items():
if name in pdefaults:
parameter_section[name]['default'] = pdefaults[name]
- params = six.iteritems(parameter_section)
+ params = parameter_section.items()
return dict((name, self.param_schema_class.from_dict(name, schema))
for name, schema in params)
@@ -228,7 +226,7 @@ class HOTemplate20130523(template_common.CommonTemplate):
valid_keys = frozenset(self._RESOURCE_KEYS)
def defns():
- for name, snippet in six.iteritems(resources):
+ for name, snippet in resources.items():
try:
invalid_keys = set(snippet) - valid_keys
if invalid_keys:
@@ -239,7 +237,7 @@ class HOTemplate20130523(template_common.CommonTemplate):
defn_data = dict(self._rsrc_defn_args(stack, name,
snippet))
except (TypeError, ValueError, KeyError) as ex:
- msg = six.text_type(ex)
+ msg = str(ex)
raise exception.StackValidationFailed(message=msg)
defn = rsrc_defn.ResourceDefinition(name, **defn_data)
@@ -250,7 +248,7 @@ class HOTemplate20130523(template_common.CommonTemplate):
enabled = conditions.is_enabled(cond_name)
except ValueError as exc:
path = [self.RESOURCES, name, self.RES_CONDITION]
- message = six.text_type(exc)
+ message = str(exc)
raise exception.StackValidationFailed(path=path,
message=message)
if not enabled:
@@ -490,7 +488,7 @@ class HOTemplate20161014(HOTemplate20160408):
tmpl, template_id, files, env)
self._parser_condition_functions = {}
- for n, f in six.iteritems(self.functions):
+ for n, f in self.functions.items():
if not f == hot_funcs.Removed:
self._parser_condition_functions[n] = function.Invalid
else:
@@ -512,14 +510,14 @@ class HOTemplate20161014(HOTemplate20160408):
yield ('external_id',
self._parse_resource_field(self.RES_EXTERNAL_ID,
- (six.string_types,
+ (str,
function.Function),
'string',
name, data, parse))
yield ('condition',
self._parse_resource_field(self.RES_CONDITION,
- (six.string_types, bool,
+ (str, bool,
function.Function),
'string_or_boolean',
name, data, parse_cond))
@@ -760,3 +758,55 @@ class HOTemplate20180831(HOTemplate20180302):
'yaql': hot_funcs.Yaql,
'contains': hot_funcs.Contains
}
+
+
+class HOTemplate20210416(HOTemplate20180831):
+ functions = {
+ 'get_attr': hot_funcs.GetAttAllAttributes,
+ 'get_file': hot_funcs.GetFile,
+ 'get_param': hot_funcs.GetParam,
+ 'get_resource': hot_funcs.GetResource,
+ 'list_join': hot_funcs.JoinMultiple,
+ 'repeat': hot_funcs.RepeatWithNestedLoop,
+ 'resource_facade': hot_funcs.ResourceFacade,
+ 'str_replace': hot_funcs.ReplaceJson,
+
+ # functions added in 2015-04-30
+ 'digest': hot_funcs.Digest,
+
+ # functions added in 2015-10-15
+ 'str_split': hot_funcs.StrSplit,
+
+ # functions added in 2016-04-08
+ 'map_merge': hot_funcs.MapMerge,
+
+ # functions added in 2016-10-14
+ 'yaql': hot_funcs.Yaql,
+ 'map_replace': hot_funcs.MapReplace,
+ # Modified in 2021-04-16
+ 'if': hot_funcs.IfNullable,
+
+ # functions added in 2017-02-24
+ 'filter': hot_funcs.Filter,
+ 'str_replace_strict': hot_funcs.ReplaceJsonStrict,
+
+ # functions added in 2017-09-01
+ 'make_url': hot_funcs.MakeURL,
+ 'list_concat': hot_funcs.ListConcat,
+ 'str_replace_vstrict': hot_funcs.ReplaceJsonVeryStrict,
+ 'list_concat_unique': hot_funcs.ListConcatUnique,
+ 'contains': hot_funcs.Contains,
+
+ # functions removed from 2015-10-15
+ 'Fn::Select': hot_funcs.Removed,
+
+ # functions removed from 2014-10-16
+ 'Fn::GetAZs': hot_funcs.Removed,
+ 'Fn::Join': hot_funcs.Removed,
+ 'Fn::Split': hot_funcs.Removed,
+ 'Fn::Replace': hot_funcs.Removed,
+ 'Fn::Base64': hot_funcs.Removed,
+ 'Fn::MemberListToMap': hot_funcs.Removed,
+ 'Fn::ResourceFacade': hot_funcs.Removed,
+ 'Ref': hot_funcs.Removed,
+ }
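
With heat_template_version 2021-04-16, 'if' maps to IfNullable, so the third argument may be omitted. This sketch mirrors how _read_args pads a two-element argument list with the Ellipsis marker that resolve() later prunes (not part of this patch):

    def read_if_args(args):
        """Mimic IfNullable._read_args: a missing value_if_false becomes Ellipsis."""
        if not 2 <= len(args) <= 3:
            raise ValueError('"if" takes two or three arguments')
        if len(args) == 2:
            condition, value_if_true = args
            return condition, value_if_true, Ellipsis
        return tuple(args)

    print(read_if_args(['create_prod', 'm1.large']))
    # ('create_prod', 'm1.large', Ellipsis) -- the enclosing item is dropped
    # by resolve() when the condition is false
    print(read_if_args(['create_prod', 'm1.large', 'm1.small']))
    # ('create_prod', 'm1.large', 'm1.small')
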
diff --git a/heat/engine/node_data.py b/heat/engine/node_data.py
index 06580cb86..da1c6e636 100644
--- a/heat/engine/node_data.py
+++ b/heat/engine/node_data.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
class NodeData(object):
"""Data about a node in the graph, to be passed along to other nodes."""
@@ -53,8 +51,8 @@ class NodeData(object):
"""Return a dict of all available top-level attribute values."""
attrs = {k: v
for k, v in self._attributes.items()
- if isinstance(k, six.string_types)}
- for v in six.itervalues(attrs):
+ if isinstance(k, str)}
+ for v in attrs.values():
if isinstance(v, Exception):
raise v
return attrs
@@ -69,7 +67,7 @@ class NodeData(object):
def attribute_names(self):
"""Iterate over valid top-level attribute names."""
for key in self._attributes:
- if isinstance(key, six.string_types):
+ if isinstance(key, str):
yield key
else:
yield key[0]
@@ -80,7 +78,7 @@ class NodeData(object):
This is the format that is serialised and stored in the database's
SyncPoints.
"""
- for v in six.itervalues(self._attributes):
+ for v in self._attributes.values():
if isinstance(v, Exception):
raise v
diff --git a/heat/engine/output.py b/heat/engine/output.py
index 1c12232ce..80a180db1 100644
--- a/heat/engine/output.py
+++ b/heat/engine/output.py
@@ -13,7 +13,6 @@
import collections
import copy
-import six
from heat.common import exception
from heat.engine import function
@@ -47,8 +46,7 @@ class OutputDefinition(object):
if self._deps is None:
try:
required_resources = function.dependencies(self._value)
- self._deps = set(six.moves.map(lambda rp: rp.name,
- required_resources))
+ self._deps = set(map(lambda rp: rp.name, required_resources))
except (exception.InvalidTemplateAttribute,
exception.InvalidTemplateReference):
# This output ain't gonna work anyway
@@ -83,7 +81,7 @@ class OutputDefinition(object):
if self._description is None:
return 'No description given'
- return six.text_type(self._description)
+ return str(self._description)
def render_hot(self):
def items():
diff --git a/heat/engine/parameters.py b/heat/engine/parameters.py
index 168ac3e59..cb05c6ed7 100644
--- a/heat/engine/parameters.py
+++ b/heat/engine/parameters.py
@@ -18,7 +18,6 @@ import itertools
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import strutils
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -87,8 +86,7 @@ class Schema(constr.Schema):
message=_('Default must be a comma-delimited list '
'string: %s') % err)
elif self.type == self.LIST and isinstance(self.default, list):
- default_value = [(six.text_type(x))
- for x in self.default]
+ default_value = [(str(x)) for x in self.default]
try:
self.validate_constraints(default_value, context,
[constr.CustomConstraint])
@@ -184,7 +182,6 @@ class Schema(constr.Schema):
return super(Schema, self).__getitem__(key)
-@six.python_2_unicode_compatible
class Parameter(object):
"""A template parameter."""
@@ -247,10 +244,10 @@ class Parameter(object):
else:
raise exception.UserParameterMissing(key=self.name)
except exception.StackValidationFailed as ex:
- msg = err_msg % dict(name=self.name, exp=six.text_type(ex))
+ msg = err_msg % dict(name=self.name, exp=str(ex))
raise exception.StackValidationFailed(message=msg)
except exception.InvalidSchemaError as ex:
- msg = err_msg % dict(name=self.name, exp=six.text_type(ex))
+ msg = err_msg % dict(name=self.name, exp=str(ex))
raise exception.InvalidSchemaError(message=msg)
def value(self):
@@ -302,13 +299,13 @@ class Parameter(object):
@classmethod
def _value_as_text(cls, value):
- return six.text_type(value)
+ return str(value)
def __str__(self):
"""Return a string representation of the parameter."""
value = self.value()
if self.hidden():
- return six.text_type('******')
+ return str('******')
else:
return self._value_as_text(value)
@@ -330,7 +327,7 @@ class NumberParam(Parameter):
try:
Schema.str_to_num(val)
except (ValueError, TypeError) as ex:
- raise exception.StackValidationFailed(message=six.text_type(ex))
+ raise exception.StackValidationFailed(message=str(ex))
self.schema.validate_value(val, context)
def value(self):
@@ -346,7 +343,7 @@ class BooleanParam(Parameter):
try:
strutils.bool_from_string(val, strict=True)
except ValueError as ex:
- raise exception.StackValidationFailed(message=six.text_type(ex))
+ raise exception.StackValidationFailed(message=str(ex))
self.schema.validate_value(val, context)
def value(self):
@@ -372,42 +369,42 @@ class StringParam(Parameter):
class ParsedParameter(Parameter):
"""A template parameter with cached parsed value."""
- __slots__ = ('parsed',)
+ __slots__ = ('_parsed',)
def __init__(self, name, schema, value=None):
super(ParsedParameter, self).__init__(name, schema, value)
- self._update_parsed()
-
- def set_default(self, value):
- super(ParsedParameter, self).set_default(value)
- self._update_parsed()
-
- def _update_parsed(self):
- if self.has_value():
- if self.user_value is not None:
- self.parsed = self.parse(self.user_value)
+ self._parsed = None
+
+ @property
+ def parsed(self):
+ if self._parsed is None:
+ if self.has_value():
+ if self.user_value is not None:
+ self._parsed = self.parse(self.user_value)
+ else:
+ self._parsed = self.parse(self.default())
else:
- self.parsed = self.parse(self.default())
+ self._parsed = self.default_parsed()
+ return self._parsed
-class CommaDelimitedListParam(ParsedParameter, collections.Sequence):
+class CommaDelimitedListParam(ParsedParameter, collections.abc.Sequence):
"""A template parameter of type "CommaDelimitedList"."""
- __slots__ = ('parsed',)
+ __slots__ = tuple()
- def __init__(self, name, schema, value=None):
- self.parsed = []
- super(CommaDelimitedListParam, self).__init__(name, schema, value)
+ def default_parsed(self):
+ return []
def parse(self, value):
# only parse when value is not already a list
if isinstance(value, list):
- return [(six.text_type(x)) for x in value]
+ return [(str(x)) for x in value]
try:
return param_utils.delim_string_to_list(value)
except (KeyError, AttributeError) as err:
message = _('Value must be a comma-delimited list string: %s')
- raise ValueError(message % six.text_type(err))
+ raise ValueError(message % str(err))
return value
def value(self):
@@ -432,23 +429,22 @@ class CommaDelimitedListParam(ParsedParameter, collections.Sequence):
try:
parsed = self.parse(val)
except ValueError as ex:
- raise exception.StackValidationFailed(message=six.text_type(ex))
+ raise exception.StackValidationFailed(message=str(ex))
self.schema.validate_value(parsed, context)
class JsonParam(ParsedParameter):
"""A template parameter who's value is map or list."""
- __slots__ = ('parsed',)
+ __slots__ = tuple()
- def __init__(self, name, schema, value=None):
- self.parsed = {}
- super(JsonParam, self).__init__(name, schema, value)
+ def default_parsed(self):
+ return {}
def parse(self, value):
try:
val = value
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
# turn off oslo_serialization's clever to_primitive()
val = jsonutils.dumps(val, default=None)
if val:
@@ -481,12 +477,11 @@ class JsonParam(ParsedParameter):
try:
parsed = self.parse(val)
except ValueError as ex:
- raise exception.StackValidationFailed(message=six.text_type(ex))
+ raise exception.StackValidationFailed(message=str(ex))
self.schema.validate_value(parsed, context)
-@six.add_metaclass(abc.ABCMeta)
-class Parameters(collections.Mapping):
+class Parameters(collections.abc.Mapping, metaclass=abc.ABCMeta):
"""Parameters of a stack.
The parameters of a stack, with type checking, defaults, etc. specified by
@@ -513,7 +508,7 @@ class Parameters(collections.Mapping):
schemata = self.tmpl.param_schemata()
user_parameters = (user_parameter(si) for si in
- six.iteritems(schemata))
+ schemata.items())
pseudo_parameters = self._pseudo_parameters(stack_identifier)
self.params = dict((p.name,
@@ -534,7 +529,7 @@ class Parameters(collections.Mapping):
"""
self._validate_user_parameters()
- for param in six.itervalues(self.params):
+ for param in self.params.values():
param.validate(validate_value, context)
def __contains__(self, key):
@@ -560,7 +555,7 @@ class Parameters(collections.Mapping):
function) and return the resulting dictionary.
"""
return dict((n, func(p))
- for n, p in six.iteritems(self.params) if filter_func(p))
+ for n, p in self.params.items() if filter_func(p))
def set_stack_id(self, stack_identifier):
"""Set the StackId pseudo parameter value."""
diff --git a/heat/engine/plugin_manager.py b/heat/engine/plugin_manager.py
index 8dad27818..cde1591aa 100644
--- a/heat/engine/plugin_manager.py
+++ b/heat/engine/plugin_manager.py
@@ -17,7 +17,6 @@ import sys
from oslo_config import cfg
from oslo_log import log
-import six
from heat.common import plugin_loader
@@ -49,15 +48,14 @@ class PluginManager(object):
'heat.engine')
def modules():
- pkg_modules = six.moves.map(plugin_loader.load_modules,
- packages())
+ pkg_modules = map(plugin_loader.load_modules, packages())
return itertools.chain.from_iterable(pkg_modules)
self.modules = list(modules())
def map_to_modules(self, function):
"""Iterate over the results of calling a function on every module."""
- return six.moves.map(function, self.modules)
+ return map(function, self.modules)
class PluginMapping(object):
@@ -72,7 +70,7 @@ class PluginMapping(object):
mappings provided by that module. Any other arguments passed will be
passed to the mapping functions.
"""
- if isinstance(names, six.string_types):
+ if isinstance(names, str):
names = [names]
self.names = ['%s_mapping' % name for name in names]
@@ -95,7 +93,7 @@ class PluginMapping(object):
'from %(module)s', fmt_data)
raise
else:
- if isinstance(mapping_dict, collections.Mapping):
+ if isinstance(mapping_dict, collections.abc.Mapping):
return mapping_dict
elif mapping_dict is not None:
LOG.error('Invalid type for %(mapping_name)s '
@@ -109,5 +107,5 @@ class PluginMapping(object):
Mappings are returned as a list of (key, value) tuples.
"""
mod_dicts = plugin_manager.map_to_modules(self.load_from_module)
- return itertools.chain.from_iterable(six.iteritems(d) for d
+ return itertools.chain.from_iterable(d.items() for d
in mod_dicts)
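
load_all() above flattens the per-module mapping dicts into one stream of (key, value) pairs; the same idiom in isolation with toy dicts (not part of this patch):

    import itertools

    mod_dicts = [{'OS::Test::A': 'ClassA'}, {'OS::Test::B': 'ClassB'}]
    pairs = itertools.chain.from_iterable(d.items() for d in mod_dicts)
    print(dict(pairs))
    # {'OS::Test::A': 'ClassA', 'OS::Test::B': 'ClassB'}
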
diff --git a/heat/engine/properties.py b/heat/engine/properties.py
index 14d688fa1..2d9623baa 100644
--- a/heat/engine/properties.py
+++ b/heat/engine/properties.py
@@ -14,7 +14,6 @@
import collections
from oslo_serialization import jsonutils
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -274,9 +273,9 @@ class Property(object):
def _get_string(self, value):
if value is None:
value = self.has_default() and self.default() or ''
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
if isinstance(value, (bool, int)):
- value = six.text_type(value)
+ value = str(value)
else:
raise ValueError(_('Value must be a string; got %r') % value)
return value
@@ -301,29 +300,28 @@ class Property(object):
def _get_map(self, value, validate=False, translation=None):
if value is None:
value = self.default() if self.has_default() else {}
- if not isinstance(value, collections.Mapping):
+ if not isinstance(value, collections.abc.Mapping):
# This is to handle passing Lists via Json parameters exposed
# via a provider resource, in particular lists-of-dicts which
# cannot be handled correctly via comma_delimited_list
if self.schema.allow_conversion:
- if isinstance(value, six.string_types):
+ if isinstance(value, str):
return value
- elif isinstance(value, collections.Sequence):
+ elif isinstance(value, collections.abc.Sequence):
return jsonutils.dumps(value)
raise TypeError(_('"%s" is not a map') % value)
- return dict(self._get_children(six.iteritems(value),
+ return dict(self._get_children(value.items(),
validate=validate,
translation=translation))
def _get_list(self, value, validate=False, translation=None):
if value is None:
value = self.has_default() and self.default() or []
- if self.schema.allow_conversion and isinstance(value,
- six.string_types):
- value = param_utils.delim_string_to_list(value)
- if (not isinstance(value, collections.Sequence) or
- isinstance(value, six.string_types)):
+ if self.schema.allow_conversion and isinstance(value, str):
+ value = param_utils.delim_string_to_list(value)
+ if (not isinstance(value, collections.abc.Sequence) or
+ isinstance(value, str)):
raise TypeError(_('"%s" is not a list') % repr(value))
return [v[1] for v in self._get_children(enumerate(value),
@@ -341,7 +339,7 @@ class Property(object):
value = self.has_default() and self.default() or False
if isinstance(value, bool):
return value
- if isinstance(value, six.string_types):
+ if isinstance(value, str):
normalised = value.lower()
if normalised not in ['true', 'false']:
raise ValueError(_('"%s" is not a valid boolean') % normalised)
@@ -374,10 +372,16 @@ class Property(object):
return _value
-class Properties(collections.Mapping):
+def _default_resolver(d, nullable=False):
+ return d
- def __init__(self, schema, data, resolver=lambda d: d, parent_name=None,
- context=None, section=None, translation=None):
+
+class Properties(collections.abc.Mapping):
+
+ def __init__(self, schema, data, resolver=_default_resolver,
+ parent_name=None,
+ context=None, section=None, translation=None,
+ rsrc_description=None):
self.props = dict((k, Property(s, k, context, path=parent_name))
for k, s in schema.items())
self.resolve = resolver
@@ -387,6 +391,7 @@ class Properties(collections.Mapping):
self.context = context
self.translation = (trans.Translation(properties=self)
if translation is None else translation)
+ self.rsrc_description = rsrc_description or None
def update_translation(self, rules, client_resolve=True,
ignore_resolve_error=False):
@@ -430,7 +435,7 @@ class Properties(collections.Mapping):
else:
path = [key]
raise exception.StackValidationFailed(
- path=path, message=six.text_type(e))
+ path=path, message=str(e))
# are there unimplemented Properties
if not prop.implemented() and key in self.data:
@@ -453,60 +458,82 @@ class Properties(collections.Mapping):
if any(res.action == res.INIT for res in deps):
return True
- def get_user_value(self, key, validate=False):
+ def get_user_value(self, key):
if key not in self:
raise KeyError(_('Invalid Property %s') % key)
prop = self.props[key]
+ value, found = self._resolve_user_value(key, prop, validate=False)
+ return value
+
+ def _resolve_user_value(self, key, prop, validate):
+ """Return the user-supplied value (or None), and whether it was found.
+
+ Returning the value together with a "found" flag lets callers
+ distinguish a Function that returns None or an explicitly passed null
+ value (found) from no value being passed at all or a Macro that
+ returns Ellipsis (not found); the latter case is treated exactly as if
+ no value had been passed.
+ """
+ if key not in self.data:
+ return None, False
+
if (self.translation.is_deleted(prop.path) or
self.translation.is_replaced(prop.path)):
- return
- if key in self.data:
- try:
- unresolved_value = self.data[key]
- if validate:
- if self._find_deps_any_in_init(unresolved_value):
- validate = False
-
- value = self.resolve(unresolved_value)
-
- if self.translation.has_translation(prop.path):
- value = self.translation.translate(prop.path,
- value,
- self.data)
-
- return prop.get_value(value, validate,
- translation=self.translation)
- # Children can raise StackValidationFailed with unique path which
- # is necessary for further use in StackValidationFailed exception.
- # So we need to handle this exception in this method.
- except exception.StackValidationFailed as e:
- raise exception.StackValidationFailed(path=e.path,
- message=e.error_message)
- # the resolver function could raise any number of exceptions,
- # so handle this generically
- except Exception as e:
- raise ValueError(six.text_type(e))
+ return None, False
+
+ try:
+ unresolved_value = self.data[key]
+ if validate:
+ if self._find_deps_any_in_init(unresolved_value):
+ validate = False
+
+ value = self.resolve(unresolved_value, nullable=True)
+ if value is Ellipsis:
+ # Treat as if the property value were not specified at all
+ return None, False
+
+ if self.translation.has_translation(prop.path):
+ value = self.translation.translate(prop.path,
+ value,
+ self.data)
+
+ return prop.get_value(value, validate,
+ translation=self.translation), True
+ # Children can raise StackValidationFailed with unique path which
+ # is necessary for further use in StackValidationFailed exception.
+ # So we need to handle this exception in this method.
+ except exception.StackValidationFailed as e:
+ raise exception.StackValidationFailed(path=e.path,
+ message=e.error_message)
+ # the resolver function could raise any number of exceptions,
+ # so handle this generically
+ except Exception as e:
+ raise ValueError(str(e))
def _get_property_value(self, key, validate=False):
if key not in self:
raise KeyError(_('Invalid Property %s') % key)
prop = self.props[key]
- if not self.translation.is_deleted(prop.path) and key in self.data:
- return self.get_user_value(key, validate)
- elif self.translation.has_translation(prop.path):
+ value, found = self._resolve_user_value(key, prop, validate)
+ if found:
+ return value
+ if self.translation.has_translation(prop.path):
value = self.translation.translate(prop.path, prop_data=self.data,
validate=validate)
if value is not None or prop.has_default():
return prop.get_value(value)
- elif prop.required():
- raise ValueError(_('Property %s not assigned') % key)
- elif prop.has_default():
+
+ if prop.has_default():
return prop.get_value(None, validate,
translation=self.translation)
elif prop.required():
raise ValueError(_('Property %s not assigned') % key)
+ elif key == 'description' and prop.schema.update_allowed:
+ return self.rsrc_description
+ else:
+ return None
def __getitem__(self, key):
return self._get_property_value(key)
@@ -652,7 +679,7 @@ class Properties(collections.Mapping):
return {}, {}
param_prop_defs = [param_prop_def_items(n, s, template_type)
- for n, s in six.iteritems(schemata(schema))
+ for n, s in schemata(schema).items()
if s.implemented]
param_items, prop_items = zip(*param_prop_defs)
return dict(param_items), dict(prop_items)
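
The refactored _resolve_user_value() above depends on the resolver accepting nullable=True and on Ellipsis acting as an "unset" sentinel, so a Macro can signal "behave as if no value was passed" while an explicit null still resolves to None. A standalone sketch of that convention follows; the resolver here is a stand-in, not Heat's real function resolution:

    def resolve(snippet, nullable=False):
        # Stand-in resolver: pretend any dict is a macro whose branch did
        # not produce a value, and everything else resolves to itself.
        if isinstance(snippet, dict):
            return Ellipsis if nullable else None
        return snippet

    def resolve_user_value(data, key):
        """Return (value, found), mirroring Properties._resolve_user_value()."""
        if key not in data:
            return None, False
        value = resolve(data[key], nullable=True)
        if value is Ellipsis:
            # Treat exactly like "no value supplied at all".
            return None, False
        return value, True

    assert resolve_user_value({}, 'image') == (None, False)              # absent
    assert resolve_user_value({'image': None}, 'image') == (None, True)  # explicit null
    assert resolve_user_value({'image': {'if': ['cond']}}, 'image') == (None, False)  # macro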
diff --git a/heat/engine/properties_group.py b/heat/engine/properties_group.py
index 928b49959..bc73aef45 100644
--- a/heat/engine/properties_group.py
+++ b/heat/engine/properties_group.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
@@ -78,7 +76,7 @@ class PropertiesGroup(object):
self.validate_schema(item)
elif isinstance(item, list):
for name in item:
- if not isinstance(name, six.string_types):
+ if not isinstance(name, str):
raise exception.InvalidSchemaError(message=next_msg)
else:
raise exception.InvalidSchemaError(message=next_msg)
diff --git a/heat/engine/resource.py b/heat/engine/resource.py
index c383f0abf..de9a708a8 100644
--- a/heat/engine/resource.py
+++ b/heat/engine/resource.py
@@ -23,7 +23,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import reflection
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -80,7 +79,7 @@ class NoActionRequired(Exception):
msg = (_("The resource %(res)s could not perform "
"scaling action: %(reason)s") %
{'res': res_name, 'reason': reason})
- super(Exception, self).__init__(six.text_type(msg))
+ super(Exception, self).__init__(str(msg))
class PollDelay(Exception):
@@ -97,7 +96,6 @@ class PollDelay(Exception):
self.period = period
-@six.python_2_unicode_compatible
class Resource(status.ResourceStatus):
BASE_ATTRIBUTES = (SHOW, ) = (attributes.SHOW_ATTR, )
@@ -159,6 +157,9 @@ class Resource(status.ResourceStatus):
# a signal to this resource
signal_needs_metadata_updates = True
+ # Whether the resource is always replaced when CHECK_FAILED
+ always_replace_on_check_failed = True
+
def __new__(cls, name, definition, stack):
"""Create a new Resource of the appropriate class for its type."""
@@ -187,7 +188,7 @@ class Resource(status.ResourceStatus):
ex = exception.ResourceTypeUnavailable(
resource_type=resource_type,
service_name=cls.default_client_name,
- reason=six.text_type(exc))
+ reason=str(exc))
raise ex
else:
if not svc_available:
@@ -195,7 +196,7 @@ class Resource(status.ResourceStatus):
resource_type=resource_type,
service_name=cls.default_client_name,
reason=reason)
- LOG.info(six.text_type(ex))
+ LOG.info(str(ex))
raise ex
def __init__(self, name, definition, stack):
@@ -450,7 +451,7 @@ class Resource(status.ResourceStatus):
def calc_update_allowed(self, props):
update_allowed_set = set(self.update_allowed_properties)
- for (psk, psv) in six.iteritems(props.props):
+ for (psk, psv) in props.props.items():
if psv.update_allowed():
update_allowed_set.add(psk)
return update_allowed_set
@@ -514,7 +515,7 @@ class Resource(status.ResourceStatus):
"not setting metadata",
{'name': self.name, 'id': self.id, 'st': db_res.status})
raise exception.ResourceNotAvailable(resource_name=self.name)
- LOG.debug('Setting metadata for %s', six.text_type(self))
+ LOG.debug('Setting metadata for %s', str(self))
if refresh:
metadata = merge_metadata(metadata, db_res.rsrc_metadata)
if db_res.update_metadata(metadata):
@@ -654,7 +655,7 @@ class Resource(status.ResourceStatus):
"""
update_allowed_set = self.calc_update_allowed(after_props)
immutable_set = set()
- for (psk, psv) in six.iteritems(after_props.props):
+ for (psk, psv) in after_props.props.items():
if psv.immutable():
immutable_set.add(psk)
@@ -669,7 +670,7 @@ class Resource(status.ResourceStatus):
# already been validated.
LOG.warning('Ignoring error in old property value '
'%(prop_name)s: %(msg)s',
- {'prop_name': key, 'msg': six.text_type(exc)})
+ {'prop_name': key, 'msg': str(exc)})
return True
return before != after_props.get(key)
@@ -704,13 +705,13 @@ class Resource(status.ResourceStatus):
if self.resource_id is not None:
text = '%s "%s" [%s] %s' % (class_name, self.name,
self.resource_id,
- six.text_type(self.stack))
+ str(self.stack))
else:
text = '%s "%s" %s' % (class_name, self.name,
- six.text_type(self.stack))
+ str(self.stack))
else:
text = '%s "%s"' % (class_name, self.name)
- return six.text_type(text)
+ return str(text)
def add_explicit_dependencies(self, deps):
"""Add all dependencies explicitly specified in the template.
@@ -915,27 +916,27 @@ class Resource(status.ResourceStatus):
try:
set_in_progress()
yield
- except exception.UpdateInProgress as ex:
+ except exception.UpdateInProgress:
with excutils.save_and_reraise_exception():
LOG.info('Update in progress for %s', self.name)
except expected_exceptions as ex:
with excutils.save_and_reraise_exception():
- self.state_set(action, self.COMPLETE, six.text_type(ex),
+ self.state_set(action, self.COMPLETE, str(ex),
lock=lock_release)
- LOG.debug('%s', six.text_type(ex))
+ LOG.debug('%s', str(ex))
except Exception as ex:
LOG.info('%(action)s: %(info)s',
{"action": action,
- "info": six.text_type(self)},
+ "info": str(self)},
exc_info=True)
failure = exception.ResourceFailure(ex, self, action)
- self.state_set(action, self.FAILED, six.text_type(failure),
+ self.state_set(action, self.FAILED, str(failure),
lock=lock_release)
raise failure
except BaseException as exc:
with excutils.save_and_reraise_exception():
try:
- reason = six.text_type(exc)
+ reason = str(exc)
msg = '%s aborted' % action
if reason:
msg += ' (%s)' % reason
@@ -1004,7 +1005,6 @@ class Resource(status.ResourceStatus):
action
)
- @scheduler.wrappertask
def _do_action(self, action, pre_func=None, resource_data=None):
"""Perform a transition to a new state via a specified action.
@@ -1027,7 +1027,7 @@ class Resource(status.ResourceStatus):
pre_func()
handler_args = [resource_data] if resource_data is not None else []
- yield self.action_handler_task(action, args=handler_args)
+ yield from self.action_handler_task(action, args=handler_args)
def _update_stored_properties(self):
old_props = self._stored_properties_data
@@ -1066,7 +1066,7 @@ class Resource(status.ResourceStatus):
refd_attrs |= get_dep_attrs(stk_defn.resource_definition(r_name)
for r_name in enabled_resources)
- subset_outputs = isinstance(in_outputs, collections.Iterable)
+ subset_outputs = isinstance(in_outputs, collections.abc.Iterable)
if subset_outputs or in_outputs:
if not subset_outputs:
in_outputs = stk_defn.enabled_output_names()
@@ -1104,7 +1104,7 @@ class Resource(status.ResourceStatus):
"""
def get_attrs(attrs, cacheable_only=False):
for attr in attrs:
- path = (attr,) if isinstance(attr, six.string_types) else attr
+ path = (attr,) if isinstance(attr, str) else attr
if (cacheable_only and
(self.attributes.get_cache_mode(path[0]) ==
attributes.Schema.CACHE_NONE)):
@@ -1188,7 +1188,6 @@ class Resource(status.ResourceStatus):
message="%s" % error_message)
raise
- @scheduler.wrappertask
def create(self):
"""Create the resource.
@@ -1198,22 +1197,22 @@ class Resource(status.ResourceStatus):
action = self.CREATE
if (self.action, self.status) != (self.INIT, self.COMPLETE):
exc = exception.Error(_('State %s invalid for create')
- % six.text_type(self.state))
+ % str(self.state))
raise exception.ResourceFailure(exc, self, action)
if self.external_id is not None:
- yield self._do_action(self.ADOPT,
- resource_data={
- 'resource_id': self.external_id})
- self.check()
+ yield from self._do_action(self.ADOPT,
+ resource_data={
+ 'resource_id': self.external_id})
+ yield from self.check()
return
# This method can be called when we replace a resource, too. In that
# case, a hook has already been dealt with in `Resource.update` so we
# shouldn't do it here again:
if self.stack.action == self.stack.CREATE:
- yield self._break_if_required(
- self.CREATE, environment.HOOK_PRE_CREATE)
+ yield from self._break_if_required(self.CREATE,
+ environment.HOOK_PRE_CREATE)
LOG.info('creating %s', self)
@@ -1238,13 +1237,13 @@ class Resource(status.ResourceStatus):
delay = timeutils.retry_backoff_delay(count[action],
jitter_max=2.0)
waiter = scheduler.TaskRunner(self.pause)
- yield waiter.as_task(timeout=delay)
+ yield from waiter.as_task(timeout=delay)
elif action == self.CREATE:
# Only validate properties in first create call.
pre_func = self.properties.validate
try:
- yield self._do_action(action, pre_func)
+ yield from self._do_action(action, pre_func)
if action == self.CREATE:
first_failure = None
break
@@ -1281,8 +1280,8 @@ class Resource(status.ResourceStatus):
raise first_failure
if self.stack.action == self.stack.CREATE:
- yield self._break_if_required(
- self.CREATE, environment.HOOK_POST_CREATE)
+ yield from self._break_if_required(self.CREATE,
+ environment.HOOK_POST_CREATE)
@staticmethod
def pause():
@@ -1311,7 +1310,7 @@ class Resource(status.ResourceStatus):
adopt.
"""
self._update_stored_properties()
- return self._do_action(self.ADOPT, resource_data=resource_data)
+ yield from self._do_action(self.ADOPT, resource_data=resource_data)
def handle_adopt(self, resource_data=None):
resource_id, data, metadata = self._get_resource_info(resource_data)
@@ -1326,7 +1325,7 @@ class Resource(status.ResourceStatus):
# save the resource data
if data and isinstance(data, dict):
- for key, value in six.iteritems(data):
+ for key, value in data.items():
self.data_set(key, value)
# save the resource metadata
@@ -1387,7 +1386,9 @@ class Resource(status.ResourceStatus):
prev_resource, check_init_complete=True):
if self.status == self.FAILED:
# always replace when a resource is in CHECK_FAILED
- if self.action == self.CHECK or self.needs_replace_failed():
+ if ((self.action == self.CHECK and
+ self.always_replace_on_check_failed) or (
+ self.needs_replace_failed())):
raise UpdateReplace(self)
if self.state == (self.DELETE, self.COMPLETE):
@@ -1412,7 +1413,7 @@ class Resource(status.ResourceStatus):
if 'replace' in restricted_actions:
ex = exception.ResourceActionRestricted(action='replace')
failure = exception.ResourceFailure(ex, self, self.UPDATE)
- self._add_event(self.UPDATE, self.FAILED, six.text_type(ex))
+ self._add_event(self.UPDATE, self.FAILED, str(ex))
raise failure
else:
raise UpdateReplace(self.name)
@@ -1447,7 +1448,7 @@ class Resource(status.ResourceStatus):
except Exception as e:
failure = exception.ResourceFailure(e, self, self.action)
self.state_set(self.UPDATE, self.FAILED,
- six.text_type(failure))
+ str(failure))
raise failure
self.replaced_by = None
@@ -1456,6 +1457,23 @@ class Resource(status.ResourceStatus):
new_requires=new_requires)
runner(timeout=timeout, progress_callback=progress_callback)
+ def handle_preempt(self):
+ """Pre-empt an in-progress update when a new update is available.
+
+ This method is called when a previous convergence update is in
+ progress but a new update for the resource is available. By default
+ it does nothing, but subclasses may override it to cancel the
+ in-progress update if it is safe to do so.
+
+ Note that this method does not run in the context of the in-progress
+ update and has no access to runtime information about it; nor is it
+ safe to make changes to the Resource in the database. If implemented,
+ this method should cause the existing update to complete by external
+ means. If this leaves the resource in a FAILED state, that should be
+ taken into account in needs_replace_failed().
+ """
+ return
+
def preview_update(self, after, before, after_props, before_props,
prev_resource, check_init_complete=False):
"""Simulates update without actually updating the resource.
@@ -1560,16 +1578,16 @@ class Resource(status.ResourceStatus):
# if any exception happen, we should set the resource to
# FAILED, then raise ResourceFailure
failure = exception.ResourceFailure(e, self, action)
- self.state_set(action, self.FAILED, six.text_type(failure))
+ self.state_set(action, self.FAILED, str(failure))
raise failure
@classmethod
def check_is_substituted(cls, new_res_type):
- support_status = getattr(cls, 'support_status', None)
- if support_status:
- is_substituted = support_status.is_substituted(new_res_type)
- return is_substituted
- return False
+ support_status = getattr(cls, 'support_status', None)
+ if support_status:
+ is_substituted = support_status.is_substituted(new_res_type)
+ return is_substituted
+ return False
def _persist_update_no_change(self, new_template_id):
"""Persist an update where the resource is unchanged."""
@@ -1586,7 +1604,6 @@ class Resource(status.ResourceStatus):
elif new_template_id is not None:
self.store(lock=lock)
- @scheduler.wrappertask
def update(self, after, before=None, prev_resource=None,
new_template_id=None, new_requires=None):
"""Return a task to update the resource.
@@ -1614,8 +1631,8 @@ class Resource(status.ResourceStatus):
after_props, before_props = self._prepare_update_props(after, before)
- yield self._break_if_required(
- self.UPDATE, environment.HOOK_PRE_UPDATE)
+ yield from self._break_if_required(self.UPDATE,
+ environment.HOOK_PRE_UPDATE)
try:
registry = self.stack.env.registry
@@ -1637,7 +1654,7 @@ class Resource(status.ResourceStatus):
self._prepare_update_replace(action)
except exception.ResourceActionRestricted as ae:
failure = exception.ResourceFailure(ae, self, action)
- self._add_event(action, self.FAILED, six.text_type(ae))
+ self._add_event(action, self.FAILED, str(ae))
raise failure
if not needs_update:
@@ -1675,9 +1692,9 @@ class Resource(status.ResourceStatus):
if new_template_id is not None:
self.current_template_id = new_template_id
- yield self.action_handler_task(action,
- args=[after, tmpl_diff,
- prop_diff])
+ yield from self.action_handler_task(action,
+ args=[after, tmpl_diff,
+ prop_diff])
except UpdateReplace:
with excutils.save_and_reraise_exception():
self.current_template_id = self.old_template_id
@@ -1690,8 +1707,8 @@ class Resource(status.ResourceStatus):
if new_requires is not None:
self.requires = new_requires
- yield self._break_if_required(
- self.UPDATE, environment.HOOK_POST_UPDATE)
+ yield from self._break_if_required(self.UPDATE,
+ environment.HOOK_POST_UPDATE)
def prepare_for_replace(self):
"""Prepare resource for replacing.
@@ -1732,7 +1749,7 @@ class Resource(status.ResourceStatus):
raise failure
with self.frozen_properties():
- return self._do_action(action)
+ yield from self._do_action(action)
else:
if self.state == (self.INIT, self.COMPLETE):
# No need to store status; better to leave the resource in
@@ -1771,12 +1788,12 @@ class Resource(status.ResourceStatus):
(self.action != self.SUSPEND and
self.status != self.COMPLETE)):
exc = exception.Error(_('State %s invalid for suspend')
- % six.text_type(self.state))
+ % str(self.state))
raise exception.ResourceFailure(exc, self, action)
LOG.info('suspending %s', self)
with self.frozen_properties():
- return self._do_action(action)
+ yield from self._do_action(action)
def resume(self):
"""Return a task to resume the resource.
@@ -1792,22 +1809,21 @@ class Resource(status.ResourceStatus):
(self.RESUME, self.FAILED),
(self.RESUME, self.COMPLETE)):
exc = exception.Error(_('State %s invalid for resume')
- % six.text_type(self.state))
+ % str(self.state))
raise exception.ResourceFailure(exc, self, action)
LOG.info('resuming %s', self)
with self.frozen_properties():
- return self._do_action(action)
+ yield from self._do_action(action)
def snapshot(self):
"""Snapshot the resource and return the created data, if any."""
LOG.info('snapshotting %s', self)
with self.frozen_properties():
- return self._do_action(self.SNAPSHOT)
+ yield from self._do_action(self.SNAPSHOT)
- @scheduler.wrappertask
def delete_snapshot(self, data):
- yield self.action_handler_task('delete_snapshot', args=[data])
+ yield from self.action_handler_task('delete_snapshot', args=[data])
def physical_resource_name(self):
if self.id is None or self.action == self.INIT:
@@ -1957,7 +1973,6 @@ class Resource(status.ResourceStatus):
return self.resource_id
return None
- @scheduler.wrappertask
def delete(self):
"""A task to delete the resource.
@@ -1985,8 +2000,8 @@ class Resource(status.ResourceStatus):
# case, a hook has already been dealt with in `Resource.update` so we
# shouldn't do it here again:
if self.stack.action == self.stack.DELETE:
- yield self._break_if_required(
- self.DELETE, environment.HOOK_PRE_DELETE)
+ yield from self._break_if_required(self.DELETE,
+ environment.HOOK_PRE_DELETE)
LOG.info('deleting %s', self)
@@ -2016,25 +2031,24 @@ class Resource(status.ResourceStatus):
while True:
count += 1
LOG.info('delete %(name)s attempt %(attempt)d' %
- {'name': six.text_type(self), 'attempt': count+1})
+ {'name': str(self), 'attempt': count + 1})
if count:
delay = timeutils.retry_backoff_delay(count,
jitter_max=2.0)
waiter = scheduler.TaskRunner(self.pause)
- yield waiter.as_task(timeout=delay)
+ yield from waiter.as_task(timeout=delay)
with excutils.exception_filter(should_retry):
- yield self.action_handler_task(action,
- *action_args)
+ yield from self.action_handler_task(action,
+ *action_args)
break
if self.stack.action == self.stack.DELETE:
- yield self._break_if_required(
- self.DELETE, environment.HOOK_POST_DELETE)
+ yield from self._break_if_required(self.DELETE,
+ environment.HOOK_POST_DELETE)
- @scheduler.wrappertask
def destroy(self):
"""A task to delete the resource and remove it from the database."""
- yield self.delete()
+ yield from self.delete()
if self.id is None:
return
@@ -2042,7 +2056,7 @@ class Resource(status.ResourceStatus):
try:
resource_objects.Resource.delete(self.context, self.id)
except exception.NotFound:
- # Don't fail on delete if the db entry has
+ # Don't fail on delete if the DB entry has
# not been created yet.
pass
@@ -2057,7 +2071,7 @@ class Resource(status.ResourceStatus):
self.id,
{'physical_resource_id': self.resource_id})
except Exception as ex:
- LOG.warning('db error %s', ex)
+ LOG.warning('DB error %s', ex)
def store(self, set_metadata=False, lock=LOCK_NONE):
"""Create the resource in the database.
@@ -2069,7 +2083,7 @@ class Resource(status.ResourceStatus):
rs = {'action': self.action,
'status': self.status,
- 'status_reason': six.text_type(self.status_reason),
+ 'status_reason': str(self.status_reason),
'stack_id': self.stack.id,
'physical_resource_id': self.resource_id,
'name': self.name,
@@ -2097,7 +2111,7 @@ class Resource(status.ResourceStatus):
self.context, self.id, rs)
if lock != self.LOCK_NONE:
LOG.error('No calling_engine_id in store() %s',
- six.text_type(rs))
+ str(rs))
else:
self._store_with_lock(rs, lock)
else:
@@ -2123,7 +2137,7 @@ class Resource(status.ResourceStatus):
self._incr_atomic_key(self._atomic_key)
else:
LOG.info('Resource %s is locked or does not exist',
- six.text_type(self))
+ str(self))
LOG.debug('Resource id:%(resource_id)s locked or does not exist. '
'Expected atomic_key:%(atomic_key)s, '
'accessing from engine_id:%(engine_id)s',
@@ -2348,9 +2362,9 @@ class Resource(status.ResourceStatus):
logic specific to the resource implementation.
"""
if self.resource_id is not None:
- return six.text_type(self.resource_id)
+ return str(self.resource_id)
else:
- return six.text_type(self.name)
+ return str(self.name)
def FnGetRefId(self):
"""For the intrinsic function Ref.
@@ -2362,7 +2376,7 @@ class Resource(status.ResourceStatus):
def physical_resource_name_or_FnGetRefId(self):
res_name = self.physical_resource_name()
if res_name is not None:
- return six.text_type(res_name)
+ return str(res_name)
else:
return Resource.get_reference_id(self)
@@ -2416,13 +2430,13 @@ class Resource(status.ResourceStatus):
hook = details['unset_hook']
if not environment.valid_hook_type(hook):
msg = (_('Invalid hook type "%(hook)s" for %(resource)s') %
- {'hook': hook, 'resource': six.text_type(self)})
+ {'hook': hook, 'resource': str(self)})
raise exception.InvalidBreakPointHook(message=msg)
if not self.has_hook(hook):
msg = (_('The "%(hook)s" hook is not defined '
'on %(resource)s') %
- {'hook': hook, 'resource': six.text_type(self)})
+ {'hook': hook, 'resource': str(self)})
raise exception.InvalidBreakPointHook(message=msg)
def _unset_hook(self, details):
@@ -2431,7 +2445,7 @@ class Resource(status.ResourceStatus):
hook = details['unset_hook']
self.clear_hook(hook)
LOG.info('Clearing %(hook)s hook on %(resource)s',
- {'hook': hook, 'resource': six.text_type(self)})
+ {'hook': hook, 'resource': str(self)})
self._add_event(self.action, self.status,
"Hook %s is cleared" % hook)
@@ -2442,7 +2456,7 @@ class Resource(status.ResourceStatus):
def get_string_details():
if details is None:
return 'No signal details provided'
- if isinstance(details, six.string_types):
+ if isinstance(details, str):
return details
if isinstance(details, dict):
if all(k in details for k in ('previous', 'current',
@@ -2468,8 +2482,8 @@ class Resource(status.ResourceStatus):
# No spam required
return
LOG.info('signal %(name)s : %(msg)s',
- {'name': six.text_type(self),
- 'msg': six.text_type(ex)},
+ {'name': str(self),
+ 'msg': str(ex)},
exc_info=True)
failure = exception.ResourceFailure(ex, self)
raise failure
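
Much of the churn in resource.py is the conversion of its task methods away from the @scheduler.wrappertask style, where a yielded value that was itself a task had to be unrolled by the wrapper, to plain generator delegation with "yield from". A minimal sketch of the resulting control flow, using made-up task names rather than the engine's real handlers:

    def action_handler_task(action):
        # Stand-in for a multi-step handler that yields between steps so
        # the scheduler can interleave other work.
        yield '%s step 1' % action
        yield '%s step 2' % action

    def create():
        # With "yield from", delegation to sub-tasks is native generator
        # behaviour; no decorator is needed to recognise and iterate them.
        yield from action_handler_task('CREATE')
        yield from action_handler_task('CHECK')

    assert list(create()) == ['CREATE step 1', 'CREATE step 2',
                              'CHECK step 1', 'CHECK step 2']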
diff --git a/heat/engine/resources/alarm_base.py b/heat/engine/resources/alarm_base.py
index ab32f4f5f..81700f5a0 100644
--- a/heat/engine/resources/alarm_base.py
+++ b/heat/engine/resources/alarm_base.py
@@ -17,7 +17,7 @@ from heat.engine import properties
from heat.engine import resource
from heat.engine import support
-from six.moves.urllib import parse as urlparse
+from urllib import parse
COMMON_PROPERTIES = (
@@ -231,7 +231,7 @@ class BaseAlarm(resource.Resource):
for queue in kwargs.pop(queue_type, []):
query = {'queue_name': queue}
- yield 'trust+zaqar://?%s' % urlparse.urlencode(query)
+ yield 'trust+zaqar://?%s' % parse.urlencode(query)
action_props = {arg_types[0]: list(get_urls(*arg_types))
for arg_types in ((ALARM_ACTIONS, ALARM_QUEUES),
diff --git a/heat/engine/resources/aws/autoscaling/autoscaling_group.py b/heat/engine/resources/aws/autoscaling/autoscaling_group.py
index 481cca7cd..2bcc4b62b 100644
--- a/heat/engine/resources/aws/autoscaling/autoscaling_group.py
+++ b/heat/engine/resources/aws/autoscaling/autoscaling_group.py
@@ -13,7 +13,6 @@
from oslo_log import log as logging
from oslo_utils import excutils
-import six
from heat.common import exception
from heat.common import grouputils
@@ -186,6 +185,10 @@ class AutoScalingGroup(cooldown.CooldownMixin, instgrp.InstanceGroup):
schema=rolling_update_schema)
}
+ def get_size(self):
+ """Get desired capacity."""
+ return self.properties[self.DESIRED_CAPACITY]
+
def handle_create(self):
return self.create_with_template(self.child_template())
@@ -327,7 +330,7 @@ class AutoScalingGroup(cooldown.CooldownMixin, instgrp.InstanceGroup):
with excutils.save_and_reraise_exception():
try:
notif.update({'suffix': 'error',
- 'message': six.text_type(resize_ex),
+ 'message': str(resize_ex),
'capacity': grouputils.get_size(self),
})
notification.send(**notif)
diff --git a/heat/engine/resources/aws/autoscaling/launch_config.py b/heat/engine/resources/aws/autoscaling/launch_config.py
index c28d9f859..8dd6a2b2d 100644
--- a/heat/engine/resources/aws/autoscaling/launch_config.py
+++ b/heat/engine/resources/aws/autoscaling/launch_config.py
@@ -11,9 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
@@ -200,7 +197,7 @@ class LaunchConfiguration(resource.Resource):
for sg in server.security_groups]
}
lc_props = function.resolve(self.properties.data)
- for key, value in six.iteritems(instance_props):
+ for key, value in instance_props.items():
# the properties which are specified in launch configuration,
# will override the attributes from the instance
lc_props.setdefault(key, value)
diff --git a/heat/engine/resources/aws/autoscaling/scaling_policy.py b/heat/engine/resources/aws/autoscaling/scaling_policy.py
index b00d4b15b..c4310fe26 100644
--- a/heat/engine/resources/aws/autoscaling/scaling_policy.py
+++ b/heat/engine/resources/aws/autoscaling/scaling_policy.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
@@ -85,7 +83,8 @@ class AWSScalingPolicy(heat_sp.AutoScalingPolicy):
attributes_schema = {
ALARM_URL: attributes.Schema(
_("A signed url to handle the alarm. (Heat extension)."),
- type=attributes.Schema.STRING
+ type=attributes.Schema.STRING,
+ cache_mode=attributes.Schema.CACHE_NONE
),
}
@@ -101,9 +100,9 @@ class AWSScalingPolicy(heat_sp.AutoScalingPolicy):
def get_reference_id(self):
if self.resource_id is not None:
- return six.text_type(self._get_ec2_signed_url())
+ return str(self._get_ec2_signed_url(never_expire=True))
else:
- return six.text_type(self.name)
+ return str(self.name)
def resource_mapping():
diff --git a/heat/engine/resources/aws/cfn/stack.py b/heat/engine/resources/aws/cfn/stack.py
index dd8aa3511..1dd77b3a3 100644
--- a/heat/engine/resources/aws/cfn/stack.py
+++ b/heat/engine/resources/aws/cfn/stack.py
@@ -12,7 +12,6 @@
# under the License.
from requests import exceptions
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -95,7 +94,7 @@ class NestedStack(stack_resource.StackResource):
def get_reference_id(self):
identifier = self.nested_identifier()
if identifier is None:
- return six.text_type(self.name)
+ return str(self.name)
return identifier.arn()
diff --git a/heat/engine/resources/aws/cfn/wait_condition_handle.py b/heat/engine/resources/aws/cfn/wait_condition_handle.py
index 92ff0addb..6d665dad2 100644
--- a/heat/engine/resources/aws/cfn/wait_condition_handle.py
+++ b/heat/engine/resources/aws/cfn/wait_condition_handle.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.engine.resources import signal_responder
from heat.engine.resources import wait_condition as wc_base
from heat.engine import support
@@ -39,9 +37,9 @@ class WaitConditionHandle(wc_base.BaseWaitConditionHandle):
def get_reference_id(self):
if self.resource_id:
wc = signal_responder.WAITCONDITION
- return six.text_type(self._get_ec2_signed_url(signal_type=wc))
+ return str(self._get_ec2_signed_url(signal_type=wc))
else:
- return six.text_type(self.name)
+ return str(self.name)
def metadata_update(self, new_metadata=None):
"""DEPRECATED. Should use handle_signal instead."""
diff --git a/heat/engine/resources/aws/ec2/eip.py b/heat/engine/resources/aws/ec2/eip.py
index f343c6e61..a6b0340af 100644
--- a/heat/engine/resources/aws/ec2/eip.py
+++ b/heat/engine/resources/aws/ec2/eip.py
@@ -12,7 +12,6 @@
# under the License.
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -138,13 +137,13 @@ class ElasticIp(resource.Resource):
def get_reference_id(self):
eip = self._ipaddress()
if eip:
- return six.text_type(eip)
+ return str(eip)
else:
- return six.text_type(self.name)
+ return str(self.name)
def _resolve_attribute(self, name):
if name == self.ALLOCATION_ID:
- return six.text_type(self.resource_id)
+ return str(self.resource_id)
class ElasticIpAssociation(resource.Resource):
diff --git a/heat/engine/resources/aws/ec2/instance.py b/heat/engine/resources/aws/ec2/instance.py
index e35a5b3ad..fc536c29f 100644
--- a/heat/engine/resources/aws/ec2/instance.py
+++ b/heat/engine/resources/aws/ec2/instance.py
@@ -15,9 +15,6 @@ import copy
from oslo_config import cfg
from oslo_log import log as logging
-import six
-
-cfg.CONF.import_opt('max_server_name_length', 'heat.common.config')
from heat.common import exception
from heat.common.i18n import _
@@ -28,6 +25,9 @@ from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import scheduler_hints as sh
+
+cfg.CONF.import_opt('max_server_name_length', 'heat.common.config')
+
LOG = logging.getLogger(__name__)
@@ -396,7 +396,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
LOG.info('%(name)s._resolve_attribute(%(attname)s) == %(res)s',
{'name': self.name, 'attname': name, 'res': res})
- return six.text_type(res) if res else None
+ return str(res) if res else None
def _port_data_delete(self):
# delete the port data which implicit-created
@@ -415,7 +415,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
unsorted_nics = []
for entry in network_interfaces:
nic = (entry
- if not isinstance(entry, six.string_types)
+ if not isinstance(entry, str)
else {'NetworkInterfaceId': entry,
'DeviceIndex': len(unsorted_nics)})
unsorted_nics.append(nic)
@@ -520,7 +520,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
hint = tm[self.NOVA_SCHEDULER_HINT_KEY]
hint_value = tm[self.NOVA_SCHEDULER_HINT_VALUE]
if hint in scheduler_hints:
- if isinstance(scheduler_hints[hint], six.string_types):
+ if isinstance(scheduler_hints[hint], str):
scheduler_hints[hint] = [scheduler_hints[hint]]
scheduler_hints[hint].append(hint_value)
else:
@@ -558,6 +558,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
if server is not None:
self.resource_id_set(server.id)
+ assert server is not None
creator = progress.ServerCreateProgress(server.id)
attachers = []
for vol_id, device in self.volumes():
@@ -864,7 +865,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s',
{'name': self.name, 'status': status})
- if status in list(cp.deferred_server_statuses + ['ACTIVE']):
+ if status in (cp.deferred_server_statuses | {'ACTIVE'}):
return status == 'SUSPENDED'
else:
exc = exception.ResourceUnknownStatus(
diff --git a/heat/engine/resources/aws/ec2/internet_gateway.py b/heat/engine/resources/aws/ec2/internet_gateway.py
index 93926ab8b..2f58ac321 100644
--- a/heat/engine/resources/aws/ec2/internet_gateway.py
+++ b/heat/engine/resources/aws/ec2/internet_gateway.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import properties
@@ -102,7 +100,7 @@ class VPCGatewayAttachment(resource.Resource):
default_client_name = 'neutron'
def _vpc_route_tables(self, ignore_errors=False):
- for res in six.itervalues(self.stack):
+ for res in self.stack.values():
if res.has_interface('AWS::EC2::RouteTable'):
try:
vpc_id = self.properties[self.VPC_ID]
diff --git a/heat/engine/resources/aws/ec2/security_group.py b/heat/engine/resources/aws/ec2/security_group.py
index 38b298e8f..a1db04626 100644
--- a/heat/engine/resources/aws/ec2/security_group.py
+++ b/heat/engine/resources/aws/ec2/security_group.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import properties
@@ -169,12 +167,12 @@ class NeutronSecurityGroup(object):
rule['direction'] = 'egress'
for rule in updated[self.sg.SECURITY_GROUP_INGRESS]:
rule['direction'] = 'ingress'
- updated_rules = list(six.itervalues(updated))
+ updated_rules = list(updated.values())
updated_all = updated_rules[0] + updated_rules[1]
ids_to_delete = [id for id, rule in existing.items()
if rule not in updated_all]
rules_to_create = [rule for rule in updated_all
- if rule not in six.itervalues(existing)]
+ if rule not in existing.values()]
return ids_to_delete, rules_to_create
diff --git a/heat/engine/resources/aws/iam/user.py b/heat/engine/resources/aws/iam/user.py
index 702781329..993cad508 100644
--- a/heat/engine/resources/aws/iam/user.py
+++ b/heat/engine/resources/aws/iam/user.py
@@ -12,7 +12,6 @@
# under the License.
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -80,7 +79,7 @@ class User(stack_user.StackUser):
# If a non-string (e.g embedded IAM dict policy) is passed, we
# ignore the policy (don't reject it because we previously ignored
# and we don't want to break templates which previously worked
- if not isinstance(policy, six.string_types):
+ if not isinstance(policy, str):
LOG.debug("Ignoring policy %s, must be string "
"resource name", policy)
continue
@@ -118,7 +117,7 @@ class User(stack_user.StackUser):
def access_allowed(self, resource_name):
policies = (self.properties[self.POLICIES] or [])
for policy in policies:
- if not isinstance(policy, six.string_types):
+ if not isinstance(policy, str):
LOG.debug("Ignoring policy %s, must be string "
"resource name", policy)
continue
diff --git a/heat/engine/resources/aws/lb/loadbalancer.py b/heat/engine/resources/aws/lb/loadbalancer.py
index 439de0643..fee343cd9 100644
--- a/heat/engine/resources/aws/lb/loadbalancer.py
+++ b/heat/engine/resources/aws/lb/loadbalancer.py
@@ -14,7 +14,6 @@ import os
from oslo_config import cfg
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -623,7 +622,7 @@ backend servers
'Interval must be larger than Timeout'}
def get_reference_id(self):
- return six.text_type(self.name)
+ return str(self.name)
def _resolve_attribute(self, name):
"""We don't really support any of these yet."""
diff --git a/heat/engine/resources/aws/s3/s3.py b/heat/engine/resources/aws/s3/s3.py
index 5cad6cbdb..2801ba3a7 100644
--- a/heat/engine/resources/aws/s3/s3.py
+++ b/heat/engine/resources/aws/s3/s3.py
@@ -10,8 +10,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse
from heat.common import exception
from heat.common.i18n import _
@@ -165,11 +164,11 @@ class S3Bucket(resource.Resource):
self.client_plugin().ignore_not_found(ex)
def get_reference_id(self):
- return six.text_type(self.resource_id)
+ return str(self.resource_id)
def _resolve_attribute(self, name):
url = self.client().get_auth()[0]
- parsed = list(urlparse.urlparse(url))
+ parsed = list(parse.urlparse(url))
if name == self.DOMAIN_NAME:
return parsed[1].split(':')[0]
elif name == self.WEBSITE_URL:
diff --git a/heat/engine/resources/openstack/aodh/alarm.py b/heat/engine/resources/openstack/aodh/alarm.py
index d9c6eada8..f646867ef 100644
--- a/heat/engine/resources/openstack/aodh/alarm.py
+++ b/heat/engine/resources/openstack/aodh/alarm.py
@@ -11,14 +11,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import alarm_base
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
+from heat.engine import translation
class AodhAlarm(alarm_base.BaseAlarm):
@@ -156,7 +155,7 @@ class AodhAlarm(alarm_base.BaseAlarm):
# make sure the matching_metadata appears in the query like this:
# {field: metadata.$prefix.x, ...}
- for m_k, m_v in six.iteritems(mmd):
+ for m_k, m_v in mmd.items():
key = 'metadata.%s' % prefix
if m_k.startswith('metadata.'):
m_k = m_k[len('metadata.'):]
@@ -167,7 +166,7 @@ class AodhAlarm(alarm_base.BaseAlarm):
# NOTE(prazumovsky): type of query value must be a string, but
# matching_metadata value type can not be a string, so we
# must convert value to a string type.
- query.append(dict(field=key, op='eq', value=six.text_type(m_v)))
+ query.append(dict(field=key, op='eq', value=str(m_v)))
if self.MATCHING_METADATA in kwargs:
del kwargs[self.MATCHING_METADATA]
if self.QUERY in kwargs:
@@ -328,9 +327,108 @@ class EventAlarm(alarm_base.BaseAlarm):
self.get_alarm_props(new_props))
+class LBMemberHealthAlarm(alarm_base.BaseAlarm):
+ """A resource that implements a Loadbalancer Member Health Alarm.
+
+ Allows setting alarms based on the health of load balancer pool members:
+ a member is considered unhealthy when it reports an operating_status of
+ ERROR after an initial grace period following creation (120 seconds by
+ default).
+ """
+
+ alarm_type = "loadbalancer_member_health"
+
+ support_status = support.SupportStatus(version='13.0.0')
+
+ PROPERTIES = (
+ POOL, STACK, AUTOSCALING_GROUP_ID
+ ) = (
+ "pool", "stack", "autoscaling_group_id"
+ )
+
+ RULE_PROPERTIES = (
+ POOL_ID, STACK_ID
+ ) = (
+ "pool_id", "stack_id"
+ )
+
+ properties_schema = {
+ POOL: properties.Schema(
+ properties.Schema.STRING,
+ _("Name or ID of the loadbalancer pool for which the health of "
+ "each member will be evaluated."),
+ update_allowed=True,
+ required=True,
+ ),
+ STACK: properties.Schema(
+ properties.Schema.STRING,
+ _("Name or ID of the root / top level Heat stack containing the "
+ "loadbalancer pool and members. An update will be triggered "
+ "on the root Stack if an unhealthy member is detected in the "
+ "loadbalancer pool."),
+ update_allowed=False,
+ required=True,
+ ),
+ AUTOSCALING_GROUP_ID: properties.Schema(
+ properties.Schema.STRING,
+ _("ID of the Heat autoscaling group that contains the "
+ "loadbalancer members. Unhealthy members will be marked "
+ "as such before an update is triggered on the root stack."),
+ update_allowed=True,
+ required=True,
+ ),
+ }
+
+ properties_schema.update(alarm_base.common_properties_schema)
+
+ def get_alarm_props(self, props):
+ """Apply all relevant compatibility xforms."""
+ kwargs = self.actions_to_urls(props)
+ kwargs['type'] = self.alarm_type
+
+ for prop in (self.POOL, self.STACK, self.AUTOSCALING_GROUP_ID):
+ if prop in kwargs:
+ del kwargs[prop]
+
+ rule = {
+ self.POOL_ID: props[self.POOL],
+ self.STACK_ID: props[self.STACK],
+ self.AUTOSCALING_GROUP_ID: props[self.AUTOSCALING_GROUP_ID]
+ }
+
+ kwargs["loadbalancer_member_health_rule"] = rule
+ return kwargs
+
+ def translation_rules(self, properties):
+ translation_rules = [
+ translation.TranslationRule(
+ properties,
+ translation.TranslationRule.RESOLVE,
+ [self.POOL],
+ client_plugin=self.client_plugin('octavia'),
+ finder='get_pool'
+ ),
+ ]
+ return translation_rules
+
+ def handle_create(self):
+ props = self.get_alarm_props(self.properties)
+ props['name'] = self.physical_resource_name()
+ alarm = self.client().alarm.create(props)
+ self.resource_id_set(alarm['alarm_id'])
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ if prop_diff:
+ new_props = json_snippet.properties(self.properties_schema,
+ self.context)
+ self.client().alarm.update(self.resource_id,
+ self.get_alarm_props(new_props))
+
+
def resource_mapping():
return {
'OS::Aodh::Alarm': AodhAlarm,
'OS::Aodh::CombinationAlarm': CombinationAlarm,
'OS::Aodh::EventAlarm': EventAlarm,
+ 'OS::Aodh::LBMemberHealthAlarm': LBMemberHealthAlarm,
}
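
For reference, the new OS::Aodh::LBMemberHealthAlarm sends Aodh an alarm whose type-specific rule is built by get_alarm_props() above. A sketch of the resulting payload shape, with made-up identifiers:

    # Illustrative property values (made up); the real ones come from the
    # template, with the pool name resolved to an ID by the translation rule.
    properties = {
        'pool': '0b9a21d5-pool-id',
        'stack': '7c3f42aa-root-stack-id',
        'autoscaling_group_id': '9d1e88f0-asg-id',
    }

    payload = {
        'type': 'loadbalancer_member_health',
        'loadbalancer_member_health_rule': {
            'pool_id': properties['pool'],
            'stack_id': properties['stack'],
            'autoscaling_group_id': properties['autoscaling_group_id'],
        },
        # ...plus the common alarm properties (actions, severity, etc.)
        # translated by actions_to_urls().
    }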
diff --git a/heat/engine/resources/openstack/barbican/container.py b/heat/engine/resources/openstack/barbican/container.py
index 91d205e14..feb7639b3 100644
--- a/heat/engine/resources/openstack/barbican/container.py
+++ b/heat/engine/resources/openstack/barbican/container.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
@@ -192,12 +190,12 @@ class CertificateContainer(GenericContainer):
}
def create_container(self):
- info = dict((k, v) for k, v in six.iteritems(self.properties)
+ info = dict((k, v) for k, v in self.properties.items()
if v is not None)
return self.client_plugin().create_certificate(**info)
def get_refs(self):
- return [v for k, v in six.iteritems(self.properties)
+ return [v for k, v in self.properties.items()
if (k != self.NAME and v is not None)]
@@ -239,12 +237,12 @@ class RSAContainer(GenericContainer):
}
def create_container(self):
- info = dict((k, v) for k, v in six.iteritems(self.properties)
+ info = dict((k, v) for k, v in self.properties.items()
if v is not None)
return self.client_plugin().create_rsa(**info)
def get_refs(self):
- return [v for k, v in six.iteritems(self.properties)
+ return [v for k, v in self.properties.items()
if (k != self.NAME and v is not None)]
diff --git a/heat/engine/resources/openstack/barbican/order.py b/heat/engine/resources/openstack/barbican/order.py
index c2cc8c8aa..8fe2f57c7 100644
--- a/heat/engine/resources/openstack/barbican/order.py
+++ b/heat/engine/resources/openstack/barbican/order.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
@@ -228,8 +226,8 @@ class Order(resource.Resource):
raise exception.ResourcePropertyDependency(
prop1=self.PROFILE, prop2=self.CA_ID
)
- declared_props = sorted([k for k, v in six.iteritems(
- self.properties) if k != self.TYPE and v is not None])
+ declared_props = sorted([k for k, v in self.properties.items()
+ if k != self.TYPE and v is not None])
allowed_props = sorted(self.ALLOWED_PROPERTIES_FOR_TYPE[
self.properties[self.TYPE]])
diff = sorted(set(declared_props) - set(allowed_props))
diff --git a/heat/engine/resources/openstack/blazar/host.py b/heat/engine/resources/openstack/blazar/host.py
index e8745dac6..272fe90ce 100644
--- a/heat/engine/resources/openstack/blazar/host.py
+++ b/heat/engine/resources/openstack/blazar/host.py
@@ -41,7 +41,7 @@ class Host(resource.Resource):
)
ATTRIBUTES = (
- HYPERVISOR_HOSTNAME, HYPERVISOR_TYPE, HYPERVISOR_VERSION,
+ HYPERVISOR_HOSTNAME, HYPERVISOR_TYPE, HYPERVISOR_VERSION,
VCPUS, CPU_INFO, MEMORY_MB, LOCAL_GB,
SERVICE_NAME, RESERVABLE, STATUS, TRUST_ID,
EXTRA_CAPABILITY_ATTR, CREATED_AT, UPDATED_AT,
diff --git a/heat/engine/resources/openstack/cinder/qos_specs.py b/heat/engine/resources/openstack/cinder/qos_specs.py
index e77abb163..9e770f8cb 100644
--- a/heat/engine/resources/openstack/cinder/qos_specs.py
+++ b/heat/engine/resources/openstack/cinder/qos_specs.py
@@ -161,6 +161,7 @@ class QoSAssociation(resource.Resource):
for vt in self.properties[self.VOLUME_TYPES]:
self.client().qos_specs.associate(self.properties[self.QOS_SPECS],
vt)
+ self.resource_id_set(self.uuid)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Associate volume types to QoS."""
diff --git a/heat/engine/resources/openstack/cinder/volume.py b/heat/engine/resources/openstack/cinder/volume.py
index 7fb4294ae..8180e49a8 100644
--- a/heat/engine/resources/openstack/cinder/volume.py
+++ b/heat/engine/resources/openstack/cinder/volume.py
@@ -13,7 +13,6 @@
from oslo_log import log as logging
from oslo_serialization import jsonutils
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -161,8 +160,13 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
MULTI_ATTACH: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether allow the volume to be attached more than once.'),
- support_status=support.SupportStatus(version='6.0.0'),
- default=False
+ default=False,
+ support_status=support.SupportStatus(
+ status=support.HIDDEN,
+ version='13.0.0',
+ previous_status=support.SupportStatus(
+ status=support.SUPPORTED,
+ version='6.0.0'))
),
}
@@ -201,7 +205,8 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
),
STATUS: attributes.Schema(
_('The current status of the volume.'),
- type=attributes.Schema.STRING
+ type=attributes.Schema.STRING,
+ cache_mode=attributes.Schema.CACHE_NONE
),
CREATED_AT: attributes.Schema(
_('The timestamp indicating volume creation.'),
@@ -274,7 +279,8 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
def _create_arguments(self):
arguments = {
'size': self.properties[self.SIZE],
- 'availability_zone': self.properties[self.AVAILABILITY_ZONE],
+ 'availability_zone': (self.properties[self.AVAILABILITY_ZONE] or
+ None),
}
scheduler_hints = self._scheduler_hints(
@@ -290,7 +296,7 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
arguments['imageRef'] = self.properties[self.IMAGE_REF]
optionals = (self.SNAPSHOT_ID, self.VOLUME_TYPE, self.SOURCE_VOLID,
- self.METADATA, self.MULTI_ATTACH)
+ self.METADATA)
arguments.update((prop, self.properties[prop]) for prop in optionals
if self.properties[prop] is not None)
@@ -303,7 +309,7 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
cinder = self.client()
vol = cinder.volumes.get(self.resource_id)
if name == self.METADATA_ATTR:
- return six.text_type(jsonutils.dumps(vol.metadata))
+ return str(jsonutils.dumps(vol.metadata))
elif name == self.METADATA_VALUES_ATTR:
return vol.metadata
if name == self.DISPLAY_NAME_ATTR:
@@ -312,7 +318,7 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
return vol.description
elif name == self.ATTACHMENTS_LIST:
return vol.attachments
- return six.text_type(getattr(vol, name))
+ return str(getattr(vol, name))
def check_create_complete(self, vol_id):
complete = super(CinderVolume, self).check_create_complete(vol_id)
@@ -348,7 +354,7 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
if self.client_plugin().is_client_exception(ex):
raise exception.Error(_(
"Failed to extend volume %(vol)s - %(err)s") % {
- 'vol': self.resource_id, 'err': six.text_type(ex)})
+ 'vol': self.resource_id, 'err': str(ex)})
else:
raise
return True
@@ -502,9 +508,9 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
def _detach_volume_to_complete(self, prg_detach):
if not prg_detach.called:
- self.client_plugin('nova').detach_volume(prg_detach.srv_id,
- prg_detach.attach_id)
- prg_detach.called = True
+ prg_detach.called = self.client_plugin(
+ 'nova').detach_volume(prg_detach.srv_id,
+ prg_detach.attach_id)
return False
if not prg_detach.cinder_complete:
prg_detach.cinder_complete = self.client_plugin(
@@ -742,11 +748,12 @@ class CinderVolumeAttachment(vb.BaseVolumeAttachment):
# self.resource_id is not replaced prematurely
volume_id = self.properties[self.VOLUME_ID]
server_id = self.properties[self.INSTANCE_ID]
- self.client_plugin('nova').detach_volume(server_id,
- self.resource_id)
+
prg_detach = progress.VolumeDetachProgress(
server_id, volume_id, self.resource_id)
- prg_detach.called = True
+
+ prg_detach.called = self.client_plugin(
+ 'nova').detach_volume(server_id, self.resource_id)
if self.VOLUME_ID in prop_diff:
volume_id = prop_diff.get(self.VOLUME_ID)
diff --git a/heat/engine/resources/openstack/designate/domain.py b/heat/engine/resources/openstack/designate/domain.py
index d2d88bbf6..f12045362 100644
--- a/heat/engine/resources/openstack/designate/domain.py
+++ b/heat/engine/resources/openstack/designate/domain.py
@@ -10,128 +10,28 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import six
from heat.common.i18n import _
-from heat.engine import attributes
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine import resource
+from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
-class DesignateDomain(resource.Resource):
+class DesignateDomain(none_resource.NoneResource):
"""Heat Template Resource for Designate Domain.
Designate provides DNS-as-a-Service services for OpenStack. So, domain
is a realm with an identification string, unique in DNS.
"""
-
support_status = support.SupportStatus(
status=support.HIDDEN,
version='10.0.0',
- message=_('Use OS::Designate::Zone instead.'),
+ message=_('This resource has been removed; use '
+ 'OS::Designate::Zone instead.'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='8.0.0',
previous_status=support.SupportStatus(version='5.0.0')))
- entity = 'domains'
-
- default_client_name = 'designate'
-
- PROPERTIES = (
- NAME, TTL, DESCRIPTION, EMAIL
- ) = (
- 'name', 'ttl', 'description', 'email'
- )
-
- ATTRIBUTES = (
- SERIAL,
- ) = (
- 'serial',
- )
-
- properties_schema = {
- # Based on RFC 1035, length of name is set to max of 255
- NAME: properties.Schema(
- properties.Schema.STRING,
- _('Domain name.'),
- required=True,
- constraints=[constraints.Length(max=255)]
- ),
- # Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
- TTL: properties.Schema(
- properties.Schema.INTEGER,
- _('Time To Live (Seconds).'),
- update_allowed=True,
- constraints=[constraints.Range(min=1,
- max=2147483647)]
- ),
- # designate mandates to the max length of 160 for description
- DESCRIPTION: properties.Schema(
- properties.Schema.STRING,
- _('Description of domain.'),
- update_allowed=True,
- constraints=[constraints.Length(max=160)]
- ),
- EMAIL: properties.Schema(
- properties.Schema.STRING,
- _('Domain email.'),
- update_allowed=True,
- required=True
- )
- }
-
- attributes_schema = {
- SERIAL: attributes.Schema(
- _("DNS domain serial."),
- type=attributes.Schema.STRING
- ),
- }
-
- def handle_create(self):
- args = dict((k, v) for k, v in six.iteritems(self.properties) if v)
- domain = self.client_plugin().domain_create(**args)
-
- self.resource_id_set(domain.id)
-
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- args = dict()
-
- if prop_diff.get(self.EMAIL):
- args['email'] = prop_diff.get(self.EMAIL)
-
- if prop_diff.get(self.TTL):
- args['ttl'] = prop_diff.get(self.TTL)
-
- if prop_diff.get(self.DESCRIPTION):
- args['description'] = prop_diff.get(self.DESCRIPTION)
-
- if len(args.keys()) > 0:
- args['id'] = self.resource_id
- self.client_plugin().domain_update(**args)
-
- def _resolve_attribute(self, name):
- if self.resource_id is None:
- return
- if name == self.SERIAL:
- domain = self.client().domains.get(self.resource_id)
- return domain.serial
-
- # FIXME(kanagaraj-manickam) Remove this method once designate defect
- # 1485552 is fixed.
- def _show_resource(self):
- return dict(self.client().domains.get(self.resource_id).items())
-
- def parse_live_resource_data(self, resource_properties, resource_data):
- domain_reality = {}
-
- for key in self.PROPERTIES:
- domain_reality.update({key: resource_data.get(key)})
-
- return domain_reality
-
def resource_mapping():
return {
diff --git a/heat/engine/resources/openstack/designate/record.py b/heat/engine/resources/openstack/designate/record.py
index 675e03810..4bf8f2096 100644
--- a/heat/engine/resources/openstack/designate/record.py
+++ b/heat/engine/resources/openstack/designate/record.py
@@ -11,16 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common.i18n import _
-from heat.engine import constraints
-from heat.engine import properties
-from heat.engine import resource
+from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
-class DesignateRecord(resource.Resource):
+class DesignateRecord(none_resource.NoneResource):
"""Heat Template Resource for Designate Record.
Designate provides DNS-as-a-Service services for OpenStack. Record is
@@ -31,157 +27,13 @@ class DesignateRecord(resource.Resource):
support_status = support.SupportStatus(
status=support.HIDDEN,
version='10.0.0',
- message=_('Use OS::Designate::RecordSet instead.'),
+ message=_('This resource has been removed; use '
+ 'OS::Designate::RecordSet instead.'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='8.0.0',
previous_status=support.SupportStatus(version='5.0.0')))
- entity = 'records'
-
- default_client_name = 'designate'
-
- PROPERTIES = (
- NAME, TTL, DESCRIPTION, TYPE, DATA, PRIORITY, DOMAIN
- ) = (
- 'name', 'ttl', 'description', 'type', 'data', 'priority', 'domain'
- )
-
- _ALLOWED_TYPES = (
- A, AAAA, CNAME, MX, SRV, TXT, SPF,
- NS, PTR, SSHFP, SOA
- ) = (
- 'A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF',
- 'NS', 'PTR', 'SSHFP', 'SOA'
- )
-
- properties_schema = {
- # Based on RFC 1035, length of name is set to max of 255
- NAME: properties.Schema(
- properties.Schema.STRING,
- _('Record name.'),
- required=True,
- constraints=[constraints.Length(max=255)]
- ),
- # Based on RFC 1035, range for ttl is set to 1 to signed 32 bit number
- TTL: properties.Schema(
- properties.Schema.INTEGER,
- _('Time To Live (Seconds).'),
- update_allowed=True,
- constraints=[constraints.Range(min=1,
- max=2147483647)]
- ),
- # designate mandates to the max length of 160 for description
- DESCRIPTION: properties.Schema(
- properties.Schema.STRING,
- _('Description of record.'),
- update_allowed=True,
- constraints=[constraints.Length(max=160)]
- ),
- TYPE: properties.Schema(
- properties.Schema.STRING,
- _('DNS Record type.'),
- update_allowed=True,
- required=True,
- constraints=[constraints.AllowedValues(
- _ALLOWED_TYPES
- )]
- ),
- DATA: properties.Schema(
- properties.Schema.STRING,
- _('DNS record data, varies based on the type of record. For more '
- 'details, please refer rfc 1035.'),
- update_allowed=True,
- required=True
- ),
- # Based on RFC 1035, range for priority is set to 0 to signed 16 bit
- # number
- PRIORITY: properties.Schema(
- properties.Schema.INTEGER,
- _('DNS record priority. It is considered only for MX and SRV '
- 'types, otherwise, it is ignored.'),
- update_allowed=True,
- constraints=[constraints.Range(min=0,
- max=65536)]
- ),
- DOMAIN: properties.Schema(
- properties.Schema.STRING,
- _('DNS Domain id or name.'),
- required=True,
- constraints=[constraints.CustomConstraint('designate.domain')]
- ),
- }
-
- def handle_create(self):
- args = dict(
- name=self.properties[self.NAME],
- type=self.properties[self.TYPE],
- description=self.properties[self.DESCRIPTION],
- ttl=self.properties[self.TTL],
- data=self.properties[self.DATA],
- # priority is considered only for MX and SRV record.
- priority=(self.properties[self.PRIORITY]
- if self.properties[self.TYPE] in (self.MX, self.SRV)
- else None),
- domain=self.properties[self.DOMAIN]
- )
-
- domain = self.client_plugin().record_create(**args)
-
- self.resource_id_set(domain.id)
-
- def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- args = dict()
-
- if prop_diff.get(self.TTL):
- args['ttl'] = prop_diff.get(self.TTL)
-
- if prop_diff.get(self.DESCRIPTION):
- args['description'] = prop_diff.get(self.DESCRIPTION)
-
- if prop_diff.get(self.TYPE):
- args['type'] = prop_diff.get(self.TYPE)
-
- # priority is considered only for MX and SRV record.
- if prop_diff.get(self.PRIORITY):
- args['priority'] = (prop_diff.get(self.PRIORITY)
- if (prop_diff.get(self.TYPE) or
- self.properties[self.TYPE]) in
- (self.MX, self.SRV)
- else None)
-
- if prop_diff.get(self.DATA):
- args['data'] = prop_diff.get(self.DATA)
-
- if len(args.keys()) > 0:
- args['id'] = self.resource_id
- args['domain'] = self.properties[self.DOMAIN]
- self.client_plugin().record_update(**args)
-
- def handle_delete(self):
- if self.resource_id is not None:
- with self.client_plugin().ignore_not_found:
- self.client_plugin().record_delete(
- id=self.resource_id,
- domain=self.properties[self.DOMAIN]
- )
-
- # FIXME(kanagaraj-manickam) Remove this method once designate defect
- # 1485552 is fixed.
- def _show_resource(self):
- kwargs = dict(domain=self.properties[self.DOMAIN],
- id=self.resource_id)
- return dict(six.iteritems(self.client_plugin().record_show(**kwargs)))
-
- def parse_live_resource_data(self, resource_properties, resource_data):
- record_reality = {}
-
- properties_keys = list(set(self.PROPERTIES) - {self.NAME, self.DOMAIN})
- for key in properties_keys:
- record_reality.update({key: resource_data.get(key)})
-
- return record_reality
-
def resource_mapping():
return {
diff --git a/heat/engine/resources/openstack/designate/recordset.py b/heat/engine/resources/openstack/designate/recordset.py
index a97cb90ad..b993504ef 100644
--- a/heat/engine/resources/openstack/designate/recordset.py
+++ b/heat/engine/resources/openstack/designate/recordset.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
@@ -98,12 +96,8 @@ class DesignateRecordSet(resource.Resource):
entity = 'recordsets'
- def client(self):
- return super(DesignateRecordSet,
- self).client(version=self.client_plugin().V2)
-
def handle_create(self):
- args = dict((k, v) for k, v in six.iteritems(self.properties) if v)
+ args = dict((k, v) for k, v in self.properties.items() if v)
args['type_'] = args.pop(self.TYPE)
if not args.get(self.NAME):
args[self.NAME] = self.physical_resource_name()
@@ -154,9 +148,10 @@ class DesignateRecordSet(resource.Resource):
recordset=self.resource_id,
zone=self.properties[self.ZONE]
)
+ return self.resource_id
def check_delete_complete(self, handler_data=None):
- if self.resource_id is not None:
+ if handler_data:
with self.client_plugin().ignore_not_found:
return self._check_status_complete()
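The change above makes handle_delete return the resource id so that check_delete_complete can key off the handler data instead of re-reading self.resource_id. A rough standalone illustration of that token-passing convention (FakeClient below is purely hypothetical, not the Designate client plugin):

# Sketch of the delete/check-delete token pattern: handle_delete returns
# a token (here the resource id) and check_delete_complete only polls
# while that token is set.

class FakeClient:
    """Hypothetical stand-in for a DNS service client."""
    def __init__(self):
        self.deleted = set()

    def delete(self, rsrc_id):
        self.deleted.add(rsrc_id)

    def exists(self, rsrc_id):
        return rsrc_id not in self.deleted


def handle_delete(client, resource_id):
    if resource_id is None:
        return None
    client.delete(resource_id)
    return resource_id          # handler data for check_delete_complete


def check_delete_complete(client, handler_data):
    if not handler_data:        # nothing was deleted in handle_delete
        return True
    return not client.exists(handler_data)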
diff --git a/heat/engine/resources/openstack/designate/zone.py b/heat/engine/resources/openstack/designate/zone.py
index 6cd659c2a..7d1f91332 100644
--- a/heat/engine/resources/openstack/designate/zone.py
+++ b/heat/engine/resources/openstack/designate/zone.py
@@ -10,7 +10,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -19,6 +18,7 @@ from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
+from heat.engine import translation
class DesignateZone(resource.Resource):
@@ -32,9 +32,9 @@ class DesignateZone(resource.Resource):
version='8.0.0')
PROPERTIES = (
- NAME, TTL, DESCRIPTION, EMAIL, TYPE, MASTERS
+ NAME, TTL, DESCRIPTION, EMAIL, TYPE, PRIMARIES, MASTERS
) = (
- 'name', 'ttl', 'description', 'email', 'type', 'masters'
+ 'name', 'ttl', 'description', 'email', 'type', 'primaries', 'masters'
)
ATTRIBUTES = (
@@ -81,17 +81,28 @@ class DesignateZone(resource.Resource):
TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of zone. PRIMARY is controlled by Designate, SECONDARY '
- 'zones are slaved from another DNS Server.'),
+ 'zones are transferred from another DNS Server.'),
default=PRIMARY,
constraints=[constraints.AllowedValues(
allowed=TYPES)]
),
MASTERS: properties.Schema(
properties.Schema.LIST,
- _('The servers to slave from to get DNS information and is '
- 'mandatory for zone type SECONDARY, otherwise ignored.'),
+ _('The primary servers to transfer DNS zone information from. '
+ 'Mandatory for zone type SECONDARY, otherwise ignored.'),
+ update_allowed=True,
+ support_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ version='15.0.0',
+ message=_('Use ``primaries`` instead.')
+ )
+ ),
+ PRIMARIES: properties.Schema(
+ properties.Schema.LIST,
+ _('The primary servers to transfer DNS zone information from. '
+ 'Mandatory for zone type SECONDARY, otherwise ignored.'),
update_allowed=True
- )
+ ),
}
attributes_schema = {
@@ -105,10 +116,6 @@ class DesignateZone(resource.Resource):
entity = 'zones'
- def client(self):
- return super(DesignateZone,
- self).client(version=self.client_plugin().V2)
-
def validate(self):
super(DesignateZone, self).validate()
@@ -125,8 +132,19 @@ class DesignateZone(resource.Resource):
raise_invalid_exception(self.PRIMARY, self.EMAIL)
raise_invalid_exception(self.SECONDARY, self.MASTERS)
+ def translation_rules(self, props):
+ return [
+ # Translate to "masters" as that is what the Designate API uses,
+ # even though we have deprecated that name for the property
+ # in favour of "primaries".
+ translation.TranslationRule(props,
+ translation.TranslationRule.REPLACE,
+ translation_path=[self.MASTERS],
+ value_name=self.PRIMARIES)
+ ]
+
def handle_create(self):
- args = dict((k, v) for k, v in six.iteritems(self.properties) if v)
+ args = dict((k, v) for k, v in self.properties.items() if v)
args['type_'] = args.pop(self.TYPE)
zone = self.client().zones.create(**args)
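The new translation rule maps the user-facing ``primaries`` property back onto ``masters``, which is the name the Designate API still expects. A simplified standalone sketch of that replace-style translation (plain dicts; this is not Heat's TranslationRule API, and the conflict check is an assumption about its behaviour):

# Sketch: replace-style property translation, as used for
# primaries -> masters above, spelled out for a plain dict.

def translate_primaries_to_masters(props):
    """Copy 'primaries' into 'masters' so the client sees the old name."""
    props = dict(props)                      # don't mutate the caller's dict
    if props.get('primaries') is not None:
        if props.get('masters') is not None:
            raise ValueError("'primaries' and 'masters' are mutually exclusive")
        props['masters'] = props.pop('primaries')
    return props

# translate_primaries_to_masters({'type': 'SECONDARY',
#                                 'primaries': ['192.0.2.1']})
# -> {'type': 'SECONDARY', 'masters': ['192.0.2.1']}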
diff --git a/heat/engine/resources/openstack/glance/image.py b/heat/engine/resources/openstack/glance/image.py
index cb571b546..aee760142 100644
--- a/heat/engine/resources/openstack/glance/image.py
+++ b/heat/engine/resources/openstack/glance/image.py
@@ -31,12 +31,12 @@ class GlanceWebImage(resource.Resource):
NAME, IMAGE_ID, MIN_DISK, MIN_RAM, PROTECTED,
DISK_FORMAT, CONTAINER_FORMAT, LOCATION, TAGS,
ARCHITECTURE, KERNEL_ID, OS_DISTRO, OS_VERSION, OWNER,
- VISIBILITY, RAMDISK_ID
+ VISIBILITY, RAMDISK_ID, ACTIVE, MEMBERS
) = (
'name', 'id', 'min_disk', 'min_ram', 'protected',
'disk_format', 'container_format', 'location', 'tags',
- 'architecture', 'kernel_id', 'os_distro', 'os_version', 'owner',
- 'visibility', 'ramdisk_id'
+ 'architecture', 'kernel_id', 'os_distro', 'os_version',
+ 'owner', 'visibility', 'ramdisk_id', 'active', 'members'
)
glance_id_pattern = ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
@@ -75,6 +75,7 @@ class GlanceWebImage(resource.Resource):
properties.Schema.BOOLEAN,
_('Whether the image can be deleted. If the value is True, '
'the image is protected and cannot be deleted.'),
+ update_allowed=True,
default=False
),
DISK_FORMAT: properties.Schema(
@@ -156,6 +157,28 @@ class GlanceWebImage(resource.Resource):
constraints=[
constraints.AllowedPattern(glance_id_pattern)
]
+ ),
+ ACTIVE: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('Activate or deactivate the image. Requires Admin Access.'),
+ default=True,
+ update_allowed=True,
+ support_status=support.SupportStatus(version='16.0.0')
+ ),
+ MEMBERS: properties.Schema(
+ properties.Schema.LIST,
+ _('List of additional members that are permitted '
+ 'to read the image. This may be a Keystone Project '
+ 'IDs or User IDs, depending on the Glance configuration '
+ 'in use.'),
+ schema=properties.Schema(
+ properties.Schema.STRING,
+ _('A member ID. This may be a Keystone Project ID '
+ 'or User ID, depending on the Glance configuration '
+ 'in use.')
+ ),
+ update_allowed=True,
+ support_status=support.SupportStatus(version='16.0.0')
)
}
@@ -166,42 +189,78 @@ class GlanceWebImage(resource.Resource):
def handle_create(self):
args = dict((k, v) for k, v in self.properties.items()
if v is not None)
-
+ members = args.pop(self.MEMBERS, [])
+ active = args.pop(self.ACTIVE)
location = args.pop(self.LOCATION)
images = self.client().images
- image_id = images.create(
- **args).id
+ image = images.create(**args)
+ image_id = image.id
self.resource_id_set(image_id)
-
images.image_import(image_id, method='web-download', uri=location)
-
- return image_id
-
- def check_create_complete(self, image_id):
- image = self.client().images.get(image_id)
- return image.status == 'active'
+ for member in members:
+ self.client().image_members.create(image_id, member)
+ return active
+
+ def check_create_complete(self, active):
+ image = self.client().images.get(self.resource_id)
+ if image.status == 'killed':
+ raise exception.ResourceInError(
+ resource_status=image.status,
+ )
+ if not active:
+ if image.status == 'active':
+ self.client().images.deactivate(self.resource_id)
+ return True
+ elif image.status == 'deactivated':
+ return True
+ else:
+ return False
+ else:
+ return image.status == 'active'
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
- if prop_diff and self.TAGS in prop_diff:
- existing_tags = self.properties.get(self.TAGS) or []
- diff_tags = prop_diff.pop(self.TAGS) or []
-
- new_tags = set(diff_tags) - set(existing_tags)
- for tag in new_tags:
- self.client().image_tags.update(
- self.resource_id,
- tag)
-
- removed_tags = set(existing_tags) - set(diff_tags)
- for tag in removed_tags:
- with self.client_plugin().ignore_not_found:
- self.client().image_tags.delete(
+ if prop_diff:
+ active = prop_diff.pop(self.ACTIVE, None)
+ if active is False:
+ self.client().images.deactivate(self.resource_id)
+
+ if self.TAGS in prop_diff:
+ existing_tags = self.properties.get(self.TAGS) or []
+ diff_tags = prop_diff.pop(self.TAGS) or []
+
+ new_tags = set(diff_tags) - set(existing_tags)
+ for tag in new_tags:
+ self.client().image_tags.update(
self.resource_id,
tag)
- images = self.client().images
-
- images.update(self.resource_id, **prop_diff)
+ removed_tags = set(existing_tags) - set(diff_tags)
+ for tag in removed_tags:
+ with self.client_plugin().ignore_not_found:
+ self.client().image_tags.delete(
+ self.resource_id,
+ tag)
+
+ if self.MEMBERS in prop_diff:
+ existing_members = self.properties.get(self.MEMBERS) or []
+ diff_members = prop_diff.pop(self.MEMBERS) or []
+
+ new_members = set(diff_members) - set(existing_members)
+ for _member in new_members:
+ self.glance().image_members.create(
+ self.resource_id, _member)
+ removed_members = set(existing_members) - set(diff_members)
+ for _member in removed_members:
+ self.glance().image_members.delete(
+ self.resource_id, _member)
+
+ self.client().images.update(self.resource_id, **prop_diff)
+ return active
+
+ def check_update_complete(self, active):
+ if active:
+ self.client().images.reactivate(self.resource_id)
+ return True
def validate(self):
super(GlanceWebImage, self).validate()
@@ -214,11 +273,18 @@ class GlanceWebImage(resource.Resource):
"match.")
raise exception.StackValidationFailed(message=msg)
+ if (self.properties[self.MEMBERS]
+ and self.properties[self.VISIBILITY] != 'shared'):
+ raise exception.ResourcePropertyValueDependency(
+ prop1=self.MEMBERS,
+ prop2=self.VISIBILITY,
+ value='shared')
+
def get_live_resource_data(self):
image_data = super(GlanceWebImage, self).get_live_resource_data()
if image_data.get('status') in ('deleted', 'killed'):
- raise exception.EntityNotFound(entity='Resource',
- name=self.name)
+ raise exception.EntityNotFound(entity='Resource',
+ name=self.name)
return image_data
def parse_live_resource_data(self, resource_properties, resource_data):
@@ -479,8 +545,8 @@ class GlanceImage(resource.Resource):
def get_live_resource_data(self):
image_data = super(GlanceImage, self).get_live_resource_data()
if image_data.get('status') in ('deleted', 'killed'):
- raise exception.EntityNotFound(entity='Resource',
- name=self.name)
+ raise exception.EntityNotFound(entity='Resource',
+ name=self.name)
return image_data
def parse_live_resource_data(self, resource_properties, resource_data):
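The new MEMBERS handling above computes set differences between the old and new member lists and adds or removes image members accordingly. A hedged, self-contained sketch of that reconciliation step (FakeMembersAPI is hypothetical, not the Glance client):

# Sketch: reconcile image members against a desired list, mirroring the
# set-difference logic in handle_update above.

def reconcile_members(api, image_id, existing, desired):
    existing, desired = set(existing or []), set(desired or [])
    for member in desired - existing:        # newly requested members
        api.create(image_id, member)
    for member in existing - desired:        # members no longer wanted
        api.delete(image_id, member)


class FakeMembersAPI:
    """Hypothetical stand-in for client().image_members."""
    def __init__(self):
        self.members = set()

    def create(self, image_id, member):
        self.members.add(member)

    def delete(self, image_id, member):
        self.members.discard(member)

# api = FakeMembersAPI()
# reconcile_members(api, 'img-1', existing=['proj-a'], desired=['proj-b'])
# api.members -> {'proj-b'}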
diff --git a/heat/engine/resources/openstack/heat/autoscaling_group.py b/heat/engine/resources/openstack/heat/autoscaling_group.py
index ed48bebf3..8561048cc 100644
--- a/heat/engine/resources/openstack/heat/autoscaling_group.py
+++ b/heat/engine/resources/openstack/heat/autoscaling_group.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from oslo_log import log as logging
from heat.common import exception
@@ -208,9 +206,9 @@ class AutoScalingResourceGroup(aws_asg.AutoScalingGroup):
template_version=template_version)
def _attribute_output_name(self, *attr_path):
- return ', '.join(six.text_type(a) for a in attr_path)
+ return ', '.join(str(a) for a in attr_path)
- def get_attribute(self, key, *path):
+ def get_attribute(self, key, *path): # noqa: C901
if key == self.CURRENT_SIZE:
return grouputils.get_size(self)
@@ -280,7 +278,7 @@ class AutoScalingResourceGroup(aws_asg.AutoScalingGroup):
def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
- if isinstance(attr, six.string_types):
+ if isinstance(attr, str):
key, path = attr, []
else:
key, path = attr[0], list(attr[1:])
diff --git a/heat/engine/resources/openstack/heat/delay.py b/heat/engine/resources/openstack/heat/delay.py
index 643a2b2c0..5a6623273 100644
--- a/heat/engine/resources/openstack/heat/delay.py
+++ b/heat/engine/resources/openstack/heat/delay.py
@@ -51,7 +51,7 @@ class Delay(resource.Resource):
)
PROPERTIES = (
- MIN_WAIT_SECS, MAX_JITTER, JITTER_MULTIPLIER_SECS, ACTIONS,
+ MIN_WAIT_SECS, MAX_JITTER, JITTER_MULTIPLIER_SECS, DELAY_ACTIONS,
) = (
'min_wait', 'max_jitter', 'jitter_multiplier', 'actions',
)
@@ -84,7 +84,7 @@ class Delay(resource.Resource):
constraints.Range(min=0),
]
),
- ACTIONS: properties.Schema(
+ DELAY_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('Actions during which the delay will occur.'),
update_allowed=True,
@@ -119,7 +119,7 @@ class Delay(resource.Resource):
def _wait_secs(self, action):
"""Return a (randomised) wait time for the specified action."""
- if action not in self.properties[self.ACTIONS]:
+ if action not in self.properties[self.DELAY_ACTIONS]:
return 0
min_wait_secs, max_jitter_secs = self._delay_parameters()
diff --git a/heat/engine/resources/openstack/heat/deployed_server.py b/heat/engine/resources/openstack/heat/deployed_server.py
index cd62dae24..8402e107d 100644
--- a/heat/engine/resources/openstack/heat/deployed_server.py
+++ b/heat/engine/resources/openstack/heat/deployed_server.py
@@ -150,6 +150,8 @@ class DeployedServer(server_base.BaseServer):
),
}
+ default_client_name = 'heat'
+
def __init__(self, name, json_snippet, stack):
super(DeployedServer, self).__init__(name, json_snippet, stack)
self._register_access_key()
diff --git a/heat/engine/resources/openstack/heat/instance_group.py b/heat/engine/resources/openstack/heat/instance_group.py
index 103737d44..86b19f181 100644
--- a/heat/engine/resources/openstack/heat/instance_group.py
+++ b/heat/engine/resources/openstack/heat/instance_group.py
@@ -12,7 +12,6 @@
# under the License.
import functools
-import six
from oslo_log import log as logging
@@ -140,6 +139,10 @@ class InstanceGroup(stack_resource.StackResource):
schema=rolling_update_schema)
}
+ def get_size(self):
+ """Get desired size."""
+ return self.properties[self.SIZE]
+
def validate(self):
"""Add validation for update_policy."""
self.validate_launchconfig()
@@ -335,7 +338,12 @@ class InstanceGroup(stack_resource.StackResource):
old_template = group_data.template()
capacity = group_data.size(include_failed=True)
- batches = list(self._get_batches(capacity, batch_size, min_in_service))
+
+ target_capacity = min(self.get_size() or capacity, capacity)
+
+ batches = list(self._get_batches(target_capacity,
+ capacity, batch_size,
+ min_in_service))
update_timeout = self._update_timeout(len(batches), pause_sec)
@@ -359,7 +367,7 @@ class InstanceGroup(stack_resource.StackResource):
self._lb_reload()
@staticmethod
- def _get_batches(capacity, batch_size, min_in_service):
+ def _get_batches(target_capacity, capacity, batch_size, min_in_service):
"""Return an iterator over the batches in a batched update.
Each batch is a tuple comprising the total size of the group after
@@ -368,15 +376,14 @@ class InstanceGroup(stack_resource.StackResource):
updating an existing one).
"""
- efft_capacity = capacity
updated = 0
- while rolling_update.needs_update(capacity, efft_capacity, updated):
- batch = rolling_update.next_batch(capacity, efft_capacity,
+ while rolling_update.needs_update(target_capacity, capacity, updated):
+ batch = rolling_update.next_batch(target_capacity, capacity,
updated, batch_size,
min_in_service)
yield batch
- efft_capacity, num_updates = batch
+ capacity, num_updates = batch
updated += num_updates
def _check_for_completion(self, updater):
@@ -422,9 +429,6 @@ class InstanceGroup(stack_resource.StackResource):
lbs = [self.stack[name] for name in lb_names]
lbutils.reconfigure_loadbalancers(lbs, id_list)
- def get_reference_id(self):
- return self.physical_resource_name_or_FnGetRefId()
-
def _group_data(self, refresh=False):
"""Return a cached GroupInspector object for the nested stack."""
if refresh or getattr(self, '_group_inspector', None) is None:
@@ -460,7 +464,7 @@ class InstanceGroup(stack_resource.StackResource):
def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
- if isinstance(attr, six.string_types):
+ if isinstance(attr, str):
key = attr
else:
key = attr[0]
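The rolling-update change in instance_group.py above caps the batch calculation at the group's desired size (target_capacity) rather than at the current capacity. A rough standalone model of that batch generator, under stated simplifications (it does not use Heat's rolling_update helpers, and the temporary growth needed to honour min_in_service is omitted):

# Simplified model of the batched update: update at most batch_size
# members per batch until target_capacity members are on the new
# definition.

def batches(target_capacity, batch_size):
    updated = 0
    while updated < target_capacity:
        num = min(batch_size, target_capacity - updated)
        updated += num
        yield target_capacity, num   # (group size after batch, members updated)

# list(batches(target_capacity=5, batch_size=2))
# -> [(5, 2), (5, 2), (5, 1)]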
diff --git a/heat/engine/resources/openstack/heat/multi_part.py b/heat/engine/resources/openstack/heat/multi_part.py
index 6644721d9..e7b811739 100644
--- a/heat/engine/resources/openstack/heat/multi_part.py
+++ b/heat/engine/resources/openstack/heat/multi_part.py
@@ -45,9 +45,9 @@ class MultipartMime(software_config.SoftwareConfig):
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
- PARTS, CONFIG, FILENAME, TYPE, SUBTYPE
+ PARTS, CONFIG, FILENAME, TYPE, SUBTYPE, GROUP
) = (
- 'parts', 'config', 'filename', 'type', 'subtype'
+ 'parts', 'config', 'filename', 'type', 'subtype', 'group'
)
TYPES = (
@@ -57,6 +57,14 @@ class MultipartMime(software_config.SoftwareConfig):
)
properties_schema = {
+ GROUP: properties.Schema(
+ properties.Schema.STRING,
+ _('Namespace to group these multi-part configs by when delivered '
+ 'to a server. This may imply what configuration tool is going '
+ 'to perform the configuration.'),
+ support_status=support.SupportStatus(version='14.0.0'),
+ default='Heat::Ungrouped'
+ ),
PARTS: properties.Schema(
properties.Schema.LIST,
_('Parts belonging to this message.'),
@@ -96,7 +104,7 @@ class MultipartMime(software_config.SoftwareConfig):
props = {
rpc_api.SOFTWARE_CONFIG_NAME: self.physical_resource_name(),
rpc_api.SOFTWARE_CONFIG_CONFIG: self.get_message(),
- rpc_api.SOFTWARE_CONFIG_GROUP: 'Heat::Ungrouped'
+ rpc_api.SOFTWARE_CONFIG_GROUP: self.properties[self.GROUP]
}
sc = self.rpc_client().create_software_config(self.context, **props)
self.resource_id_set(sc[rpc_api.SOFTWARE_CONFIG_ID])
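With the new GROUP property, the software-config group stored for the multipart message now comes from the template instead of the hard-coded 'Heat::Ungrouped'. A small sketch of how the config payload ends up being built (the plain key names are illustrative, not the rpc_api constants):

# Sketch: build the software-config payload, letting the template
# override the group while keeping the old default.

def build_config_props(name, message, group=None):
    return {
        'name': name,
        'config': message,
        'group': group or 'Heat::Ungrouped',   # previous hard-coded value
    }

# build_config_props('mp-config-1', 'Content-Type: multipart/mixed; ...')
# -> {'name': 'mp-config-1', 'config': '...', 'group': 'Heat::Ungrouped'}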
diff --git a/heat/engine/resources/openstack/heat/none_resource.py b/heat/engine/resources/openstack/heat/none_resource.py
index 8932344eb..e0e871a37 100644
--- a/heat/engine/resources/openstack/heat/none_resource.py
+++ b/heat/engine/resources/openstack/heat/none_resource.py
@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
import uuid
from heat.engine import properties
@@ -47,7 +46,7 @@ class NoneResource(resource.Resource):
self.translate_properties(self.properties, client_resolve)
def handle_create(self):
- self.resource_id_set(six.text_type(uuid.uuid4()))
+ self.resource_id_set(str(uuid.uuid4()))
# set is_placeholder flag when resource trying to replace original
# resource with a placeholder resource.
self.data_set(self.IS_PLACEHOLDER, 'True')
diff --git a/heat/engine/resources/openstack/heat/random_string.py b/heat/engine/resources/openstack/heat/random_string.py
index 9c457a936..4b03d9854 100644
--- a/heat/engine/resources/openstack/heat/random_string.py
+++ b/heat/engine/resources/openstack/heat/random_string.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.common import password_gen
@@ -235,7 +233,7 @@ class RandomString(resource.Resource):
if self.resource_id is not None:
return self.data().get('value')
else:
- return six.text_type(self.name)
+ return str(self.name)
def resource_mapping():
diff --git a/heat/engine/resources/openstack/heat/remote_stack.py b/heat/engine/resources/openstack/heat/remote_stack.py
index 3b8dafdb9..4574de834 100644
--- a/heat/engine/resources/openstack/heat/remote_stack.py
+++ b/heat/engine/resources/openstack/heat/remote_stack.py
@@ -13,7 +13,6 @@
from oslo_log import log as logging
from oslo_serialization import jsonutils
-import six
import tempfile
from heat.common import auth_plugin
@@ -49,7 +48,7 @@ class TempCACertFile(object):
try:
self._cacert_temp_file = tempfile.NamedTemporaryFile()
self._cacert_temp_file.write(
- six.text_type(self._cacert).encode('utf-8'))
+ str(self._cacert).encode('utf-8'))
# Add seek func to make sure the written context will flush to
# tempfile with python 2.7. we can use flush() for python 2.7
# but not 3.5.
@@ -73,6 +72,8 @@ class RemoteStack(resource.Resource):
"""
default_client_name = 'heat'
+ entity = 'stacks'
+
PROPERTIES = (
CONTEXT, TEMPLATE, TIMEOUT, PARAMETERS,
) = (
@@ -264,7 +265,7 @@ class RemoteStack(resource.Resource):
location = "remote cloud"
else:
location = 'region "%s"' % self._region_name
- exc_info = dict(location=location, exc=six.text_type(ex))
+ exc_info = dict(location=location, exc=str(ex))
msg = _('Cannot establish connection to Heat endpoint at '
'%(location)s due to "%(exc)s"') % exc_info
raise exception.StackValidationFailed(message=msg)
@@ -285,7 +286,7 @@ class RemoteStack(resource.Resource):
location = "remote cloud"
else:
location = 'region "%s"' % self._region_name
- exc_info = dict(location=location, exc=six.text_type(ex))
+ exc_info = dict(location=location, exc=str(ex))
msg = _('Failed validating stack template using Heat endpoint at '
'%(location)s due to "%(exc)s"') % exc_info
raise exception.StackValidationFailed(message=msg)
@@ -314,6 +315,8 @@ class RemoteStack(resource.Resource):
with TempCACertFile(self.cacert) as cacert_path:
self.heat(
cacert_path).stacks.delete(stack_id=self.resource_id)
+ return self.resource_id
+ return None
def handle_resume(self):
if self.resource_id is None:
@@ -414,15 +417,12 @@ class RemoteStack(resource.Resource):
def check_create_complete(self, *args):
return self._check_action_complete(action=self.CREATE)
- def check_delete_complete(self, *args):
- if self.resource_id is None:
- return True
+ def check_delete_complete(self, deleting_resource_id=None):
+ if deleting_resource_id is not None:
+ with self.client_plugin().ignore_not_found:
+ return self._check_action_complete(action=self.DELETE)
- try:
- return self._check_action_complete(action=self.DELETE)
- except Exception as ex:
- self.client_plugin().ignore_not_found(ex)
- return True
+ return True
def check_resume_complete(self, *args):
return self._check_action_complete(action=self.RESUME)
@@ -467,7 +467,7 @@ class RemoteStack(resource.Resource):
after_props.get(self.CONTEXT).get(
'region_name') != before_props.get(self.CONTEXT).get(
'region_name')):
- return True
+ return True
return False
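TempCACertFile above writes the CA certificate to a NamedTemporaryFile so the remote heatclient can be pointed at a file path. A minimal standalone version of the same idea, written as a context manager with error handling reduced to the essentials:

# Sketch: expose an in-memory CA certificate as a temporary file path,
# similar in spirit to TempCACertFile above.

import tempfile
from contextlib import contextmanager


@contextmanager
def temp_cacert_file(cacert):
    if cacert is None:
        yield None                   # nothing to write, no file needed
        return
    with tempfile.NamedTemporaryFile() as f:
        f.write(str(cacert).encode('utf-8'))
        f.flush()                    # make sure the client can read it
        yield f.name

# with temp_cacert_file('-----BEGIN CERTIFICATE-----...') as path:
#     client = make_heat_client(cacert=path)   # hypothetical helper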
diff --git a/heat/engine/resources/openstack/heat/resource_chain.py b/heat/engine/resources/openstack/heat/resource_chain.py
index 9cadc78ce..ff58711f1 100644
--- a/heat/engine/resources/openstack/heat/resource_chain.py
+++ b/heat/engine/resources/openstack/heat/resource_chain.py
@@ -12,7 +12,6 @@
# under the License.
import functools
-import six
from oslo_log import log as logging
@@ -155,7 +154,7 @@ class ResourceChain(stack_resource.StackResource):
return {}
def _attribute_output_name(self, *attr_path):
- return ', '.join(six.text_type(a) for a in attr_path)
+ return ', '.join(str(a) for a in attr_path)
def get_attribute(self, key, *path):
if key == self.ATTR_ATTRIBUTES and not path:
@@ -198,7 +197,7 @@ class ResourceChain(stack_resource.StackResource):
def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
- if isinstance(attr, six.string_types):
+ if isinstance(attr, str):
key, path = attr, []
else:
key, path = attr[0], list(attr[1:])
@@ -227,7 +226,7 @@ class ResourceChain(stack_resource.StackResource):
@staticmethod
def _resource_names(resource_types):
"""Returns a list of unique resource names to create."""
- return [six.text_type(i) for i, t in enumerate(resource_types)]
+ return [str(i) for i, t in enumerate(resource_types)]
def _build_resource_definition(self, resource_name, resource_type,
depends_on=None):
diff --git a/heat/engine/resources/openstack/heat/resource_group.py b/heat/engine/resources/openstack/heat/resource_group.py
index c43746d2c..48ef05ae9 100644
--- a/heat/engine/resources/openstack/heat/resource_group.py
+++ b/heat/engine/resources/openstack/heat/resource_group.py
@@ -15,7 +15,6 @@ import collections
import copy
import functools
import itertools
-import six
from oslo_log import log as logging
@@ -306,7 +305,7 @@ class ResourceGroup(stack_resource.StackResource):
first_name = next(self._resource_names())
test_tmpl = self._assemble_nested([first_name],
include_all=True)
- res_def = next(six.itervalues(test_tmpl.resource_definitions(None)))
+ res_def = next(iter(test_tmpl.resource_definitions(None).values()))
# make sure we can resolve the nested resource type
self.stack.env.get_class_to_instantiate(res_def.resource_type)
@@ -323,28 +322,28 @@ class ResourceGroup(stack_resource.StackResource):
raise exception.StackValidationFailed(
ex, path=[self.stack.t.RESOURCES, path])
- def _current_blacklist(self):
+ def _current_skiplist(self):
db_rsrc_names = self.data().get('name_blacklist')
if db_rsrc_names:
return db_rsrc_names.split(',')
else:
return []
- def _get_new_blacklist_entries(self, properties, current_blacklist):
+ def _get_new_skiplist_entries(self, properties, current_skiplist):
insp = grouputils.GroupInspector.from_parent_resource(self)
- # Now we iterate over the removal policies, and update the blacklist
+ # Now we iterate over the removal policies, and update the skiplist
# with any additional names
for r in properties.get(self.REMOVAL_POLICIES, []):
if self.REMOVAL_RSRC_LIST in r:
# Tolerate string or int list values
for n in r[self.REMOVAL_RSRC_LIST]:
- str_n = six.text_type(n)
- if (str_n in current_blacklist or
+ str_n = str(n)
+ if (str_n in current_skiplist or
self.resource_id is None or
str_n in insp.member_names(include_failed=True)):
yield str_n
- elif isinstance(n, six.string_types):
+ elif isinstance(n, str):
try:
refids = self.get_output(self.REFS_MAP)
except (exception.NotFound,
@@ -365,52 +364,52 @@ class ResourceGroup(stack_resource.StackResource):
# outdated values after stack update.
self._outputs = None
- def _update_name_blacklist(self, properties):
+ def _update_name_skiplist(self, properties):
"""Resolve the remove_policies to names for removal."""
# To avoid reusing names after removal, we store a comma-separated
- # blacklist in the resource data - in cases where you want to
+ # skiplist in the resource data - in cases where you want to
# overwrite the stored data, removal_policies_mode: update can be used
- curr_bl = set(self._current_blacklist())
+ curr_sl = set(self._current_skiplist())
p_mode = properties.get(self.REMOVAL_POLICIES_MODE,
self.REMOVAL_POLICY_APPEND)
if p_mode == self.REMOVAL_POLICY_UPDATE:
- init_bl = set()
+ init_sl = set()
else:
- init_bl = curr_bl
- updated_bl = init_bl | set(self._get_new_blacklist_entries(properties,
- curr_bl))
+ init_sl = curr_sl
+ updated_sl = init_sl | set(self._get_new_skiplist_entries(properties,
+ curr_sl))
- # If the blacklist has changed, update the resource data
- if updated_bl != curr_bl:
- self.data_set('name_blacklist', ','.join(sorted(updated_bl)))
+ # If the skiplist has changed, update the resource data
+ if updated_sl != curr_sl:
+ self.data_set('name_blacklist', ','.join(sorted(updated_sl)))
- def _name_blacklist(self):
- """Get the list of resource names to blacklist."""
- bl = set(self._current_blacklist())
+ def _name_skiplist(self):
+ """Get the list of resource names to skiplist."""
+ sl = set(self._current_skiplist())
if self.resource_id is None:
- bl |= set(self._get_new_blacklist_entries(self.properties, bl))
- return bl
+ sl |= set(self._get_new_skiplist_entries(self.properties, sl))
+ return sl
def _resource_names(self, size=None):
- name_blacklist = self._name_blacklist()
+ name_skiplist = self._name_skiplist()
if size is None:
size = self.get_size()
- def is_blacklisted(name):
- return name in name_blacklist
+ def is_skipped(name):
+ return name in name_skiplist
- candidates = six.moves.map(six.text_type, itertools.count())
+ candidates = map(str, itertools.count())
- return itertools.islice(six.moves.filterfalse(is_blacklisted,
+ return itertools.islice(itertools.filterfalse(is_skipped,
candidates),
size)
- def _count_black_listed(self, existing_members):
- """Return the number of current resource names that are blacklisted."""
- return len(self._name_blacklist() & set(existing_members))
+ def _count_skipped(self, existing_members):
+ """Return the number of current resource names that are skipped."""
+ return len(self._name_skiplist() & set(existing_members))
def handle_create(self):
- self._update_name_blacklist(self.properties)
+ self._update_name_skiplist(self.properties)
if self.update_policy.get(self.BATCH_CREATE) and self.get_size():
batch_create = self.update_policy[self.BATCH_CREATE]
max_batch_size = batch_create[self.MAX_BATCH_SIZE]
@@ -469,7 +468,7 @@ class ResourceGroup(stack_resource.StackResource):
checkers = []
self.properties = json_snippet.properties(self.properties_schema,
self.context)
- self._update_name_blacklist(self.properties)
+ self._update_name_skiplist(self.properties)
if prop_diff and self.res_def_changed(prop_diff):
updaters = self._try_rolling_update()
if updaters:
@@ -488,11 +487,11 @@ class ResourceGroup(stack_resource.StackResource):
def _attribute_output_name(self, *attr_path):
if attr_path[0] == self.REFS:
return self.REFS
- return ', '.join(six.text_type(a) for a in attr_path)
+ return ', '.join(str(a) for a in attr_path)
def get_attribute(self, key, *path):
if key == self.REMOVED_RSRC_LIST:
- return self._current_blacklist()
+ return self._current_skiplist()
if key == self.ATTR_ATTRIBUTES and not path:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
@@ -546,7 +545,7 @@ class ResourceGroup(stack_resource.StackResource):
def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
- if isinstance(attr, six.string_types):
+ if isinstance(attr, str):
key, path = attr, []
else:
key, path = attr[0], list(attr[1:])
@@ -601,19 +600,25 @@ class ResourceGroup(stack_resource.StackResource):
# At this stage, we don't mind if all of the parameters have values
# assigned. Pass in a custom resolver to the properties to not
# error when a parameter does not have a user entered value.
- def ignore_param_resolve(snippet):
+ def ignore_param_resolve(snippet, nullable=False):
if isinstance(snippet, function.Function):
try:
- return snippet.result()
+ result = snippet.result()
except exception.UserParameterMissing:
return None
-
- if isinstance(snippet, collections.Mapping):
- return dict((k, ignore_param_resolve(v))
- for k, v in snippet.items())
- elif (not isinstance(snippet, six.string_types) and
- isinstance(snippet, collections.Iterable)):
- return [ignore_param_resolve(v) for v in snippet]
+ if not (nullable or function._non_null_value(result)):
+ result = None
+ return result
+
+ if isinstance(snippet, collections.abc.Mapping):
+ return dict(filter(function._non_null_item,
+ ((k, ignore_param_resolve(v, nullable=True))
+ for k, v in snippet.items())))
+ elif (not isinstance(snippet, str) and
+ isinstance(snippet, collections.abc.Iterable)):
+ return list(filter(function._non_null_value,
+ (ignore_param_resolve(v, nullable=True)
+ for v in snippet)))
return snippet
@@ -639,11 +644,11 @@ class ResourceGroup(stack_resource.StackResource):
def recurse(x):
return self._handle_repl_val(res_name, x)
- if isinstance(val, six.string_types):
+ if isinstance(val, str):
return val.replace(repl_var, res_name)
- elif isinstance(val, collections.Mapping):
+ elif isinstance(val, collections.abc.Mapping):
return {k: recurse(v) for k, v in val.items()}
- elif isinstance(val, collections.Sequence):
+ elif isinstance(val, collections.abc.Sequence):
return [recurse(v) for v in val]
return val
@@ -681,11 +686,11 @@ class ResourceGroup(stack_resource.StackResource):
template_version=('heat_template_version',
'2015-04-30')):
names = list(self._resource_names(total_capacity))
- name_blacklist = self._name_blacklist()
+ name_skiplist = self._name_skiplist()
valid_resources = [(n, d) for n, d in
grouputils.get_member_definitions(self)
- if n not in name_blacklist]
+ if n not in name_skiplist]
targ_cap = self.get_size()
@@ -706,7 +711,7 @@ class ResourceGroup(stack_resource.StackResource):
old_resources = sorted(valid_resources, key=replace_priority)
existing_names = set(n for n, d in valid_resources)
- new_names = six.moves.filterfalse(lambda n: n in existing_names,
+ new_names = itertools.filterfalse(lambda n: n in existing_names,
names)
res_def = self.get_resource_def(include_all)
definitions = scl_template.member_definitions(
@@ -729,7 +734,7 @@ class ResourceGroup(stack_resource.StackResource):
def _resolve_attribute(self, name):
if name == self.REMOVED_RSRC_LIST:
- return self._current_blacklist()
+ return self._current_skiplist()
def _update_timeout(self, batch_cnt, pause_sec):
total_pause_time = pause_sec * max(batch_cnt - 1, 0)
@@ -762,12 +767,12 @@ class ResourceGroup(stack_resource.StackResource):
while not duration.expired():
yield
- # current capacity not including existing blacklisted
+ # current capacity not including existing skiplisted
inspector = grouputils.GroupInspector.from_parent_resource(self)
- num_blacklist = self._count_black_listed(
+ num_skiplist = self._count_skipped(
inspector.member_names(include_failed=False))
num_resources = inspector.size(include_failed=True)
- curr_cap = num_resources - num_blacklist
+ curr_cap = num_resources - num_skiplist
batches = list(self._get_batches(self.get_size(), curr_cap, batch_size,
min_in_service))
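The renamed skiplist machinery above still does the same job: generate candidate member names 0, 1, 2, ... and skip any name recorded for removal. A standalone sketch of that name generator:

# Sketch: yield `size` member names, skipping names on the skiplist,
# mirroring _resource_names() above.

import itertools


def resource_names(size, skiplist=()):
    skiplist = set(skiplist)
    candidates = map(str, itertools.count())
    return itertools.islice(
        itertools.filterfalse(lambda name: name in skiplist, candidates),
        size)

# list(resource_names(3, skiplist={'1'}))  ->  ['0', '2', '3']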
diff --git a/heat/engine/resources/openstack/heat/scaling_policy.py b/heat/engine/resources/openstack/heat/scaling_policy.py
index c0afc212e..8ca88275f 100644
--- a/heat/engine/resources/openstack/heat/scaling_policy.py
+++ b/heat/engine/resources/openstack/heat/scaling_policy.py
@@ -12,7 +12,6 @@
# under the License.
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -99,7 +98,8 @@ class AutoScalingPolicy(signal_responder.SignalResponder):
attributes_schema = {
ALARM_URL: attributes.Schema(
_("A signed url to handle the alarm."),
- type=attributes.Schema.STRING
+ type=attributes.Schema.STRING,
+ cache_mode=attributes.Schema.CACHE_NONE
),
SIGNAL_URL: attributes.Schema(
_("A url to handle the alarm using native API."),
@@ -186,9 +186,9 @@ class AutoScalingPolicy(signal_responder.SignalResponder):
if self.resource_id is None:
return
if name == self.ALARM_URL:
- return six.text_type(self._get_ec2_signed_url())
+ return str(self._get_ec2_signed_url(never_expire=True))
elif name == self.SIGNAL_URL:
- return six.text_type(self._get_heat_signal_url())
+ return str(self._get_heat_signal_url())
def resource_mapping():
diff --git a/heat/engine/resources/openstack/heat/software_config.py b/heat/engine/resources/openstack/heat/software_config.py
index b03d9b917..1dcc6ed5c 100644
--- a/heat/engine/resources/openstack/heat/software_config.py
+++ b/heat/engine/resources/openstack/heat/software_config.py
@@ -44,6 +44,10 @@ class SoftwareConfig(resource.Resource):
support_status = support.SupportStatus(version='2014.1')
+ default_client_name = 'heat'
+
+ entity = 'software_configs'
+
PROPERTIES = (
GROUP, CONFIG,
OPTIONS,
diff --git a/heat/engine/resources/openstack/heat/software_deployment.py b/heat/engine/resources/openstack/heat/software_deployment.py
index 2e2b6b34c..2e0ddeafa 100644
--- a/heat/engine/resources/openstack/heat/software_deployment.py
+++ b/heat/engine/resources/openstack/heat/software_deployment.py
@@ -12,8 +12,7 @@
# under the License.
import copy
-import six
-from six import itertools
+import itertools
import uuid
from oslo_config import cfg
@@ -72,6 +71,10 @@ class SoftwareDeployment(signal_responder.SignalResponder):
support_status = support.SupportStatus(version='2014.1')
+ default_client_name = 'heat'
+
+ entity = 'software_deployments'
+
PROPERTIES = (
CONFIG, SERVER, INPUT_VALUES,
DEPLOY_ACTIONS, NAME, SIGNAL_TRANSPORT
@@ -706,10 +709,10 @@ class SoftwareDeploymentGroup(resource_group.ResourceGroup):
def res_def_changed(self, prop_diff):
return True
- def _update_name_blacklist(self, properties):
+ def _update_name_skiplist(self, properties):
pass
- def _name_blacklist(self):
+ def _name_skiplist(self):
return set()
def get_resource_def(self, include_all=False):
@@ -745,7 +748,7 @@ class SoftwareDeploymentGroup(resource_group.ResourceGroup):
def _nested_output_defns(self, resource_names, get_attr_fn, get_res_fn):
for attr in self.referenced_attrs():
- key = attr if isinstance(attr, six.string_types) else attr[0]
+ key = attr if isinstance(attr, str) else attr[0]
n_attr = self._member_attribute_name(key)
output_name = self._attribute_output_name(self.ATTR_ATTRIBUTES,
n_attr)
diff --git a/heat/engine/resources/openstack/heat/structured_config.py b/heat/engine/resources/openstack/heat/structured_config.py
index 836e2ef92..abcb676d6 100644
--- a/heat/engine/resources/openstack/heat/structured_config.py
+++ b/heat/engine/resources/openstack/heat/structured_config.py
@@ -15,8 +15,6 @@ import collections
import copy
import functools
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
@@ -150,8 +148,8 @@ class StructuredDeployment(sd.SoftwareDeployment):
def get_input_key_arg(snippet, input_key):
if len(snippet) != 1:
return None
- fn_name, fn_arg = next(six.iteritems(snippet))
- if (fn_name == input_key and isinstance(fn_arg, six.string_types)):
+ fn_name, fn_arg = next(iter(snippet.items()))
+ if (fn_name == input_key and isinstance(fn_arg, str)):
return fn_arg
@staticmethod
@@ -168,16 +166,16 @@ class StructuredDeployment(sd.SoftwareDeployment):
input_key,
check_input_val=check_input_val)
- if isinstance(snippet, collections.Mapping):
+ if isinstance(snippet, collections.abc.Mapping):
fn_arg = StructuredDeployment.get_input_key_arg(snippet, input_key)
if fn_arg is not None:
return StructuredDeployment.get_input_key_value(fn_arg, inputs,
check_input_val
)
- return dict((k, parse(v)) for k, v in six.iteritems(snippet))
- elif (not isinstance(snippet, six.string_types) and
- isinstance(snippet, collections.Iterable)):
+ return dict((k, parse(v)) for k, v in snippet.items())
+ elif (not isinstance(snippet, str) and
+ isinstance(snippet, collections.abc.Iterable)):
return [parse(v) for v in snippet]
else:
return snippet
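The parse() logic above walks an arbitrarily nested config and replaces {'get_input': name} mappings with values from the inputs dict. A self-contained approximation of that traversal (without Heat's check_input_val handling):

# Sketch: recursively substitute {'get_input': <name>} snippets with
# values from `inputs`, as StructuredDeployment.parse() does above.

import collections.abc


def parse(snippet, inputs, input_key='get_input'):
    if isinstance(snippet, collections.abc.Mapping):
        if len(snippet) == 1:
            fn_name, fn_arg = next(iter(snippet.items()))
            if fn_name == input_key and isinstance(fn_arg, str):
                return inputs.get(fn_arg)
        return {k: parse(v, inputs) for k, v in snippet.items()}
    elif not isinstance(snippet, str) and isinstance(
            snippet, collections.abc.Iterable):
        return [parse(v, inputs) for v in snippet]
    return snippet

# parse({'config': {'port': {'get_input': 'port'}}}, {'port': 8080})
# -> {'config': {'port': 8080}}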
diff --git a/heat/engine/resources/openstack/heat/swiftsignal.py b/heat/engine/resources/openstack/heat/swiftsignal.py
index d8c33296b..dc78b9310 100644
--- a/heat/engine/resources/openstack/heat/swiftsignal.py
+++ b/heat/engine/resources/openstack/heat/swiftsignal.py
@@ -14,8 +14,7 @@
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
-import six
-from six.moves.urllib import parse
+from urllib import parse
from heat.common import exception
from heat.common.i18n import _
@@ -344,7 +343,7 @@ class SwiftSignal(resource.Resource):
def _resolve_attribute(self, key):
if key == self.DATA:
- return six.text_type(jsonutils.dumps(self.get_data()))
+ return str(jsonutils.dumps(self.get_data()))
def resource_mapping():
diff --git a/heat/engine/resources/openstack/heat/test_resource.py b/heat/engine/resources/openstack/heat/test_resource.py
index 3af8b46b0..dc8b6234b 100644
--- a/heat/engine/resources/openstack/heat/test_resource.py
+++ b/heat/engine/resources/openstack/heat/test_resource.py
@@ -14,7 +14,6 @@
import datetime
import eventlet
from oslo_utils import timeutils
-import six
from heat.common.i18n import _
from heat.engine import attributes
@@ -218,12 +217,12 @@ class TestResource(resource.Resource):
obj.get(entity_id)
except Exception as exc:
LOG.debug('%s.%s(%s) %s' % (client_name, self.entity,
- entity_id, six.text_type(exc)))
+ entity_id, str(exc)))
else:
# just sleep some more
eventlet.sleep(1)
- if isinstance(started_at, six.string_types):
+ if isinstance(started_at, str):
started_at = timeutils.parse_isotime(started_at)
started_at = timeutils.normalize_time(started_at)
diff --git a/heat/engine/resources/openstack/heat/value.py b/heat/engine/resources/openstack/heat/value.py
index 87786d885..7f21926cb 100644
--- a/heat/engine/resources/openstack/heat/value.py
+++ b/heat/engine/resources/openstack/heat/value.py
@@ -103,7 +103,8 @@ class Value(resource.Resource):
_('The expression to generate the "value" attribute.'),
required=True,
update_allowed=True,
- ))
+ ),
+ self.VALUE)
def resource_mapping():
diff --git a/heat/engine/resources/openstack/heat/wait_condition.py b/heat/engine/resources/openstack/heat/wait_condition.py
index bea9c38d8..db8ff844e 100644
--- a/heat/engine/resources/openstack/heat/wait_condition.py
+++ b/heat/engine/resources/openstack/heat/wait_condition.py
@@ -14,7 +14,6 @@
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
-import six
from heat.common.i18n import _
from heat.engine import attributes
@@ -158,7 +157,7 @@ class HeatWaitCondition(resource.Resource):
'key': key,
'res': res})
- return six.text_type(jsonutils.dumps(res))
+ return str(jsonutils.dumps(res))
def resource_mapping():
diff --git a/heat/engine/resources/openstack/ironic/__init__.py b/heat/engine/resources/openstack/ironic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/engine/resources/openstack/ironic/__init__.py
diff --git a/heat/engine/resources/openstack/ironic/port.py b/heat/engine/resources/openstack/ironic/port.py
new file mode 100644
index 000000000..1b98040a5
--- /dev/null
+++ b/heat/engine/resources/openstack/ironic/port.py
@@ -0,0 +1,240 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import attributes
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine import resource
+from heat.engine import support
+from heat.engine import translation
+
+
+class Port(resource.Resource):
+ """A resource that creates a ironic port.
+
+ Node UUID and physical hardware address for the Port (MAC address in
+ most cases) are needed (all Ports must be associated to a Node when
+ created).
+ """
+
+ support_status = support.SupportStatus(version='13.0.0')
+
+ default_client_name = 'ironic'
+
+ entity = 'port'
+
+ PROPERTIES = (
+ NODE, ADDRESS, PORTGROUP, LOCAL_LINK_CONNECTION, PXE_ENABLED,
+ PHYSICAL_NETWORK, EXTRA, IS_SMARTNIC,
+ ) = (
+ 'node', 'address', 'portgroup', 'local_link_connection', 'pxe_enabled',
+ 'physical_network', 'extra', 'is_smartnic',
+ )
+ PROPERTIES_MIN_SUPPORT_VERSION = (
+ (PXE_ENABLED, 1.19),
+ (LOCAL_LINK_CONNECTION, 1.19),
+ (PORTGROUP, 1.24), (PHYSICAL_NETWORK, 1.34),
+ (IS_SMARTNIC, 1.53)
+ )
+
+ ATTRIBUTES = (
+ ADDRESS_ATTR, NODE_UUID_ATTR, PORTGROUP_UUID_ATTR,
+ LOCAL_LINK_CONNECTION_ATTR, PXE_ENABLED_ATTR, PHYSICAL_NETWORK_ATTR,
+ INTERNAL_INFO_ATTR, EXTRA_ATTR, IS_SMARTNIC_ATTR,
+ ) = (
+ 'address', 'node_uuid', 'portgroup_uuid',
+ 'local_link_connection', 'pxe_enabled', 'physical_network',
+ 'internal_info', 'extra', 'is_smartnic',
+ )
+ attributes_schema = {
+ ADDRESS_ATTR: attributes.Schema(
+ _('Physical hardware address of this network Port, typically the '
+ 'hardware MAC address.'),
+ type=attributes.Schema.STRING
+ ),
+ NODE_UUID_ATTR: attributes.Schema(
+ _('UUID of the Node this resource belongs to.'),
+ type=attributes.Schema.STRING
+ ),
+ PORTGROUP_UUID_ATTR: attributes.Schema(
+ _('UUID of the Portgroup this resource belongs to.'),
+ type=attributes.Schema.STRING
+ ),
+ LOCAL_LINK_CONNECTION_ATTR: attributes.Schema(
+ _('The Port binding profile. If specified, must contain switch_id '
+ '(only a MAC address or an OpenFlow based datapath_id of the '
+ 'switch are accepted in this field) and port_id (identifier of '
+ 'the physical port on the switch to which the node\'s port is '
+ 'connected) fields. switch_info is an optional string field '
+ 'to be used to store any vendor-specific information.'),
+ type=attributes.Schema.MAP
+ ),
+ PXE_ENABLED_ATTR: attributes.Schema(
+ _('Indicates whether PXE is enabled or disabled on the Port.'),
+ type=attributes.Schema.BOOLEAN
+ ),
+ PHYSICAL_NETWORK_ATTR: attributes.Schema(
+ _('The name of the physical network to which a port is connected. '
+ 'May be empty.'),
+ type=attributes.Schema.STRING
+ ),
+ INTERNAL_INFO_ATTR: attributes.Schema(
+ _('Internal metadata set and stored by the Port. This field is '
+ 'read-only.'),
+ type=attributes.Schema.MAP
+ ),
+ EXTRA_ATTR: attributes.Schema(
+ _('A set of one or more arbitrary metadata key and value pairs.'),
+ type=attributes.Schema.MAP
+ ),
+ IS_SMARTNIC_ATTR: attributes.Schema(
+ _('Indicates whether the Port is a Smart NIC port.'),
+ type=attributes.Schema.BOOLEAN
+ )}
+
+ properties_schema = {
+ NODE: properties.Schema(
+ properties.Schema.STRING,
+ _('UUID or name of the Node this resource belongs to.'),
+ constraints=[
+ constraints.CustomConstraint('ironic.node')
+ ],
+ required=True,
+ update_allowed=True
+ ),
+ ADDRESS: properties.Schema(
+ properties.Schema.STRING,
+ _('Physical hardware address of this network Port, typically the '
+ 'hardware MAC address.'),
+ required=True,
+ update_allowed=True
+ ),
+ PORTGROUP: properties.Schema(
+ properties.Schema.STRING,
+ _('UUID or name of the Portgroup this resource belongs to.'),
+ constraints=[
+ constraints.CustomConstraint('ironic.portgroup')
+ ],
+ update_allowed=True,
+ ),
+ LOCAL_LINK_CONNECTION: properties.Schema(
+ properties.Schema.MAP,
+ _('The Port binding profile. If specified, must contain switch_id '
+ '(only a MAC address or an OpenFlow based datapath_id of the '
+ 'switch are accepted in this field) and port_id (identifier of '
+ 'the physical port on the switch to which the node\'s port is '
+ 'connected) fields. switch_info is an optional string field '
+ 'to be used to store any vendor-specific information.'),
+ update_allowed=True,
+ ),
+ PXE_ENABLED: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('Indicates whether PXE is enabled or disabled on the Port.'),
+ update_allowed=True,
+ ),
+ PHYSICAL_NETWORK: properties.Schema(
+ properties.Schema.STRING,
+ _('The name of the physical network to which a port is connected. '
+ 'May be empty.'),
+ update_allowed=True,
+ ),
+ EXTRA: properties.Schema(
+ properties.Schema.MAP,
+ _('A set of one or more arbitrary metadata key and value pairs.'),
+ update_allowed=True,
+ ),
+ IS_SMARTNIC: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('Indicates whether the Port is a Smart NIC port.'),
+ update_allowed=True,
+ )
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.NODE],
+ client_plugin=self.client_plugin('ironic'),
+ finder='get_node'),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.PORTGROUP],
+ client_plugin=self.client_plugin('ironic'),
+ finder='get_portgroup'),
+ ]
+
+ def _resolve_attribute(self, name):
+ if self.resource_id is None:
+ return
+ port = self.client().port.get(self.resource_id)
+ return getattr(port, name, None)
+
+ def _check_supported(self, properties):
+ # TODO(ricolin) Implement version support in property schema.
+ for k, v in self.PROPERTIES_MIN_SUPPORT_VERSION:
+ if k in properties and properties[k] is not None and (
+ self.client_plugin().max_microversion < v
+ ):
+ raise exception.NotSupported(
+ feature="OS::Ironic::Port with %s property" % k)
+
+ def handle_create(self):
+ args = dict(self.properties.items())
+ self._check_supported(args)
+ args['node_uuid'] = args.pop(self.NODE)
+ if self.PORTGROUP in args:
+ args['portgroup_uuid'] = args.pop(self.PORTGROUP)
+ port = self.client().port.create(**args)
+ self.resource_id_set(port.uuid)
+ return port.uuid
+
+ def check_create_complete(self, id):
+ try:
+ self.client().port.get(id)
+ except Exception as exc:
+ self.client_plugin().ignore_not_found(exc)
+ return False
+ return True
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ if prop_diff:
+ self._check_supported(prop_diff)
+ if self.NODE in prop_diff:
+ prop_diff['node_uuid'] = prop_diff.pop(self.NODE)
+ if self.PORTGROUP in prop_diff:
+ prop_diff['portgroup_uuid'] = prop_diff.pop(self.PORTGROUP)
+ patch = [{'op': 'replace', 'path': '/' + k, 'value': v}
+ for k, v in prop_diff.items()]
+ self.client().port.update(self.resource_id, patch)
+ return self.resource_id, prop_diff
+
+ def check_delete_complete(self, id):
+ if not id:
+ return True
+ try:
+ self.client().port.get(id)
+ except Exception as exc:
+ self.client_plugin().ignore_not_found(exc)
+ return True
+ return False
+
+
+def resource_mapping():
+ return {
+ 'OS::Ironic::Port': Port,
+ }
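handle_update above converts the property diff into a JSON-patch document, which is how the Ironic API applies partial updates. A standalone sketch of that conversion (the key renaming mirrors node -> node_uuid and portgroup -> portgroup_uuid above):

# Sketch: turn a Heat property diff into an Ironic-style JSON patch.

RENAMES = {'node': 'node_uuid', 'portgroup': 'portgroup_uuid'}


def prop_diff_to_patch(prop_diff):
    patch = []
    for key, value in prop_diff.items():
        key = RENAMES.get(key, key)
        patch.append({'op': 'replace', 'path': '/' + key, 'value': value})
    return patch

# prop_diff_to_patch({'pxe_enabled': False, 'node': 'node-uuid-1'})
# -> [{'op': 'replace', 'path': '/pxe_enabled', 'value': False},
#     {'op': 'replace', 'path': '/node_uuid', 'value': 'node-uuid-1'}]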
diff --git a/heat/engine/resources/openstack/keystone/project.py b/heat/engine/resources/openstack/keystone/project.py
index 2fdecf19d..8ea1945e8 100644
--- a/heat/engine/resources/openstack/keystone/project.py
+++ b/heat/engine/resources/openstack/keystone/project.py
@@ -192,6 +192,24 @@ class KeystoneProject(resource.Resource):
result[self.DOMAIN] = resource_data.get('domain_id')
return result
+ def handle_delete(self):
+ if self.resource_id:
+ # find and delete the default security group Neutron has created
+ default_sec_group_name = "default"
+ nclient = self.client_plugin("neutron").client()
+ default_sec_groups = nclient.list_security_groups(
+ project_id=self.resource_id,
+ name=default_sec_group_name)["security_groups"]
+ # NOTE(pas-ha) this should always contain a single security group
+ # (if any) as Neutron enforces uniqueness of 'default' security
+ # group in a project.
+ # However leaving orphans is bad enough, so we are deleting
+ # any security group with such name w/o uniqueness check.
+ for secgroup in default_sec_groups:
+ with self.client_plugin("neutron").ignore_not_found:
+ nclient.delete_security_group(secgroup["id"])
+ super(KeystoneProject, self).handle_delete()
+
def resource_mapping():
return {
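The new handle_delete above removes the project's 'default' security group that Neutron creates implicitly, so deleting the project does not leave an orphan behind. A hedged sketch of that cleanup using the same neutronclient calls the diff relies on (client construction and not-found handling are omitted):

# Sketch: delete the Neutron 'default' security group(s) owned by a
# project before the project itself is deleted. `neutron` is assumed
# to be an already-authenticated python-neutronclient Client.

def delete_default_security_groups(neutron, project_id):
    groups = neutron.list_security_groups(
        project_id=project_id, name='default')['security_groups']
    for group in groups:
        # Normally a single group; delete all matches to avoid orphans.
        neutron.delete_security_group(group['id'])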
diff --git a/heat/engine/resources/openstack/keystone/region.py b/heat/engine/resources/openstack/keystone/region.py
index b52cf009d..63ed6b3e6 100644
--- a/heat/engine/resources/openstack/keystone/region.py
+++ b/heat/engine/resources/openstack/keystone/region.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from six.moves.urllib import parse
+from urllib import parse
from heat.common.i18n import _
from heat.engine import constraints
diff --git a/heat/engine/resources/openstack/keystone/role.py b/heat/engine/resources/openstack/keystone/role.py
index 51d1416c7..620427d32 100644
--- a/heat/engine/resources/openstack/keystone/role.py
+++ b/heat/engine/resources/openstack/keystone/role.py
@@ -51,9 +51,8 @@ class KeystoneRole(resource.Resource):
DOMAIN: properties.Schema(
properties.Schema.STRING,
_('Name or id of keystone domain.'),
- default='default',
constraints=[constraints.CustomConstraint('keystone.domain')],
- support_status=support.SupportStatus(version='10.0.0')
+ support_status=support.SupportStatus(version='16.0.0')
)
}
diff --git a/heat/engine/resources/openstack/magnum/bay.py b/heat/engine/resources/openstack/magnum/bay.py
index fd2ed643c..1837dd552 100644
--- a/heat/engine/resources/openstack/magnum/bay.py
+++ b/heat/engine/resources/openstack/magnum/bay.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
@@ -129,7 +127,7 @@ class Bay(resource.Resource):
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
patch = [{'op': 'replace', 'path': '/' + k, 'value': v}
- for k, v in six.iteritems(prop_diff)]
+ for k, v in prop_diff.items()]
self.client().bays.update(self.resource_id, patch)
return self.resource_id
diff --git a/heat/engine/resources/openstack/magnum/cluster.py b/heat/engine/resources/openstack/magnum/cluster.py
index 58e465d3e..ac2ae5dc9 100644
--- a/heat/engine/resources/openstack/magnum/cluster.py
+++ b/heat/engine/resources/openstack/magnum/cluster.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
@@ -218,7 +216,7 @@ class Cluster(resource.Resource):
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
patch = [{'op': 'replace', 'path': '/' + k, 'value': v}
- for k, v in six.iteritems(prop_diff)]
+ for k, v in prop_diff.items()]
self.client().clusters.update(self.resource_id, patch)
return self.resource_id
diff --git a/heat/engine/resources/openstack/magnum/cluster_template.py b/heat/engine/resources/openstack/magnum/cluster_template.py
index d394492b3..d51162e54 100644
--- a/heat/engine/resources/openstack/magnum/cluster_template.py
+++ b/heat/engine/resources/openstack/magnum/cluster_template.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
@@ -290,7 +288,7 @@ class ClusterTemplate(resource.Resource):
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
patch = [{'op': 'replace', 'path': '/' + k, 'value': v}
- for k, v in six.iteritems(prop_diff)]
+ for k, v in prop_diff.items()]
self.client().cluster_templates.update(self.resource_id, patch)
return self.resource_id
diff --git a/heat/engine/resources/openstack/manila/share.py b/heat/engine/resources/openstack/manila/share.py
index ce89f7863..61e93163c 100644
--- a/heat/engine/resources/openstack/manila/share.py
+++ b/heat/engine/resources/openstack/manila/share.py
@@ -13,7 +13,6 @@
from oslo_log import log as logging
from oslo_utils import encodeutils
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -193,11 +192,21 @@ class ManilaShare(resource.Resource):
def _request_share(self):
return self.client().shares.get(self.resource_id)
+ def _request_export_locations(self):
+ # Only return the "path" response parameter, because that is what was
+ # returned before API version "2.9" by the shares endpoint
+ return [export_location.to_dict()['path']
+ for export_location in
+ self.client().share_export_locations.list(self.resource_id)]
+
def _resolve_attribute(self, name):
if self.resource_id is None:
return
- share = self._request_share()
- return six.text_type(getattr(share, name))
+ if name == self.EXPORT_LOCATIONS_ATTR:
+ attr = self._request_export_locations()
+ else:
+ attr = getattr(self._request_share(), name)
+ return str(attr)
def handle_create(self):
# Request IDs of entities from manila
@@ -345,7 +354,7 @@ class ManilaShare(resource.Resource):
result[self.ACCESS_RULES] = []
for rule in rules:
result[self.ACCESS_RULES].append(
- {(k, v) for (k, v) in six.iteritems(rule)
+ {(k, v) for (k, v) in rule.items()
if k in self._ACCESS_RULE_PROPERTIES})
return result
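Since the export_locations attribute is now resolved through the share-export-locations endpoint, only the "path" field of each location is kept, matching the pre-2.9 shares-endpoint format. A hedged sketch of the equivalent lookup outside Heat, assuming an authenticated manilaclient instance `manila` and a hypothetical share id:

def export_paths(manila, share_id):
    # mirrors _request_export_locations() above: keep only the 'path' field
    return [location.to_dict()['path']
            for location in manila.share_export_locations.list(share_id)]

# hypothetical result: ['10.0.0.10:/shares/share-xyz']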
diff --git a/heat/engine/resources/openstack/mistral/external_resource.py b/heat/engine/resources/openstack/mistral/external_resource.py
index 9e8f1b70e..a38e35b4e 100644
--- a/heat/engine/resources/openstack/mistral/external_resource.py
+++ b/heat/engine/resources/openstack/mistral/external_resource.py
@@ -13,7 +13,6 @@
from oslo_log import log as logging
from oslo_serialization import jsonutils
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -212,7 +211,7 @@ class MistralExternalResource(resource.Resource):
LOG.debug('ExternalResource id set to %(rid)s from Mistral '
'execution %(eid)s output' % {'eid': execution_id,
'rid': rsrc_id})
- self.resource_id_set(six.text_type(rsrc_id)[:255])
+ self.resource_id_set(str(rsrc_id)[:255])
return success
def _resolve_attribute(self, name):
diff --git a/heat/engine/resources/openstack/mistral/workflow.py b/heat/engine/resources/openstack/mistral/workflow.py
index 3c9ea23bc..c9113faa9 100644
--- a/heat/engine/resources/openstack/mistral/workflow.py
+++ b/heat/engine/resources/openstack/mistral/workflow.py
@@ -14,7 +14,6 @@
import copy
from oslo_serialization import jsonutils
-import six
import yaml
from heat.common import exception
@@ -46,6 +45,8 @@ class Workflow(signal_responder.SignalResponder,
entity = 'workflows'
+ always_replace_on_check_failed = False
+
PROPERTIES = (
NAME, TYPE, DESCRIPTION, INPUT, OUTPUT, TASKS, PARAMS,
TASK_DEFAULTS, USE_REQUEST_BODY_AS_INPUT, TAGS
@@ -374,7 +375,8 @@ class Workflow(signal_responder.SignalResponder,
ALARM_URL: attributes.Schema(
_("A signed url to create executions for workflows specified in "
"Workflow resource."),
- type=attributes.Schema.STRING
+ type=attributes.Schema.STRING,
+ cache_mode=attributes.Schema.CACHE_NONE
),
EXECUTIONS: attributes.Schema(
_("List of workflows' executions, each of them is a dictionary "
@@ -440,11 +442,11 @@ class Workflow(signal_responder.SignalResponder,
error=_('Signal data error'),
message=message)
if params is not None and not isinstance(params, dict):
- message = (_('Params must be a map, find a '
- '%s') % type(params))
- raise exception.StackValidationFailed(
- error=_('Signal data error'),
- message=message)
+ message = (_('Params must be a map, find a '
+ '%s') % type(params))
+ raise exception.StackValidationFailed(
+ error=_('Signal data error'),
+ message=message)
def validate(self):
super(Workflow, self).validate()
@@ -553,7 +555,7 @@ class Workflow(signal_responder.SignalResponder,
if props.get(self.TASK_DEFAULTS) is not None:
definition[defn_name][self.TASK_DEFAULTS.replace('_', '-')] = {
k.replace('_', '-'): v for k, v in
- six.iteritems(props.get(self.TASK_DEFAULTS)) if v}
+ props.get(self.TASK_DEFAULTS).items() if v}
return yaml.dump(definition, Dumper=yaml.CSafeDumper
if hasattr(yaml, 'CSafeDumper')
@@ -596,6 +598,19 @@ class Workflow(signal_responder.SignalResponder,
executions.extend(self.data().get(self.EXECUTIONS).split(','))
self.data_set(self.EXECUTIONS, ','.join(executions))
+ def needs_replace_failed(self):
+ if self.resource_id is None:
+ return True
+
+ if self.properties[self.NAME] is None:
+ return True
+
+ with self.client_plugin().ignore_not_found:
+ self.client().workflows.get(self.resource_id)
+ return False
+ self.resource_id_set(None)
+ return True
+
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
props = json_snippet.properties(self.properties_schema,
@@ -632,8 +647,8 @@ class Workflow(signal_responder.SignalResponder,
'created_at': execution.created_at,
'updated_at': execution.updated_at,
'state': execution.state,
- 'input': jsonutils.loads(six.text_type(execution.input)),
- 'output': jsonutils.loads(six.text_type(execution.output))
+ 'input': jsonutils.loads(str(execution.input)),
+ 'output': jsonutils.loads(str(execution.output))
}
return [parse_execution_response(
@@ -646,7 +661,7 @@ class Workflow(signal_responder.SignalResponder,
self.INPUT: self.properties.get(self.INPUT)}
elif name == self.ALARM_URL and self.resource_id is not None:
- return six.text_type(self._get_ec2_signed_url())
+ return str(self._get_ec2_signed_url(never_expire=True))
def resource_mapping():
diff --git a/heat/engine/resources/openstack/monasca/notification.py b/heat/engine/resources/openstack/monasca/notification.py
index fc6681660..8074bd0b0 100644
--- a/heat/engine/resources/openstack/monasca/notification.py
+++ b/heat/engine/resources/openstack/monasca/notification.py
@@ -12,7 +12,7 @@
# under the License.
import re
-from six.moves import urllib
+from urllib import parse
from heat.common import exception
from heat.common.i18n import _
@@ -115,7 +115,7 @@ class MonascaNotification(resource.Resource):
if self.properties[self.TYPE] == self.WEBHOOK:
try:
- parsed_address = urllib.parse.urlparse(address)
+ parsed_address = parse.urlparse(address)
except Exception:
msg = _('Address "%(addr)s" should have correct format '
'required by "%(wh)s" type of "%(type)s" '
diff --git a/heat/engine/resources/openstack/neutron/extraroute.py b/heat/engine/resources/openstack/neutron/extraroute.py
index f4753ec1e..b79ad3cdc 100644
--- a/heat/engine/resources/openstack/neutron/extraroute.py
+++ b/heat/engine/resources/openstack/neutron/extraroute.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
@@ -62,7 +60,7 @@ class ExtraRoute(neutron.NeutronResource):
def add_dependencies(self, deps):
super(ExtraRoute, self).add_dependencies(deps)
- for resource in six.itervalues(self.stack):
+ for resource in self.stack.values():
# depend on any RouterInterface in this template with the same
# router_id as this router_id
if resource.has_interface('OS::Neutron::RouterInterface'):
@@ -111,13 +109,15 @@ class ExtraRoute(neutron.NeutronResource):
def handle_delete(self):
if not self.resource_id:
return
- (router_id, destination, nexthop) = self.resource_id.split(':')
+ router_id = self.properties[self.ROUTER_ID]
with self.client_plugin().ignore_not_found:
routes = self.client().show_router(
router_id).get('router').get('routes', [])
try:
- routes.remove({'destination': destination,
- 'nexthop': nexthop})
+ routes.remove(
+ {'destination': self.properties[self.DESTINATION],
+ 'nexthop': self.properties[self.NEXTHOP]}
+ )
except ValueError:
return
self.client().update_router(router_id,
diff --git a/heat/engine/resources/openstack/neutron/extrarouteset.py b/heat/engine/resources/openstack/neutron/extrarouteset.py
new file mode 100644
index 000000000..af953474e
--- /dev/null
+++ b/heat/engine/resources/openstack/neutron/extrarouteset.py
@@ -0,0 +1,236 @@
+# Copyright 2019 Ericsson Software Technology
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from operator import itemgetter
+import six
+
+from oslo_log import log as logging
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine.resources.openstack.neutron import neutron
+from heat.engine.resources.openstack.neutron import router
+from heat.engine import support
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ExtraRouteSet(neutron.NeutronResource):
+ """Resource for specifying extra routes for a Neutron router.
+
+ Requires Neutron ``extraroute-atomic`` extension to be enabled::
+
+ $ openstack extension show extraroute-atomic
+
+ An extra route is a static routing table entry that is added beyond
+ the routes managed implicitly by router interfaces and router gateways.
+
+ The ``destination`` of an extra route is any IP network in /CIDR notation.
+ The ``nexthop`` of an extra route is an IP in a subnet that is directly
+ connected to the router.
+
+ In a single OS::Neutron::ExtraRouteSet resource you can specify a
+ set of extra routes (represented as a list) on the same virtual
+ router. As an improvement over the (never formally supported)
+ OS::Neutron::ExtraRoute resource, this resource plugin uses a Neutron
+ API extension (``extraroute-atomic``) that is not prone to race
+ conditions when used to manage multiple extra routes of the same
+ router. It is safe to manage multiple extra routes of the same router
+ from multiple stacks.
+
+ On the other hand, managing the same route on the same router from
+ multiple stacks (or both from Heat and as a non-Heat managed Neutron
+ extra route) is not safe.
+ """
+
+ support_status = support.SupportStatus(version='14.0.0')
+
+ required_service_extension = 'extraroute-atomic'
+
+ PROPERTIES = (
+ ROUTER, ROUTES,
+ ) = (
+ 'router', 'routes',
+ )
+
+ _ROUTE_KEYS = (
+ DESTINATION, NEXTHOP,
+ ) = (
+ 'destination', 'nexthop',
+ )
+
+ properties_schema = {
+ ROUTER: properties.Schema(
+ properties.Schema.STRING,
+ description=_('The router id.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('neutron.router')
+ ],
+ ),
+ ROUTES: properties.Schema(
+ properties.Schema.LIST,
+ _('A set of route dictionaries for the router.'),
+ schema=properties.Schema(
+ properties.Schema.MAP,
+ schema={
+ DESTINATION: properties.Schema(
+ properties.Schema.STRING,
+ _('The destination network in CIDR notation.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('net_cidr')
+ ]
+ ),
+ NEXTHOP: properties.Schema(
+ properties.Schema.STRING,
+ _('The next hop for the destination.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('ip_addr')
+ ]
+ ),
+ },
+ ),
+ default=[],
+ update_allowed=True,
+ ),
+ }
+
+ def add_dependencies(self, deps):
+ super(ExtraRouteSet, self).add_dependencies(deps)
+ for resource in six.itervalues(self.stack):
+ # depend on any RouterInterface in this template with the same
+ # router as this router
+ if resource.has_interface('OS::Neutron::RouterInterface'):
+ try:
+ router_id = self.properties[self.ROUTER]
+ dep_router_id = resource.properties.get(
+ router.RouterInterface.ROUTER)
+ except (ValueError, TypeError):
+ # Properties errors will be caught later in validation,
+ # where we can report them in their proper context.
+ continue
+ if dep_router_id == router_id:
+ deps += (self, resource)
+
+ def handle_create(self):
+ router = self.properties[self.ROUTER]
+ routes = self.properties[self.ROUTES]
+
+ _raise_if_duplicate(self.client().show_router(router), routes)
+
+ self.client().add_extra_routes_to_router(
+ router, {'router': {'routes': routes}})
+
+ # A set of extra routes does not have a physical ID, so all
+ # we can do is to set the resource ID to something at least
+ # informative, that is the router's ID.
+ self.resource_id_set(router)
+
+ def handle_delete(self):
+ if not self.resource_id:
+ return
+ with self.client_plugin().ignore_not_found:
+ self.client().remove_extra_routes_from_router(
+ self.properties[self.ROUTER],
+ {'router': {'routes': self.properties[self.ROUTES]}})
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ """Handle updates correctly.
+
+ Implementing handle_update() here is not just an optimization but a
+ necessity, because the default create-and-delete update behavior would
+ delete the unchanged part of the extra route set.
+ """
+
+ # Ignore the shallow diff done in prop_diff.
+ if self.ROUTES in prop_diff:
+ del prop_diff[self.ROUTES]
+
+ # Do a deep diff instead.
+ old = self.properties[self.ROUTES] or []
+ new = json_snippet.properties(
+ self.properties_schema)[self.ROUTES] or []
+
+ add = _set_to_routes(_routes_to_set(new) - _routes_to_set(old))
+ remove = _set_to_routes(_routes_to_set(old) - _routes_to_set(new))
+
+ router = self.properties[self.ROUTER]
+
+ _raise_if_duplicate(self.client().show_router(router), add)
+
+ # Neither the remove-add nor the add-remove order is perfect.
+ # Likely both will produce transient packet loss.
+ # The remove-add order seems to be conceptually simpler,
+ # never producing unexpected routing tables.
+ self.client().remove_extra_routes_from_router(
+ router, {'router': {'routes': remove}})
+ self.client().add_extra_routes_to_router(
+ router, {'router': {'routes': add}})
+
+
+def _routes_to_set(route_list):
+ """Convert routes to a set that can be diffed.
+
+ Convert the in-API/in-template routes format to another data type that
+ has the same information content but that is hashable, so we can put
+ routes in a set and perform set operations on them.
+ """
+ return set(frozenset(r.items()) for r in route_list)
+
+
+def _set_to_routes(route_set):
+ """The reverse of _routes_to_set.
+
+ _set_to_routes(_routes_to_set(routes)) == routes
+ """
+ return [dict(r) for r in route_set]
+
+
+def _generate_name(router, routes):
+ return ','.join(
+ ['%s' % router] +
+ ['%(destination)s=%(nexthop)s' % r for r in sorted(
+ # sort by destination as primary key and
+ # by nexthop as secondary key
+ routes, key=itemgetter('destination', 'nexthop'))])
+
+
+def _raise_if_duplicate(router_existing, routes_to_add):
+ """Detect trying to add duplicate routes in create/update
+
+ Take the response of show_router() for an existing router and a list of
+ routes to add and raise PhysicalResourceExists if we try to add a route
+ already existing on the router. Otherwise do not raise and return None.
+
+ You cannot use this to detect duplicate routes atomically while adding
+ a route so when you use this you'll inevitably create race conditions.
+ """
+ routes_existing = _routes_to_set(
+ router_existing['router']['routes'])
+ for route in _routes_to_set(routes_to_add):
+ if route in routes_existing:
+ original = _set_to_routes(set([route]))
+ name = _generate_name(router_existing['router']['id'], original)
+ raise exception.PhysicalResourceExists(name=name)
+
+
+def resource_mapping():
+ return {
+ 'OS::Neutron::ExtraRouteSet': ExtraRouteSet,
+ }
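The handle_update() above relies on _routes_to_set()/_set_to_routes() to compute which routes to add and which to remove. A small self-contained sketch (with hypothetical old/new route lists) of how that set-based diff behaves:

def routes_to_set(route_list):
    # same conversion as _routes_to_set() above: make routes hashable
    return set(frozenset(r.items()) for r in route_list)

old = [{'destination': '10.0.0.0/24', 'nexthop': '192.168.0.1'}]
new = [{'destination': '10.0.0.0/24', 'nexthop': '192.168.0.1'},
       {'destination': '10.1.0.0/24', 'nexthop': '192.168.0.1'}]

add = [dict(r) for r in routes_to_set(new) - routes_to_set(old)]
remove = [dict(r) for r in routes_to_set(old) - routes_to_set(new)]

assert add == [{'destination': '10.1.0.0/24', 'nexthop': '192.168.0.1'}]
assert remove == []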
diff --git a/heat/engine/resources/openstack/neutron/firewall.py b/heat/engine/resources/openstack/neutron/firewall.py
index 740cb16bf..93c865e76 100644
--- a/heat/engine/resources/openstack/neutron/firewall.py
+++ b/heat/engine/resources/openstack/neutron/firewall.py
@@ -142,7 +142,7 @@ class Firewall(neutron.NeutronResource):
status = attributes['status']
if status == 'PENDING_CREATE':
return False
- elif status == 'ACTIVE':
+ elif status == 'ACTIVE' or status == 'INACTIVE':
return True
elif status == 'ERROR':
raise exception.ResourceInError(
@@ -243,8 +243,8 @@ class FirewallPolicy(neutron.NeutronResource):
),
FIREWALL_RULES: properties.Schema(
properties.Schema.LIST,
- _('An ordered list of firewall rules to apply to the firewall.'),
- required=True,
+ _('An ordered list of firewall rules to apply to the firewall. '
+ '(Prior to version 14.0.0 this was a required property).'),
update_allowed=True
),
}
diff --git a/heat/engine/resources/openstack/neutron/floatingip.py b/heat/engine/resources/openstack/neutron/floatingip.py
index e06afcbeb..a56a0163d 100644
--- a/heat/engine/resources/openstack/neutron/floatingip.py
+++ b/heat/engine/resources/openstack/neutron/floatingip.py
@@ -10,7 +10,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import six
from oslo_log import log as logging
@@ -42,7 +41,7 @@ class FloatingIP(neutron.NeutronResource):
entity = 'floatingip'
PROPERTIES = (
- FLOATING_NETWORK_ID, FLOATING_NETWORK, FLOATING_SUBNET,
+ FLOATING_NETWORK_ID, FLOATING_NETWORK, FLOATING_SUBNET,
VALUE_SPECS, PORT_ID, FIXED_IP_ADDRESS, FLOATING_IP_ADDRESS,
DNS_NAME, DNS_DOMAIN,
) = (
@@ -238,7 +237,7 @@ class FloatingIP(neutron.NeutronResource):
except Exception as exc:
LOG.info("Ignoring Neutron error while "
"getting FloatingIP dependencies: %s",
- six.text_type(exc))
+ str(exc))
return False
else:
try:
@@ -265,7 +264,7 @@ class FloatingIP(neutron.NeutronResource):
def add_dependencies(self, deps):
super(FloatingIP, self).add_dependencies(deps)
- for resource in six.itervalues(self.stack):
+ for resource in self.stack.values():
# depend on any RouterGateway in this template with the same
# network_id as this floating_network_id
if resource.has_interface('OS::Neutron::RouterGateway'):
@@ -388,7 +387,7 @@ class FloatingIPAssociation(neutron.NeutronResource):
def add_dependencies(self, deps):
super(FloatingIPAssociation, self).add_dependencies(deps)
- for resource in six.itervalues(self.stack):
+ for resource in self.stack.values():
if resource.has_interface('OS::Neutron::RouterInterface'):
def port_on_subnet(resource, subnet):
diff --git a/heat/engine/resources/openstack/neutron/l2_gateway.py b/heat/engine/resources/openstack/neutron/l2_gateway.py
index fb3265468..634ee81fc 100644
--- a/heat/engine/resources/openstack/neutron/l2_gateway.py
+++ b/heat/engine/resources/openstack/neutron/l2_gateway.py
@@ -13,7 +13,6 @@
# under the License.
import collections
-import six
from heat.common.i18n import _
from heat.engine import properties
@@ -107,13 +106,13 @@ class L2Gateway(neutron.NeutronResource):
@staticmethod
def _remove_none_value_props(props):
- if isinstance(props, collections.Mapping):
+ if isinstance(props, collections.abc.Mapping):
return dict((k, L2Gateway._remove_none_value_props(v)) for k, v
in props.items() if v is not None)
- elif (isinstance(props, collections.Sequence) and
- not isinstance(props, six.string_types)):
- return list(L2Gateway._remove_none_value_props(l) for l in props
- if l is not None)
+ elif (isinstance(props, collections.abc.Sequence) and
+ not isinstance(props, str)):
+ return list(L2Gateway._remove_none_value_props(p) for p in props
+ if p is not None)
return props
@staticmethod
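The _remove_none_value_props() change above only switches to collections.abc and drops the six string check; its behavior is unchanged. A small worked example (with a hypothetical input) of that pruning:

import collections.abc

def prune(props):
    # mirrors _remove_none_value_props() above: drop None values from
    # mappings and non-string sequences, recursively
    if isinstance(props, collections.abc.Mapping):
        return {k: prune(v) for k, v in props.items() if v is not None}
    if (isinstance(props, collections.abc.Sequence)
            and not isinstance(props, str)):
        return [prune(p) for p in props if p is not None]
    return props

assert prune({'devices': [{'device_name': 'sw1', 'seg_id': None}, None]}) \
    == {'devices': [{'device_name': 'sw1'}]}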
diff --git a/heat/engine/resources/openstack/neutron/lbaas/pool.py b/heat/engine/resources/openstack/neutron/lbaas/pool.py
index 9e4aa2060..63d2868a6 100644
--- a/heat/engine/resources/openstack/neutron/lbaas/pool.py
+++ b/heat/engine/resources/openstack/neutron/lbaas/pool.py
@@ -197,8 +197,8 @@ class Pool(neutron.NeutronResource):
if (self.properties[self.LISTENER] is None and
self.properties[self.LOADBALANCER] is None):
- raise exception.PropertyUnspecifiedError(self.LISTENER,
- self.LOADBALANCER)
+ raise exception.PropertyUnspecifiedError(self.LISTENER,
+ self.LOADBALANCER)
if self.properties[self.SESSION_PERSISTENCE] is not None:
session_p = self.properties[self.SESSION_PERSISTENCE]
diff --git a/heat/engine/resources/openstack/neutron/net.py b/heat/engine/resources/openstack/neutron/net.py
index 62a19e1ff..d0d2ebd8d 100644
--- a/heat/engine/resources/openstack/neutron/net.py
+++ b/heat/engine/resources/openstack/neutron/net.py
@@ -284,8 +284,13 @@ class Net(neutron.NeutronResource):
if self.resource_id is None:
return
if name == self.SEGMENTS:
- return [segment.to_dict() for segment in list(self.client(
+ segments = [segment.to_dict() for segment in list(self.client(
'openstack').network.segments(network_id=self.resource_id))]
+ # Sort segments without name attribute first.
+ # See bug: https://bugs.launchpad.net/tripleo/+bug/1894920
+ segments.sort(key=lambda s: s['name'] is not None)
+ return segments
+
attributes = self._show_resource()
return attributes[name]
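The SEGMENTS attribute above is now sorted so that segments without a name come first (see the referenced bug). A tiny sketch of why that key works, using hypothetical segment dicts:

# lambda s: s['name'] is not None evaluates to False (0) for unnamed
# segments and True (1) for named ones, so unnamed segments sort first.
segments = [{'name': 'storage-segment'}, {'name': None}]
segments.sort(key=lambda s: s['name'] is not None)
assert segments == [{'name': None}, {'name': 'storage-segment'}]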
diff --git a/heat/engine/resources/openstack/neutron/neutron.py b/heat/engine/resources/openstack/neutron/neutron.py
index f62e99bd4..029127d9d 100644
--- a/heat/engine/resources/openstack/neutron/neutron.py
+++ b/heat/engine/resources/openstack/neutron/neutron.py
@@ -92,9 +92,14 @@ class NeutronResource(resource.Resource):
attrs['port_security_enabled'])
@staticmethod
- def merge_value_specs(props):
+ def merge_value_specs(props, before_value_specs=None):
value_spec_props = props.pop('value_specs')
- props.update(value_spec_props)
+ if value_spec_props is not None:
+ if before_value_specs:
+ for k in list(value_spec_props):
+ if value_spec_props[k] == before_value_specs.get(k, None):
+ value_spec_props.pop(k)
+ props.update(value_spec_props)
def prepare_update_properties(self, prop_diff):
"""Prepares prop_diff values for correct neutron update call.
@@ -102,8 +107,9 @@ class NeutronResource(resource.Resource):
1. Merges value_specs
2. Defaults resource name to physical resource name if None
"""
- if 'value_specs' in prop_diff and prop_diff['value_specs']:
- NeutronResource.merge_value_specs(prop_diff)
+ if 'value_specs' in prop_diff:
+ NeutronResource.merge_value_specs(
+ prop_diff, self.properties[self.VALUE_SPECS])
if 'name' in prop_diff and prop_diff['name'] is None:
prop_diff['name'] = self.physical_resource_name()
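With the new before_value_specs argument above, value_specs keys whose value has not changed since the previous stack version are dropped before the merge, so they are not resent to Neutron on update. A short sketch of that behavior with hypothetical values:

# mirrors merge_value_specs(props, before_value_specs) above
prop_diff = {'value_specs': {'mtu': 1500, 'shared': True}}
before_value_specs = {'mtu': 1500}

value_specs = prop_diff.pop('value_specs')
for k in list(value_specs):
    if value_specs[k] == before_value_specs.get(k, None):
        value_specs.pop(k)      # unchanged key, do not resend
prop_diff.update(value_specs)

assert prop_diff == {'shared': True}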
diff --git a/heat/engine/resources/openstack/neutron/port.py b/heat/engine/resources/openstack/neutron/port.py
index a5491ec4a..b7a01d40b 100644
--- a/heat/engine/resources/openstack/neutron/port.py
+++ b/heat/engine/resources/openstack/neutron/port.py
@@ -13,7 +13,6 @@
from oslo_log import log as logging
from oslo_serialization import jsonutils
-import six
from heat.common.i18n import _
from heat.engine import attributes
@@ -54,11 +53,11 @@ class Port(neutron.NeutronResource):
EXTRA_PROPERTIES = (
VALUE_SPECS, ADMIN_STATE_UP, MAC_ADDRESS,
ALLOWED_ADDRESS_PAIRS, VNIC_TYPE, QOS_POLICY,
- PORT_SECURITY_ENABLED,
+ PORT_SECURITY_ENABLED, PROPAGATE_UPLINK_STATUS,
) = (
'value_specs', 'admin_state_up', 'mac_address',
'allowed_address_pairs', 'binding:vnic_type', 'qos_policy',
- 'port_security_enabled',
+ 'port_security_enabled', 'propagate_uplink_status',
)
_FIXED_IP_KEYS = (
@@ -78,12 +77,13 @@ class Port(neutron.NeutronResource):
MAC_ADDRESS_ATTR, NAME_ATTR, NETWORK_ID_ATTR, SECURITY_GROUPS_ATTR,
STATUS, TENANT_ID, ALLOWED_ADDRESS_PAIRS_ATTR, SUBNETS_ATTR,
PORT_SECURITY_ENABLED_ATTR, QOS_POLICY_ATTR, DNS_ASSIGNMENT,
- NETWORK_ATTR,
+ NETWORK_ATTR, PROPAGATE_UPLINK_STATUS_ATTR,
) = (
'admin_state_up', 'device_id', 'device_owner', 'fixed_ips',
'mac_address', 'name', 'network_id', 'security_groups',
'status', 'tenant_id', 'allowed_address_pairs', 'subnets',
'port_security_enabled', 'qos_policy_id', 'dns_assignment', 'network',
+ 'propagate_uplink_status',
)
properties_schema = {
@@ -245,7 +245,6 @@ class Port(neutron.NeutronResource):
constraints=[
constraints.CustomConstraint('mac_addr')
],
- update_allowed=True,
),
ALLOWED_ADDRESS_PAIRS: properties.Schema(
properties.Schema.LIST,
@@ -284,7 +283,8 @@ class Port(neutron.NeutronResource):
'the bindings extension.'),
constraints=[
constraints.AllowedValues(['normal', 'direct', 'macvtap',
- 'direct-physical', 'baremetal']),
+ 'direct-physical', 'baremetal',
+ 'virtio-forwarder', 'smart-nic']),
],
support_status=support.SupportStatus(version='2015.1'),
update_allowed=True,
@@ -307,6 +307,12 @@ class Port(neutron.NeutronResource):
update_allowed=True,
support_status=support.SupportStatus(version='6.0.0')
),
+ PROPAGATE_UPLINK_STATUS: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('Flag to enable/disable propagate uplink status on the port.'),
+ update_allowed=True,
+ support_status=support.SupportStatus(version='15.0.0')
+ ),
}
# Need to update properties_schema with other properties before
@@ -384,7 +390,7 @@ class Port(neutron.NeutronResource):
_("The attributes of the network owning the port. (The full list "
"of response parameters can be found in the `Openstack "
"Networking service API reference "
- "<https://developer.openstack.org/api-ref/network/>`_.) The "
+ "<https://docs.openstack.org/api-ref/network/>`_.) The "
"following examples demonstrate some (not all) possible "
"expressions. (Obtains the network, the MTU (Maximum "
"transmission unit), the network tags and the l2_adjacency "
@@ -396,6 +402,11 @@ class Port(neutron.NeutronResource):
type=attributes.Schema.MAP,
support_status=support.SupportStatus(version='11.0.0'),
),
+ PROPAGATE_UPLINK_STATUS_ATTR: attributes.Schema(
+ _("Enable/Disable propagate uplink status for the port."),
+ support_status=support.SupportStatus(version='15.0.0'),
+ type=attributes.Schema.BOOLEAN
+ ),
}
def translation_rules(self, props):
@@ -438,7 +449,7 @@ class Port(neutron.NeutronResource):
# It is not known which subnet a port might be assigned
# to so all subnets in a network should be created before
# the ports in that network.
- for res in six.itervalues(self.stack):
+ for res in self.stack.values():
if res.has_interface('OS::Neutron::Subnet'):
try:
dep_network = res.properties.get(subnet.Subnet.NETWORK)
@@ -543,7 +554,6 @@ class Port(neutron.NeutronResource):
result = super(Port, self).parse_live_resource_data(
resource_properties, resource_data)
result[self.QOS_POLICY] = resource_data.get('qos_policy_id')
- result.pop(self.MAC_ADDRESS)
fixed_ips = resource_data.get(self.FIXED_IPS) or []
if fixed_ips:
result.update({self.FIXED_IPS: []})
diff --git a/heat/engine/resources/openstack/neutron/provider_net.py b/heat/engine/resources/openstack/neutron/provider_net.py
index 342e9c3e4..9f5a038c7 100644
--- a/heat/engine/resources/openstack/neutron/provider_net.py
+++ b/heat/engine/resources/openstack/neutron/provider_net.py
@@ -37,18 +37,18 @@ class ProviderNet(net.Net):
PROPERTIES = (
NAME, PROVIDER_NETWORK_TYPE, PROVIDER_PHYSICAL_NETWORK,
PROVIDER_SEGMENTATION_ID, ADMIN_STATE_UP, SHARED,
- PORT_SECURITY_ENABLED, ROUTER_EXTERNAL, TAGS,
+ PORT_SECURITY_ENABLED, ROUTER_EXTERNAL, DNS_DOMAIN, TAGS,
) = (
'name', 'network_type', 'physical_network',
'segmentation_id', 'admin_state_up', 'shared',
- 'port_security_enabled', 'router_external', 'tags',
+ 'port_security_enabled', 'router_external', 'dns_domain', 'tags',
)
ATTRIBUTES = (
- STATUS, SUBNETS,
+ STATUS, SUBNETS, SEGMENTS,
) = (
- 'status', 'subnets',
+ 'status', 'subnets', 'segments',
)
NETWORK_TYPES = (
@@ -110,6 +110,15 @@ class ProviderNet(net.Net):
update_allowed=True,
support_status=support.SupportStatus(version='12.0.0')
),
+ DNS_DOMAIN: properties.Schema(
+ properties.Schema.STRING,
+ _('DNS domain associated with this network.'),
+ constraints=[
+ constraints.CustomConstraint('dns_domain')
+ ],
+ update_allowed=True,
+ support_status=support.SupportStatus(version='15.0.0')
+ ),
}
attributes_schema = {
@@ -121,6 +130,11 @@ class ProviderNet(net.Net):
_("Subnets of this network."),
type=attributes.Schema.LIST
),
+ SEGMENTS: attributes.Schema(
+ _("The segments of this network."),
+ type=attributes.Schema.LIST,
+ support_status=support.SupportStatus(version='16.0.0'),
+ ),
}
def validate(self):
diff --git a/heat/engine/resources/openstack/neutron/qos.py b/heat/engine/resources/openstack/neutron/qos.py
index 2a75428b1..d767050ae 100644
--- a/heat/engine/resources/openstack/neutron/qos.py
+++ b/heat/engine/resources/openstack/neutron/qos.py
@@ -111,7 +111,7 @@ class QoSRule(neutron.NeutronResource):
support_status = support.SupportStatus(version='6.0.0')
PROPERTIES = (
- POLICY, TENANT_ID,
+ POLICY, TENANT_ID,
) = (
'policy', 'tenant_id',
)
@@ -292,9 +292,96 @@ class QoSDscpMarkingRule(QoSRule):
return [self.resource_id, self.policy_id]
+class QoSMinimumBandwidthRule(QoSRule):
+ """A resource for guaranteeing bandwidth.
+
+ This rule can be associated with a QoS policy, and then the policy
+ can be used by a neutron port to provide guaranteed bandwidth QoS
+ capabilities.
+
+ Depending on the drivers, the guarantee may be enforced on two levels:
+ first, when a server is placed (scheduled) on physical infrastructure,
+ and/or second, in the data plane of the physical hypervisor. For details
+ please see the Neutron documentation:
+
+ https://docs.openstack.org/neutron/latest/admin/config-qos-min-bw.html
+
+ The default policy usage of this resource is limited to
+ administrators only.
+ """
+
+ entity = 'minimum_bandwidth_rule'
+
+ required_service_extension = 'qos-bw-minimum-ingress'
+
+ support_status = support.SupportStatus(
+ status=support.SUPPORTED,
+ version='14.0.0',
+ )
+
+ PROPERTIES = (
+ MIN_BANDWIDTH, DIRECTION
+ ) = (
+ 'min_kbps', 'direction'
+ )
+
+ properties_schema = {
+ MIN_BANDWIDTH: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Min bandwidth in kbps.'),
+ required=True,
+ update_allowed=True,
+ constraints=[
+ constraints.Range(min=0),
+ ],
+ ),
+ DIRECTION: properties.Schema(
+ properties.Schema.STRING,
+ _('Traffic direction from the point of view of the port.'),
+ update_allowed=True,
+ constraints=[
+ constraints.AllowedValues(['egress', 'ingress']),
+ ],
+ default='egress',
+ ),
+ }
+
+ properties_schema.update(QoSRule.properties_schema)
+
+ def handle_create(self):
+ props = self.prepare_properties(self.properties,
+ self.physical_resource_name())
+ props.pop(self.POLICY)
+
+ rule = self.client().create_minimum_bandwidth_rule(
+ self.policy_id,
+ {'minimum_bandwidth_rule': props})['minimum_bandwidth_rule']
+
+ self.resource_id_set(rule['id'])
+
+ def handle_delete(self):
+ if self.resource_id is None:
+ return
+
+ with self.client_plugin().ignore_not_found:
+ self.client().delete_minimum_bandwidth_rule(
+ self.resource_id, self.policy_id)
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ if prop_diff:
+ self.client().update_minimum_bandwidth_rule(
+ self.resource_id,
+ self.policy_id,
+ {'minimum_bandwidth_rule': prop_diff})
+
+ def _res_get_args(self):
+ return [self.resource_id, self.policy_id]
+
+
def resource_mapping():
return {
'OS::Neutron::QoSPolicy': QoSPolicy,
'OS::Neutron::QoSBandwidthLimitRule': QoSBandwidthLimitRule,
- 'OS::Neutron::QoSDscpMarkingRule': QoSDscpMarkingRule
+ 'OS::Neutron::QoSDscpMarkingRule': QoSDscpMarkingRule,
+ 'OS::Neutron::QoSMinimumBandwidthRule': QoSMinimumBandwidthRule,
}
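The new OS::Neutron::QoSMinimumBandwidthRule resource wraps the minimum-bandwidth-rule calls of the Neutron client, as its handle_* methods above show. A hedged standalone sketch of the create call, assuming an authenticated neutronclient instance `neutron` and an existing QoS policy id:

def create_min_bw_rule(neutron, policy_id, min_kbps=1000,
                       direction='egress'):
    # same client call as handle_create() above
    body = {'minimum_bandwidth_rule': {'min_kbps': min_kbps,
                                       'direction': direction}}
    return neutron.create_minimum_bandwidth_rule(
        policy_id, body)['minimum_bandwidth_rule']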
diff --git a/heat/engine/resources/openstack/neutron/router.py b/heat/engine/resources/openstack/neutron/router.py
index 5ae5d4997..1127a2c10 100644
--- a/heat/engine/resources/openstack/neutron/router.py
+++ b/heat/engine/resources/openstack/neutron/router.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
@@ -266,7 +264,7 @@ class Router(neutron.NeutronResource):
external_gw = self.properties[self.EXTERNAL_GATEWAY]
if external_gw:
external_gw_net = external_gw.get(self.EXTERNAL_GATEWAY_NETWORK)
- for res in six.itervalues(self.stack):
+ for res in self.stack.values():
if res.has_interface('OS::Neutron::Subnet'):
try:
subnet_net = res.properties.get(subnet.Subnet.NETWORK)
@@ -639,7 +637,7 @@ class RouterGateway(neutron.NeutronResource):
def add_dependencies(self, deps):
super(RouterGateway, self).add_dependencies(deps)
- for resource in six.itervalues(self.stack):
+ for resource in self.stack.values():
# depend on any RouterInterface in this template with the same
# router_id as this router_id
if resource.has_interface('OS::Neutron::RouterInterface'):
diff --git a/heat/engine/resources/openstack/neutron/sfc/flow_classifier.py b/heat/engine/resources/openstack/neutron/sfc/flow_classifier.py
index b6ddcb488..e5a34d54e 100644
--- a/heat/engine/resources/openstack/neutron/sfc/flow_classifier.py
+++ b/heat/engine/resources/openstack/neutron/sfc/flow_classifier.py
@@ -181,8 +181,8 @@ class FlowClassifier(neutron.NeutronResource):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
- self.client_plugin().delete_ext_resource('flow_classifier',
- self.resource_id)
+ self.client_plugin().delete_ext_resource('flow_classifier',
+ self.resource_id)
def resource_mapping():
diff --git a/heat/engine/resources/openstack/neutron/sfc/port_pair.py b/heat/engine/resources/openstack/neutron/sfc/port_pair.py
index 749f8e45f..bc4070d35 100644
--- a/heat/engine/resources/openstack/neutron/sfc/port_pair.py
+++ b/heat/engine/resources/openstack/neutron/sfc/port_pair.py
@@ -118,8 +118,8 @@ class PortPair(neutron.NeutronResource):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
- self.client_plugin().delete_ext_resource('port_pair',
- self.resource_id)
+ self.client_plugin().delete_ext_resource('port_pair',
+ self.resource_id)
def resource_mapping():
diff --git a/heat/engine/resources/openstack/neutron/sfc/port_pair_group.py b/heat/engine/resources/openstack/neutron/sfc/port_pair_group.py
index 688594b96..280aea75b 100644
--- a/heat/engine/resources/openstack/neutron/sfc/port_pair_group.py
+++ b/heat/engine/resources/openstack/neutron/sfc/port_pair_group.py
@@ -24,7 +24,7 @@ class PortPairGroup(neutron.NeutronResource):
Multiple port-pairs may be included in a port-pair-group to allow the
specification of a set of functionally equivalent Service Functions that
- can be be used for load distribution.
+ can be used for load distribution.
"""
support_status = support.SupportStatus(
diff --git a/heat/engine/resources/openstack/neutron/taas/tap_flow.py b/heat/engine/resources/openstack/neutron/taas/tap_flow.py
index 7201c67d8..1644ab823 100644
--- a/heat/engine/resources/openstack/neutron/taas/tap_flow.py
+++ b/heat/engine/resources/openstack/neutron/taas/tap_flow.py
@@ -135,8 +135,8 @@ class TapFlow(neutron.NeutronResource):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
- self.client_plugin().delete_ext_resource('tap_flow',
- self.resource_id)
+ self.client_plugin().delete_ext_resource('tap_flow',
+ self.resource_id)
def check_create_complete(self, data):
return self.client_plugin().check_ext_resource_status(
diff --git a/heat/engine/resources/openstack/neutron/taas/tap_service.py b/heat/engine/resources/openstack/neutron/taas/tap_service.py
index 112ed4f23..d44ffab93 100644
--- a/heat/engine/resources/openstack/neutron/taas/tap_service.py
+++ b/heat/engine/resources/openstack/neutron/taas/tap_service.py
@@ -99,8 +99,8 @@ class TapService(neutron.NeutronResource):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
- self.client_plugin().delete_ext_resource('tap_service',
- self.resource_id)
+ self.client_plugin().delete_ext_resource('tap_service',
+ self.resource_id)
def check_create_complete(self, data):
return self.client_plugin().check_ext_resource_status(
diff --git a/heat/engine/resources/openstack/neutron/vpnservice.py b/heat/engine/resources/openstack/neutron/vpnservice.py
index e527744d7..2e2b4bef1 100644
--- a/heat/engine/resources/openstack/neutron/vpnservice.py
+++ b/heat/engine/resources/openstack/neutron/vpnservice.py
@@ -552,7 +552,8 @@ class IKEPolicy(neutron.NeutronResource):
constraints=[
constraints.AllowedValues(['3des', 'aes-128', 'aes-192',
'aes-256']),
- ]
+ ],
+ update_allowed=True
),
PHASE1_NEGOTIATION_MODE: properties.Schema(
properties.Schema.STRING,
@@ -565,6 +566,7 @@ class IKEPolicy(neutron.NeutronResource):
LIFETIME: properties.Schema(
properties.Schema.MAP,
_('Safety assessment lifetime configuration for the ike policy.'),
+ update_allowed=True,
schema={
LIFETIME_UNITS: properties.Schema(
properties.Schema.STRING,
@@ -588,7 +590,8 @@ class IKEPolicy(neutron.NeutronResource):
default='group5',
constraints=[
constraints.AllowedValues(['group2', 'group5', 'group14']),
- ]
+ ],
+ update_allowed=True
),
IKE_VERSION: properties.Schema(
properties.Schema.STRING,
@@ -596,7 +599,8 @@ class IKEPolicy(neutron.NeutronResource):
default='v1',
constraints=[
constraints.AllowedValues(['v1', 'v2']),
- ]
+ ],
+ update_allowed=True
),
}
diff --git a/heat/engine/resources/openstack/nova/flavor.py b/heat/engine/resources/openstack/nova/flavor.py
index 28b1c5f64..afefcbe51 100644
--- a/heat/engine/resources/openstack/nova/flavor.py
+++ b/heat/engine/resources/openstack/nova/flavor.py
@@ -41,8 +41,6 @@ class NovaFlavor(resource.Resource):
default_client_name = 'nova'
- required_service_extension = 'os-flavor-manage'
-
entity = 'flavors'
PROPERTIES = (
diff --git a/heat/engine/resources/openstack/nova/floatingip.py b/heat/engine/resources/openstack/nova/floatingip.py
index 83e5e7fca..64f2cb22c 100644
--- a/heat/engine/resources/openstack/nova/floatingip.py
+++ b/heat/engine/resources/openstack/nova/floatingip.py
@@ -12,7 +12,6 @@
# under the License.
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -49,8 +48,6 @@ class NovaFloatingIp(resource.Resource):
)
)
- required_service_extension = 'os-floating-ips'
-
PROPERTIES = (POOL,) = ('pool',)
ATTRIBUTES = (
@@ -125,7 +122,7 @@ class NovaFloatingIp(resource.Resource):
self.POOL_ATTR: floating_ip['floatingip']['floating_network_id'],
self.IP: floating_ip['floatingip']['floating_ip_address']
}
- return six.text_type(attributes[key])
+ return str(attributes[key])
class NovaFloatingIpAssociation(resource.Resource):
diff --git a/heat/engine/resources/openstack/nova/host_aggregate.py b/heat/engine/resources/openstack/nova/host_aggregate.py
index 2b6f0ce81..5264c8393 100644
--- a/heat/engine/resources/openstack/nova/host_aggregate.py
+++ b/heat/engine/resources/openstack/nova/host_aggregate.py
@@ -38,8 +38,6 @@ class HostAggregate(resource.Resource):
entity = 'aggregates'
- required_service_extension = 'os-aggregates'
-
PROPERTIES = (
NAME, AVAILABILITY_ZONE, HOSTS, METADATA
) = (
@@ -56,7 +54,6 @@ class HostAggregate(resource.Resource):
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Name for the availability zone.'),
- required=True,
update_allowed=True,
),
HOSTS: properties.Schema(
diff --git a/heat/engine/resources/openstack/nova/keypair.py b/heat/engine/resources/openstack/nova/keypair.py
index bff7c7ba6..064310614 100644
--- a/heat/engine/resources/openstack/nova/keypair.py
+++ b/heat/engine/resources/openstack/nova/keypair.py
@@ -10,7 +10,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -43,8 +42,6 @@ class KeyPair(resource.Resource):
support_status = support.SupportStatus(version='2014.1')
- required_service_extension = 'os-keypairs'
-
PROPERTIES = (
NAME, SAVE_PRIVATE_KEY, PUBLIC_KEY, KEY_TYPE, USER,
) = (
@@ -194,7 +191,7 @@ class KeyPair(resource.Resource):
def _resolve_attribute(self, key):
attr_fn = {self.PRIVATE_KEY_ATTR: self.private_key,
self.PUBLIC_KEY_ATTR: self.public_key}
- return six.text_type(attr_fn[key])
+ return str(attr_fn[key])
def get_reference_id(self):
return self.resource_id
diff --git a/heat/engine/resources/openstack/nova/quota.py b/heat/engine/resources/openstack/nova/quota.py
index 269fcdaa7..8761da97a 100644
--- a/heat/engine/resources/openstack/nova/quota.py
+++ b/heat/engine/resources/openstack/nova/quota.py
@@ -53,8 +53,6 @@ class NovaQuota(resource.Resource):
entity = 'quotas'
- required_service_extension = 'os-quota-sets'
-
PROPERTIES = (
PROJECT, CORES, FIXED_IPS, FLOATING_IPS, INSTANCES,
INJECTED_FILES, INJECTED_FILE_CONTENT_BYTES, INJECTED_FILE_PATH_BYTES,
@@ -117,6 +115,14 @@ class NovaQuota(resource.Resource):
properties.Schema.INTEGER,
_('Quota for the number of injected files. '
'Setting the value to -1 removes the limit.'),
+ support_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ version='14.0.0',
+ message=_('File injection is deprecated in the '
+ 'compute REST API; the OS::Nova::Quota '
+ 'resource will not support it in the '
+ 'future.')
+ ),
constraints=[
constraints.Range(min=-1),
],
@@ -126,6 +132,14 @@ class NovaQuota(resource.Resource):
properties.Schema.INTEGER,
_('Quota for the number of injected file content bytes. '
'Setting the value to -1 removes the limit.'),
+ support_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ version='14.0.0',
+ message=_('File injection is deprecated in the '
+ 'compute REST API; the OS::Nova::Quota '
+ 'resource will not support it in the '
+ 'future.')
+ ),
constraints=[
constraints.Range(min=-1),
],
@@ -135,6 +149,14 @@ class NovaQuota(resource.Resource):
properties.Schema.INTEGER,
_('Quota for the number of injected file path bytes. '
'Setting the value to -1 removes the limit.'),
+ support_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ version='14.0.0',
+ message=_('File injection is deprecated in the '
+ 'compute REST API; the OS::Nova::Quota '
+ 'resource will not support it in the '
+ 'future.')
+ ),
constraints=[
constraints.Range(min=-1),
],
diff --git a/heat/engine/resources/openstack/nova/server.py b/heat/engine/resources/openstack/nova/server.py
index 3815b7938..7fdd31e4d 100644
--- a/heat/engine/resources/openstack/nova/server.py
+++ b/heat/engine/resources/openstack/nova/server.py
@@ -12,11 +12,11 @@
# under the License.
import copy
+import ipaddress
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -39,7 +39,8 @@ cfg.CONF.import_opt('default_user_data_format', 'heat.common.config')
LOG = logging.getLogger(__name__)
NOVA_MICROVERSIONS = (MICROVERSION_TAGS, MICROVERSION_STR_NETWORK,
- MICROVERSION_NIC_TAGS) = ('2.26', '2.37', '2.42')
+ MICROVERSION_NIC_TAGS, MICROVERSION_PERSONALITY_REMOVED
+ ) = ('2.26', '2.37', '2.42', '2.57')
class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
@@ -330,7 +331,10 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
- 'when the server is terminated.')
+ 'when the server is terminated. '
+ 'Defaults to "False" in case of a volume, snapshot '
+ 'or image and to "True" in case of swap or '
+ 'ephemeral.')
),
},
),
@@ -517,7 +521,9 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
'the user_data is passed to Nova unmodified. '
'For SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
- 'associated SoftwareDeployment resources.'),
+ 'associated SoftwareDeployment resources. If the '
+ 'user_data is in CoreOS Ignition (JSON) format, the metadata '
+ 'will be injected into the user_data automatically by Heat.'),
default=cfg.CONF.default_user_data_format,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
@@ -553,9 +559,10 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
),
USER_DATA: properties.Schema(
properties.Schema.STRING,
- _('User data script to be executed by cloud-init. Changes cause '
- 'replacement of the resource by default, but can be ignored '
- 'altogether by setting the `user_data_update_policy` property.'),
+ _('User data script to be executed by cloud-init or CoreOS '
+ 'ignition. Changes cause replacement of the resource '
+ 'by default, but can be ignored altogether by setting the '
+ '`user_data_update_policy` property.'),
default='',
update_allowed=True
),
@@ -697,12 +704,20 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
ACCESSIPV4: attributes.Schema(
_('The manually assigned alternative public IPv4 address '
'of the server.'),
- type=attributes.Schema.STRING
+ type=attributes.Schema.STRING,
+ support_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ version='14.0.0',
+ previous_status=support.SupportStatus(version='2015.1')),
),
ACCESSIPV6: attributes.Schema(
_('The manually assigned alternative public IPv6 address '
'of the server.'),
- type=attributes.Schema.STRING
+ type=attributes.Schema.STRING,
+ support_status=support.SupportStatus(
+ status=support.DEPRECATED,
+ version='14.0.0',
+ previous_status=support.SupportStatus(version='2015.1'))
),
CONSOLE_URLS: attributes.Schema(
_("URLs of server's consoles. "
@@ -884,6 +899,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
if server is not None:
self.resource_id_set(server.id)
+ assert server is not None
return server.id
def check_create_complete(self, server_id):
@@ -929,8 +945,8 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
server, server_data = resource_data
result = {
# there's a risk that flavor id will be int type, so cast to str
- self.FLAVOR: six.text_type(server_data.get(self.FLAVOR)['id']),
- self.IMAGE: six.text_type(server_data.get(self.IMAGE)['id']),
+ self.FLAVOR: str(server_data.get(self.FLAVOR)['id']),
+ self.IMAGE: str(server_data.get(self.IMAGE)['id']),
self.NAME: server_data.get(self.NAME),
self.METADATA: server_data.get(self.METADATA),
self.NETWORKS: self._get_live_networks(server, resource_properties)
@@ -940,8 +956,8 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
return result
def _get_live_networks(self, server, props):
- reality_nets = self._add_attrs_for_address(server,
- extend_networks=False)
+ reality_nets = self._get_server_addresses(server,
+ extend_networks=False)
reality_net_ids = {}
client_plugin = self.client_plugin('neutron')
for net_key in reality_nets:
@@ -970,7 +986,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
reality_net_ids.get(net_id).pop(idx)
break
- for key, value in six.iteritems(reality_nets):
+ for key, value in reality_nets.items():
for address in reality_nets[key]:
new_net = {self.NETWORK_ID: key,
self.NETWORK_FIXED_IP: address['addr']}
@@ -1109,7 +1125,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
LOG.warning("Failed to fetch resource attributes: %s", ex)
return
- def _add_attrs_for_address(self, server, extend_networks=True):
+ def _get_server_addresses(self, server, extend_networks=True):
"""Adds port id, subnets and network attributes to addresses list.
This method is used only for resolving attributes.
@@ -1118,31 +1134,48 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
the net is returned without replacing name on
id.
"""
- nets = copy.deepcopy(server.addresses) or {}
- ifaces = server.interface_list()
- ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
- iface.mac_addr), iface.port_id)
- for iface in ifaces)
- for net_name in nets:
- for addr in nets[net_name]:
- addr['port'] = ip_mac_mapping_on_port_id.get(
- (addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
+ nets = {}
+ ifaces = self.client('neutron').list_ports(device_id=server.id)
+ for port in ifaces['ports']:
+ net_label = self.client('neutron').list_networks(
+ id=port['network_id'])['networks'][0]['name']
+ net = nets.setdefault(net_label, [])
+ for fixed_ip in port['fixed_ips']:
+ addr = {'addr': fixed_ip.get('ip_address'),
+ 'OS-EXT-IPS-MAC:mac_addr': port['mac_address'],
+ 'OS-EXT-IPS:type': 'fixed',
+ 'port': port['id']}
+
+ try:
+ addr['version'] = ipaddress.ip_address(
+ addr['addr']).version
+ except ValueError:
+ addr['version'] = None
+
+ if addr['addr']:
+ fips = self.client('neutron').list_floatingips(
+ fixed_ip_address=addr['addr'])
+ for fip in fips['floatingips']:
+ net.append({
+ 'addr': fip['floating_ip_address'],
+ 'version': addr['version'],
+ 'OS-EXT-IPS-MAC:mac_addr': port['mac_address'],
+ 'OS-EXT-IPS:type': 'floating',
+ 'port': None})
+
# _get_live_networks() uses this method to get reality_nets.
# We don't need to get subnets and network in that case. Only
# do the external calls if extend_networks is true, i.e called
# from _resolve_attribute()
if not extend_networks:
+ net.append(addr)
continue
- try:
- port = self.client('neutron').show_port(
- addr['port'])['port']
- except Exception as ex:
- addr['subnets'], addr['network'] = None, None
- LOG.warning("Failed to fetch resource attributes: %s", ex)
- continue
+
addr['subnets'] = self._get_subnets_attr(port['fixed_ips'])
addr['network'] = self._get_network_attr(port['network_id'])
+ net.append(addr)
+
if extend_networks:
return self._extend_networks(nets)
else:
@@ -1186,7 +1219,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
self.client_plugin().ignore_not_found(e)
return ''
if name == self.ADDRESSES:
- return self._add_attrs_for_address(server)
+ return self._get_server_addresses(server)
if name == self.NETWORKS_ATTR:
return self._extend_networks(server.networks)
if name == self.INSTANCE_NAME:
@@ -1217,7 +1250,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
return
if not nets:
return
- for res in six.itervalues(self.stack):
+ for res in self.stack.values():
if res.has_interface('OS::Neutron::Subnet'):
try:
subnet_net = res.properties.get(subnet.Subnet.NETWORK)
@@ -1578,6 +1611,14 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
# retrieve provider's absolute limits if it will be needed
metadata = self.properties[self.METADATA]
personality = self.properties[self.PERSONALITY]
+
+ if personality:
+ if self.client_plugin().is_version_supported(
+ MICROVERSION_PERSONALITY_REMOVED):
+ msg = (_('Cannot use the personality parameter as nova no '
+ 'longer supports it. Use user_data instead.'))
+ raise exception.StackValidationFailed(message=msg)
+
if metadata or personality:
limits = self.client_plugin().absolute_limits()
@@ -1694,7 +1735,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s',
{'name': self.name, 'status': status})
- if status in list(cp.deferred_server_statuses + ['ACTIVE']):
+ if status in (cp.deferred_server_statuses | {'ACTIVE'}):
return status == 'SUSPENDED'
else:
exc = exception.ResourceUnknownStatus(
@@ -1741,7 +1782,11 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
def check_snapshot_complete(self, image_id):
image = self.client_plugin('glance').get_image(image_id)
if image.status.lower() == self.IMAGE_ACTIVE:
- return True
+ server = self.client_plugin().get_server(self.resource_id)
+ task_state = getattr(server, 'OS-EXT-STS:task_state', '')
+ if task_state not in {'image_uploading', 'image_snapshot_pending',
+ 'image_snapshot', 'image_pending_upload'}:
+ return True
elif image.status.lower() in (self.IMAGE_ERROR, self.IMAGE_DELETED):
raise exception.Error(image.status)
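The rewritten _get_server_addresses() above derives the "version" field of each address with the stdlib ipaddress module instead of relying on Nova's interface list. A small sketch of that derivation with hypothetical addresses:

import ipaddress

def ip_version(addr):
    # mirrors the try/except around ipaddress.ip_address() above
    try:
        return ipaddress.ip_address(addr).version
    except ValueError:
        return None

assert ip_version('10.0.0.5') == 4
assert ip_version('fd00::1') == 6
assert ip_version('not-an-ip') is None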
diff --git a/heat/engine/resources/openstack/nova/server_group.py b/heat/engine/resources/openstack/nova/server_group.py
index 5b5a4c3ff..abaa8c6b7 100644
--- a/heat/engine/resources/openstack/nova/server_group.py
+++ b/heat/engine/resources/openstack/nova/server_group.py
@@ -33,8 +33,6 @@ class ServerGroup(resource.Resource):
entity = 'server_groups'
- required_service_extension = 'os-server-groups'
-
PROPERTIES = (
NAME, POLICIES
) = (
diff --git a/heat/engine/resources/openstack/nova/server_network_mixin.py b/heat/engine/resources/openstack/nova/server_network_mixin.py
index 6679710e5..62d501994 100644
--- a/heat/engine/resources/openstack/nova/server_network_mixin.py
+++ b/heat/engine/resources/openstack/nova/server_network_mixin.py
@@ -65,7 +65,7 @@ class ServerNetworkMixin(object):
"/".join([self.NETWORKS, self.NETWORK_PORT]))
# if user only specifies network and floating ip, floating ip
- # can't be associated as the the neutron port isn't created/managed
+ # can't be associated as the neutron port isn't created/managed
# by heat
if floating_ip is not None:
if net_id is not None and port is None and subnet is None:
@@ -190,12 +190,6 @@ class ServerNetworkMixin(object):
creating. We need to store information about that ports, so store
their IDs to data with key `external_ports`.
"""
- # check if os-attach-interfaces extension is available on this cloud.
- # If it's not, then novaclient's interface_list method cannot be used
- # to get the list of interfaces.
- if not self.client_plugin().has_extension('os-attach-interfaces'):
- return
-
server = self.client().servers.get(self.resource_id)
ifaces = server.interface_list()
external_port_ids = set(iface.port_id for iface in ifaces)
diff --git a/heat/engine/resources/openstack/octavia/flavor.py b/heat/engine/resources/openstack/octavia/flavor.py
new file mode 100644
index 000000000..8cf58b8e4
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/flavor.py
@@ -0,0 +1,132 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common.i18n import _
+from heat.engine import attributes
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine import resource
+from heat.engine import support
+from heat.engine import translation
+
+
+class Flavor(resource.Resource):
+ """A resource for creating octavia Flavors.
+
+ This resource creates and manages octavia Flavors,
+ which allows tuning Load Balancers' capabilities.
+ """
+
+ default_client_name = 'octavia'
+
+ support_status = support.SupportStatus(version='14.0.0')
+
+ PROPERTIES = (
+ DESCRIPTION, ENABLED, FLAVOR_PROFILE, NAME
+ ) = (
+ 'description', 'enabled', 'flavor_profile', 'name'
+ )
+
+ ATTRIBUTES = (
+ FLAVOR_PROFILE_ID_ATTR,
+ ) = (
+ 'flavor_profile_id',
+ )
+
+ properties_schema = {
+ DESCRIPTION: properties.Schema(
+ properties.Schema.STRING,
+ _('Description of this Flavor.'),
+ update_allowed=True,
+ default=''
+ ),
+ ENABLED: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('If the resource is available for use.'),
+ update_allowed=True,
+ default=True,
+ ),
+ NAME: properties.Schema(
+ properties.Schema.STRING,
+ _('Name of this Flavor.'),
+ update_allowed=True
+ ),
+ FLAVOR_PROFILE: properties.Schema(
+ properties.Schema.STRING,
+ _('The ID or the name of the Flavor Profile.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('octavia.flavorprofile')
+ ]
+ ),
+ }
+
+ attributes_schema = {
+ FLAVOR_PROFILE_ID_ATTR: attributes.Schema(
+ _('The ID of the flavor profile.'),
+ type=attributes.Schema.STRING,
+ )
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.FLAVOR_PROFILE],
+ client_plugin=self.client_plugin(),
+ finder='get_flavorprofile'
+ )
+ ]
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items()
+ if v is not None)
+ if self.NAME not in props:
+ props[self.NAME] = self.physical_resource_name()
+ props['flavor_profile_id'] = props.pop(self.FLAVOR_PROFILE)
+ return props
+
+ def handle_create(self):
+ props = self._prepare_args(self.properties)
+
+ flavor = self.client().flavor_create(
+ json={'flavor': props})['flavor']
+ self.resource_id_set(flavor['id'])
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ if prop_diff:
+ if self.NAME in prop_diff and prop_diff[self.NAME] is None:
+ prop_diff[self.NAME] = self.physical_resource_name()
+ self.client().flavor_set(self.resource_id,
+ json={'flavor': prop_diff})
+
+ def handle_delete(self):
+ with self.client_plugin().ignore_not_found:
+ self.client().flavor_delete(self.resource_id)
+ return True
+
+ def _resolve_attribute(self, name):
+ if self.resource_id is None:
+ return None
+ resource = self._show_resource()
+ return resource[name]
+
+ def _show_resource(self):
+ return self.client().flavor_show(self.resource_id)
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::Flavor': Flavor
+ }
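
For orientation, the property-to-request mapping performed by Flavor._prepare_args() above can be pictured with the following standalone sketch. It is illustrative only and not part of this patch; prepare_flavor_args and fallback_name are made-up stand-ins for the resource method and physical_resource_name().

# Illustrative sketch, not part of the patch: mirrors Flavor._prepare_args().
def prepare_flavor_args(properties, fallback_name):
    # Drop unset properties so only explicit values reach the Octavia API.
    props = {k: v for k, v in properties.items() if v is not None}
    # Default the name when the template does not provide one.
    props.setdefault('name', fallback_name)
    # The API expects 'flavor_profile_id' rather than 'flavor_profile'.
    props['flavor_profile_id'] = props.pop('flavor_profile')
    return props

print(prepare_flavor_args(
    {'name': None, 'description': 'gold', 'enabled': True,
     'flavor_profile': 'fp-1234'},
    fallback_name='stack-flavor-abc'))
# -> {'description': 'gold', 'enabled': True, 'name': 'stack-flavor-abc',
#     'flavor_profile_id': 'fp-1234'}
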
diff --git a/heat/engine/resources/openstack/octavia/flavor_profile.py b/heat/engine/resources/openstack/octavia/flavor_profile.py
new file mode 100644
index 000000000..ef20f6aca
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/flavor_profile.py
@@ -0,0 +1,90 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common.i18n import _
+from heat.engine import properties
+from heat.engine import resource
+from heat.engine import support
+
+
+class FlavorProfile(resource.Resource):
+ """A resource for creating octavia Flavor Profiles.
+
+ This resource creates and manages octavia Flavor Profiles,
+ which allow tuning Load Balancers' capabilities.
+ """
+
+ default_client_name = 'octavia'
+
+ support_status = support.SupportStatus(version='14.0.0')
+
+ PROPERTIES = (
+ NAME, FLAVOR_DATA, PROVIDER_NAME
+ ) = (
+ 'name', 'flavor_data', 'provider_name'
+ )
+
+ properties_schema = {
+ NAME: properties.Schema(
+ properties.Schema.STRING,
+ _('Name of this Flavor Profile.'),
+ update_allowed=True
+ ),
+ FLAVOR_DATA: properties.Schema(
+ properties.Schema.STRING,
+ _('JSON string containing the flavor metadata.'),
+ update_allowed=True,
+ required=True
+ ),
+ PROVIDER_NAME: properties.Schema(
+ properties.Schema.STRING,
+ _('Provider name of this Flavor Profile.'),
+ update_allowed=True,
+ ),
+ }
+
+ def _prepare_args(self, properties):
+ props = dict((k, v) for k, v in properties.items()
+ if v is not None)
+ if self.NAME not in props:
+ props[self.NAME] = self.physical_resource_name()
+ return props
+
+ def handle_create(self):
+ props = self._prepare_args(self.properties)
+
+ flavorprofile = self.client().flavorprofile_create(
+ json={'flavorprofile': props})['flavorprofile']
+ self.resource_id_set(flavorprofile['id'])
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ if prop_diff:
+ if self.NAME in prop_diff and prop_diff[self.NAME] is None:
+ prop_diff[self.NAME] = self.physical_resource_name()
+ self.client().flavorprofile_set(
+ self.resource_id,
+ json={'flavorprofile': prop_diff})
+
+ def handle_delete(self):
+ with self.client_plugin().ignore_not_found:
+ self.client().flavorprofile_delete(self.resource_id)
+ return True
+
+ def _show_resource(self):
+ return self.client().flavorprofile_show(self.resource_id)
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::FlavorProfile': FlavorProfile
+ }
diff --git a/heat/engine/resources/openstack/octavia/health_monitor.py b/heat/engine/resources/openstack/octavia/health_monitor.py
index 87d2edc27..a2af07bb0 100644
--- a/heat/engine/resources/openstack/octavia/health_monitor.py
+++ b/heat/engine/resources/openstack/octavia/health_monitor.py
@@ -56,7 +56,7 @@ class HealthMonitor(octavia_base.OctaviaBase):
),
DELAY: properties.Schema(
properties.Schema.INTEGER,
- _('The minimum time in milliseconds between regular connections '
+ _('The minimum time in seconds between regular connections '
'of the member.'),
required=True,
update_allowed=True,
@@ -95,7 +95,7 @@ class HealthMonitor(octavia_base.OctaviaBase):
),
TIMEOUT: properties.Schema(
properties.Schema.INTEGER,
- _('Maximum number of milliseconds for a monitor to wait for a '
+ _('Maximum number of seconds for a monitor to wait for a '
'connection to be established before it times out.'),
required=True,
update_allowed=True,
diff --git a/heat/engine/resources/openstack/octavia/listener.py b/heat/engine/resources/openstack/octavia/listener.py
index c001caa73..ae3d00bdd 100644
--- a/heat/engine/resources/openstack/octavia/listener.py
+++ b/heat/engine/resources/openstack/octavia/listener.py
@@ -17,6 +17,7 @@ from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import support
from heat.engine import translation
@@ -30,11 +31,11 @@ class Listener(octavia_base.OctaviaBase):
PROPERTIES = (
PROTOCOL_PORT, PROTOCOL, LOADBALANCER, DEFAULT_POOL, NAME,
ADMIN_STATE_UP, DESCRIPTION, DEFAULT_TLS_CONTAINER_REF,
- SNI_CONTAINER_REFS, CONNECTION_LIMIT, TENANT_ID
+ SNI_CONTAINER_REFS, CONNECTION_LIMIT, TENANT_ID, ALLOWED_CIDRS
) = (
'protocol_port', 'protocol', 'loadbalancer', 'default_pool', 'name',
'admin_state_up', 'description', 'default_tls_container_ref',
- 'sni_container_refs', 'connection_limit', 'tenant_id'
+ 'sni_container_refs', 'connection_limit', 'tenant_id', 'allowed_cidrs'
)
SUPPORTED_PROTOCOLS = (TCP, HTTP, HTTPS, TERMINATED_HTTPS, PROXY, UDP) = (
@@ -121,6 +122,20 @@ class Listener(octavia_base.OctaviaBase):
properties.Schema.STRING,
_('The ID of the tenant who owns the listener.')
),
+ ALLOWED_CIDRS: properties.Schema(
+ properties.Schema.LIST,
+ _('A list of IPv4 CIDRs, IPv6 CIDRs, or a mix of both. The default '
+ 'is to allow all. When a list of CIDRs is provided, the default '
+ 'switches to deny all.'),
+ update_allowed=True,
+ schema=properties.Schema(
+ properties.Schema.STRING,
+ constraints=[
+ constraints.CustomConstraint('net_cidr')
+ ]
+ ),
+ support_status=support.SupportStatus(version='14.0.0'),
+ )
}
attributes_schema = {
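
The new allowed_cidrs property above accepts a list of IPv4 and/or IPv6 CIDR strings, each checked by the net_cidr custom constraint. As a rough, non-authoritative sketch of the kind of check involved (not Heat's actual constraint code), Python's ipaddress module is enough:

# Illustrative sketch only: roughly what a CIDR validity check looks like.
import ipaddress

def looks_like_cidr(value):
    try:
        ipaddress.ip_network(value, strict=False)
        return True
    except ValueError:
        return False

print([c for c in ('192.0.2.0/24', '2001:db8::/64', 'not-a-cidr')
       if looks_like_cidr(c)])
# -> ['192.0.2.0/24', '2001:db8::/64']
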
diff --git a/heat/engine/resources/openstack/octavia/loadbalancer.py b/heat/engine/resources/openstack/octavia/loadbalancer.py
index a8d8fe510..fc639a81a 100644
--- a/heat/engine/resources/openstack/octavia/loadbalancer.py
+++ b/heat/engine/resources/openstack/octavia/loadbalancer.py
@@ -16,6 +16,7 @@ from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import support
from heat.engine import translation
@@ -28,16 +29,17 @@ class LoadBalancer(octavia_base.OctaviaBase):
PROPERTIES = (
DESCRIPTION, NAME, PROVIDER, VIP_ADDRESS, VIP_SUBNET,
- ADMIN_STATE_UP, TENANT_ID
+ ADMIN_STATE_UP, TENANT_ID, FLAVOR
) = (
'description', 'name', 'provider', 'vip_address', 'vip_subnet',
- 'admin_state_up', 'tenant_id'
+ 'admin_state_up', 'tenant_id', 'flavor'
)
ATTRIBUTES = (
- VIP_ADDRESS_ATTR, VIP_PORT_ATTR, VIP_SUBNET_ATTR, POOLS_ATTR
+ VIP_ADDRESS_ATTR, VIP_PORT_ATTR, VIP_SUBNET_ATTR, POOLS_ATTR,
+ FLAVOR_ID_ATTR
) = (
- 'vip_address', 'vip_port_id', 'vip_subnet_id', 'pools'
+ 'vip_address', 'vip_port_id', 'vip_subnet_id', 'pools', 'flavor_id'
)
properties_schema = {
@@ -86,6 +88,14 @@ class LoadBalancer(octavia_base.OctaviaBase):
constraints=[
constraints.CustomConstraint('keystone.project')
],
+ ),
+ FLAVOR: properties.Schema(
+ properties.Schema.STRING,
+ _('The name or ID of the flavor of the Load Balancer.'),
+ support_status=support.SupportStatus(version='14.0.0'),
+ constraints=[
+ constraints.CustomConstraint('octavia.flavor')
+ ]
)
}
@@ -106,6 +116,10 @@ class LoadBalancer(octavia_base.OctaviaBase):
_('Pools this LoadBalancer is associated with.'),
type=attributes.Schema.LIST,
),
+ FLAVOR_ID_ATTR: attributes.Schema(
+ _('The flavor ID of the LoadBalancer.'),
+ type=attributes.Schema.STRING,
+ )
}
def translation_rules(self, props):
@@ -118,6 +132,13 @@ class LoadBalancer(octavia_base.OctaviaBase):
finder='find_resourceid_by_name_or_id',
entity='subnet'
),
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.FLAVOR],
+ client_plugin=self.client_plugin(),
+ finder='get_flavor',
+ ),
]
def _prepare_args(self, properties):
@@ -126,6 +147,8 @@ class LoadBalancer(octavia_base.OctaviaBase):
if self.NAME not in props:
props[self.NAME] = self.physical_resource_name()
props['vip_subnet_id'] = props.pop(self.VIP_SUBNET)
+ if self.FLAVOR in props:
+ props['flavor_id'] = props.pop(self.FLAVOR)
if 'tenant_id' in props:
props['project_id'] = props.pop('tenant_id')
return props
diff --git a/heat/engine/resources/openstack/octavia/pool.py b/heat/engine/resources/openstack/octavia/pool.py
index 3e454468b..625483736 100644
--- a/heat/engine/resources/openstack/octavia/pool.py
+++ b/heat/engine/resources/openstack/octavia/pool.py
@@ -17,6 +17,7 @@ from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.octavia import octavia_base
+from heat.engine import support
from heat.engine import translation
@@ -32,10 +33,11 @@ class Pool(octavia_base.OctaviaBase):
ADMIN_STATE_UP, DESCRIPTION, SESSION_PERSISTENCE, NAME,
LB_ALGORITHM, LISTENER, LOADBALANCER, PROTOCOL,
SESSION_PERSISTENCE_TYPE, SESSION_PERSISTENCE_COOKIE_NAME,
+ TLS_ENABLED,
) = (
'admin_state_up', 'description', 'session_persistence', 'name',
'lb_algorithm', 'listener', 'loadbalancer', 'protocol',
- 'type', 'cookie_name'
+ 'type', 'cookie_name', 'tls_enabled',
)
SESSION_PERSISTENCE_TYPES = (
@@ -97,8 +99,8 @@ class Pool(octavia_base.OctaviaBase):
'the pool.'),
required=True,
constraints=[
- constraints.AllowedValues(['ROUND_ROBIN',
- 'LEAST_CONNECTIONS', 'SOURCE_IP']),
+ constraints.AllowedValues(['ROUND_ROBIN', 'LEAST_CONNECTIONS',
+ 'SOURCE_IP', 'SOURCE_IP_PORT']),
],
update_allowed=True,
),
@@ -124,6 +126,13 @@ class Pool(octavia_base.OctaviaBase):
constraints.AllowedValues(SUPPORTED_PROTOCOLS),
]
),
+ TLS_ENABLED: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('Enable backend member re-encryption.'),
+ default=False,
+ update_allowed=True,
+ support_status=support.SupportStatus(version='14.0.0'),
+ ),
}
attributes_schema = {
@@ -179,8 +188,8 @@ class Pool(octavia_base.OctaviaBase):
super(Pool, self).validate()
if (self.properties[self.LISTENER] is None and
self.properties[self.LOADBALANCER] is None):
- raise exception.PropertyUnspecifiedError(self.LISTENER,
- self.LOADBALANCER)
+ raise exception.PropertyUnspecifiedError(self.LISTENER,
+ self.LOADBALANCER)
if self.properties[self.SESSION_PERSISTENCE] is not None:
session_p = self.properties[self.SESSION_PERSISTENCE]
diff --git a/heat/engine/resources/openstack/octavia/quota.py b/heat/engine/resources/openstack/octavia/quota.py
new file mode 100644
index 000000000..5951e5cd7
--- /dev/null
+++ b/heat/engine/resources/openstack/octavia/quota.py
@@ -0,0 +1,150 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import constraints
+from heat.engine import properties
+from heat.engine import resource
+from heat.engine import support
+from heat.engine import translation
+
+
+class OctaviaQuota(resource.Resource):
+ """A resource for creating Octavia quotas.
+
+ Octavia Quota is used to manage operational limits for Octavia. Currently,
+ this resource can manage Octavia's quotas for:
+
+ - healthmonitor
+ - listener
+ - loadbalancer
+ - pool
+ - member
+
+ Note that the default Octavia security policy restricts use of this
+ resource to administrators only. Administrators should also be careful
+ to create only one Octavia Quota resource per project, otherwise it
+ will be hard to manage the quota properly.
+ """
+
+ support_status = support.SupportStatus(version='14.0.0')
+
+ default_client_name = 'octavia'
+
+ entity = 'quotas'
+
+ PROPERTIES = (
+ PROJECT, HEALTHMONITOR, LISTENER, LOADBALANCER,
+ POOL, MEMBER
+ ) = (
+ 'project', 'healthmonitor', 'listener', 'loadbalancer',
+ 'pool', 'member'
+ )
+
+ properties_schema = {
+ PROJECT: properties.Schema(
+ properties.Schema.STRING,
+ _('Name or id of the project to set the quota for.'),
+ required=True,
+ constraints=[
+ constraints.CustomConstraint('keystone.project')
+ ]
+ ),
+ HEALTHMONITOR: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Quota for the number of healthmonitors. '
+ 'Setting the value to -1 removes the limit.'),
+ constraints=[
+ constraints.Range(min=-1),
+ ],
+ update_allowed=True
+ ),
+ LISTENER: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Quota for the number of listeners. '
+ 'Setting the value to -1 removes the limit.'),
+ constraints=[
+ constraints.Range(min=-1),
+ ],
+ update_allowed=True
+ ),
+ LOADBALANCER: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Quota for the number of load balancers. '
+ 'Setting the value to -1 removes the limit.'),
+ constraints=[
+ constraints.Range(min=-1),
+ ],
+ update_allowed=True
+ ),
+ POOL: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Quota for the number of pools. '
+ 'Setting the value to -1 removes the limit.'),
+ constraints=[
+ constraints.Range(min=-1),
+ ],
+ update_allowed=True
+ ),
+ MEMBER: properties.Schema(
+ properties.Schema.INTEGER,
+ _('Quota for the number of members. '
+ 'Setting the value to -1 removes the limit.'),
+ constraints=[
+ constraints.Range(min=-1),
+ ],
+ update_allowed=True
+ ),
+ }
+
+ def translation_rules(self, props):
+ return [
+ translation.TranslationRule(
+ props,
+ translation.TranslationRule.RESOLVE,
+ [self.PROJECT],
+ client_plugin=self.client_plugin('keystone'),
+ finder='get_project_id')
+ ]
+
+ def handle_create(self):
+ self._set_quota()
+ self.resource_id_set(self.physical_resource_name())
+
+ def handle_update(self, json_snippet, tmpl_diff, prop_diff):
+ self._set_quota(json_snippet.properties(self.properties_schema,
+ self.context))
+
+ def _set_quota(self, props=None):
+ if props is None:
+ props = self.properties
+
+ kwargs = dict((k, v) for k, v in props.items()
+ if k != self.PROJECT and v is not None)
+ self.client().quotas.update(props.get(self.PROJECT), **kwargs)
+
+ def handle_delete(self):
+ self.client().quotas.delete(self.properties[self.PROJECT])
+
+ def validate(self):
+ super(OctaviaQuota, self).validate()
+ if sum(1 for p in self.properties.values() if p is not None) <= 1:
+ raise exception.PropertyUnspecifiedError(
+ *sorted(set(self.PROPERTIES) - {self.PROJECT}))
+
+
+def resource_mapping():
+ return {
+ 'OS::Octavia::Quota': OctaviaQuota
+ }
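
The validate() method above rejects an OctaviaQuota resource that names a project but sets no quota values. A minimal standalone sketch of that guard, using a plain dict in place of the Heat properties object:

# Illustrative sketch only: the "at least one quota besides the project" check.
def validate_quota_props(props):
    non_null = sum(1 for v in props.values() if v is not None)
    # 'project' is required, so a single non-null value means no quota was set.
    if non_null <= 1:
        raise ValueError('at least one quota property must be specified')

validate_quota_props({'project': 'demo', 'pool': 5})        # passes
try:
    validate_quota_props({'project': 'demo', 'pool': None})
except ValueError as exc:
    print(exc)                                               # raises
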
diff --git a/heat/engine/resources/openstack/sahara/job.py b/heat/engine/resources/openstack/sahara/job.py
index 711ee4943..cda967946 100644
--- a/heat/engine/resources/openstack/sahara/job.py
+++ b/heat/engine/resources/openstack/sahara/job.py
@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
@@ -182,7 +180,8 @@ class SaharaJob(signal_responder.SignalResponder, resource.Resource):
DEFAULT_EXECUTION_URL: attributes.Schema(
_("A signed url to create execution specified in "
"default_execution_data property."),
- type=attributes.Schema.STRING
+ type=attributes.Schema.STRING,
+ cache_mode=attributes.Schema.CACHE_NONE
),
EXECUTIONS: attributes.Schema(
_("List of the job executions."),
@@ -298,7 +297,7 @@ class SaharaJob(signal_responder.SignalResponder, resource.Resource):
def _resolve_attribute(self, name):
if name == self.DEFAULT_EXECUTION_URL:
- return six.text_type(self._get_ec2_signed_url())
+ return str(self._get_ec2_signed_url(never_expire=True))
elif name == self.EXECUTIONS:
try:
job_execs = self.client().job_executions.find(
diff --git a/heat/engine/resources/openstack/sahara/templates.py b/heat/engine/resources/openstack/sahara/templates.py
index 376717227..0f52a2a44 100644
--- a/heat/engine/resources/openstack/sahara/templates.py
+++ b/heat/engine/resources/openstack/sahara/templates.py
@@ -14,7 +14,6 @@
# limitations under the License.
import re
-import six
from oslo_log import log as logging
from oslo_utils import encodeutils
@@ -284,7 +283,7 @@ class SaharaNodeGroupTemplate(resource.Resource):
return props
def handle_create(self):
- props = dict((k, v) for k, v in six.iteritems(self.properties))
+ props = dict((k, v) for k, v in self.properties.items())
args = self._prepare_properties(props)
node_group_template = self.client().node_group_templates.create(**args)
LOG.info("Node Group Template '%s' has been created",
@@ -335,7 +334,7 @@ class SaharaNodeGroupTemplate(resource.Resource):
self.properties[self.PLUGIN_NAME],
self.properties[self.HADOOP_VERSION])
allowed_processes = [item for sublist in
- list(six.itervalues(plugin.node_processes))
+ list(plugin.node_processes.values())
for item in sublist]
unsupported_processes = []
for process in self.properties[self.NODE_PROCESSES]:
@@ -564,7 +563,7 @@ class SaharaClusterTemplate(resource.Resource):
return props
def handle_create(self):
- props = dict((k, v) for k, v in six.iteritems(self.properties))
+ props = dict((k, v) for k, v in self.properties.items())
args = self._prepare_properties(props)
cluster_template = self.client().cluster_templates.create(**args)
LOG.info("Cluster Template '%s' has been created",
diff --git a/heat/engine/resources/openstack/senlin/cluster.py b/heat/engine/resources/openstack/senlin/cluster.py
index 371e12a99..02c59fa64 100644
--- a/heat/engine/resources/openstack/senlin/cluster.py
+++ b/heat/engine/resources/openstack/senlin/cluster.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
@@ -218,12 +216,11 @@ class Cluster(res_base.BaseSenlinResource):
}
cluster = self.client().create_cluster(**params)
- action_id = cluster.location.split('/')[-1]
self.resource_id_set(cluster.id)
# for cluster creation, we just to check the action status
# the action is executed above
action = {
- 'action_id': action_id,
+ 'cluster_id': cluster.id,
'done': False,
}
actions.append(action)
@@ -250,18 +247,15 @@ class Cluster(res_base.BaseSenlinResource):
if self.resource_id is not None:
with self.client_plugin().ignore_not_found:
self.client().delete_cluster(self.resource_id)
- return self.resource_id
+ return self.resource_id
def check_delete_complete(self, resource_id):
- if not resource_id:
- return True
+ if resource_id:
+ with self.client_plugin().ignore_not_found:
+ self.client().get_cluster(self.resource_id)
+ return False
- try:
- self.client().get_cluster(self.resource_id)
- except Exception as ex:
- self.client_plugin().ignore_not_found(ex)
- return True
- return False
+ return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
UPDATE_PROPS = (self.NAME, self.METADATA, self.TIMEOUT, self.PROFILE)
@@ -323,7 +317,7 @@ class Cluster(res_base.BaseSenlinResource):
actions.append(action)
# Update cluster
if any(p in prop_diff for p in UPDATE_PROPS):
- params = dict((k, v) for k, v in six.iteritems(prop_diff)
+ params = dict((k, v) for k, v in prop_diff.items()
if k in UPDATE_PROPS)
params['cluster'] = cluster_obj
if self.PROFILE in params:
@@ -337,7 +331,7 @@ class Cluster(res_base.BaseSenlinResource):
actions.append(action)
# Resize Cluster
if any(p in prop_diff for p in RESIZE_PROPS):
- params = dict((k, v) for k, v in six.iteritems(prop_diff)
+ params = dict((k, v) for k, v in prop_diff.items()
if k in RESIZE_PROPS)
if self.DESIRED_CAPACITY in params:
params['adjustment_type'] = 'EXACT_CAPACITY'
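
The reworked check_delete_complete() above (and the matching change in the Senlin node resource below) polls with a GET wrapped in the client plugin's ignore_not_found context manager: while the GET succeeds the method returns False, and once the object is gone the suppressed not-found error lets it fall through to True. A minimal sketch of that shape, with NotFound and ignore_not_found as stand-ins for the real client plugin pieces:

# Illustrative sketch only: the polling shape of check_delete_complete().
import contextlib

class NotFound(Exception):
    """Stand-in for the client's "resource not found" error."""

def ignore_not_found():
    return contextlib.suppress(NotFound)

def check_delete_complete(get_cluster, cluster_id):
    if cluster_id:
        with ignore_not_found():
            get_cluster(cluster_id)   # still there: keep polling
            return False
    return True                       # gone (or never created): done

def _gone(_id):
    raise NotFound()

print(check_delete_complete(lambda _id: {'id': _id}, 'c1'))  # False
print(check_delete_complete(_gone, 'c1'))                    # True
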
diff --git a/heat/engine/resources/openstack/senlin/node.py b/heat/engine/resources/openstack/senlin/node.py
index c5609f814..1809eb777 100644
--- a/heat/engine/resources/openstack/senlin/node.py
+++ b/heat/engine/resources/openstack/senlin/node.py
@@ -130,18 +130,15 @@ class Node(res_base.BaseSenlinResource):
if self.resource_id is not None:
with self.client_plugin().ignore_not_found:
self.client().delete_node(self.resource_id)
- return self.resource_id
+ return self.resource_id
def check_delete_complete(self, res_id):
- if not res_id:
- return True
-
- try:
- self.client().get_node(self.resource_id)
- except Exception as ex:
- self.client_plugin().ignore_not_found(ex)
- return True
- return False
+ if res_id:
+ with self.client_plugin().ignore_not_found:
+ self.client().get_node(self.resource_id)
+ return False
+
+ return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
actions = []
diff --git a/heat/engine/resources/openstack/senlin/policy.py b/heat/engine/resources/openstack/senlin/policy.py
index 8f6991d90..d225e85f1 100644
--- a/heat/engine/resources/openstack/senlin/policy.py
+++ b/heat/engine/resources/openstack/senlin/policy.py
@@ -107,7 +107,7 @@ class Policy(res_base.BaseSenlinResource):
def remove_bindings(self, bindings):
for bd in bindings:
try:
- bd['action'] = self.client().cluster_detach_policy(
+ bd['action'] = self.client().detach_policy_from_cluster(
bd[self.BD_CLUSTER], self.resource_id)['action']
bd['finished'] = False
except Exception as ex:
@@ -120,7 +120,7 @@ class Policy(res_base.BaseSenlinResource):
def add_bindings(self, bindings):
for bd in bindings:
- bd['action'] = self.client().cluster_attach_policy(
+ bd['action'] = self.client().attach_policy_to_cluster(
bd[self.BD_CLUSTER], self.resource_id,
enabled=bd[self.BD_ENABLED])['action']
bd['finished'] = False
@@ -169,10 +169,13 @@ class Policy(res_base.BaseSenlinResource):
return self.check_action_done(bindings)
def handle_delete(self):
+ if not self.resource_id:
+ return
+
return copy.deepcopy(self.properties[self.BINDINGS])
def check_delete_complete(self, bindings):
- if not self.resource_id:
+ if not bindings:
return True
self.remove_bindings(bindings)
if self.check_action_done(bindings):
diff --git a/heat/engine/resources/openstack/swift/container.py b/heat/engine/resources/openstack/swift/container.py
index 53ed4b3db..948be4cff 100644
--- a/heat/engine/resources/openstack/swift/container.py
+++ b/heat/engine/resources/openstack/swift/container.py
@@ -12,8 +12,7 @@
# under the License.
from oslo_log import log as logging
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse
from heat.common import exception
from heat.common.i18n import _
@@ -224,10 +223,10 @@ class SwiftContainer(resource.Resource):
self.client().get_container(self.resource_id)
def get_reference_id(self):
- return six.text_type(self.resource_id)
+ return str(self.resource_id)
def _resolve_attribute(self, key):
- parsed = list(urlparse.urlparse(self.client().url))
+ parsed = list(parse.urlparse(self.client().url))
if key == self.DOMAIN_NAME:
return parsed[1].split(':')[0]
elif key == self.WEBSITE_URL:
diff --git a/heat/engine/resources/openstack/trove/cluster.py b/heat/engine/resources/openstack/trove/cluster.py
index ced52fe30..8705cb698 100644
--- a/heat/engine/resources/openstack/trove/cluster.py
+++ b/heat/engine/resources/openstack/trove/cluster.py
@@ -63,9 +63,9 @@ class TroveCluster(resource.Resource):
)
_INSTANCE_KEYS = (
- FLAVOR, VOLUME_SIZE, NETWORKS,
+ FLAVOR, VOLUME_SIZE, NETWORKS, AVAILABILITY_ZONE,
) = (
- 'flavor', 'volume_size', 'networks',
+ 'flavor', 'volume_size', 'networks', 'availability_zone',
)
_NICS_KEYS = (
@@ -169,6 +169,11 @@ class TroveCluster(resource.Resource):
},
),
),
+ AVAILABILITY_ZONE: properties.Schema(
+ properties.Schema.STRING,
+ _('Name of the availability zone for the DB instance.'),
+ support_status=support.SupportStatus(version='14.0.0'),
+ ),
}
)
),
@@ -230,6 +235,9 @@ class TroveCluster(resource.Resource):
instance_nics = self.get_instance_nics(instance)
if instance_nics:
instance_dict["nics"] = instance_nics
+ instance_availability_zone = instance[self.AVAILABILITY_ZONE]
+ if instance_availability_zone:
+ instance_dict["availability_zone"] = instance_availability_zone
instances.append(instance_dict)
args = {
diff --git a/heat/engine/resources/openstack/trove/instance.py b/heat/engine/resources/openstack/trove/instance.py
index ba764daf3..fd143aff4 100644
--- a/heat/engine/resources/openstack/trove/instance.py
+++ b/heat/engine/resources/openstack/trove/instance.py
@@ -12,7 +12,6 @@
# under the License.
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -366,7 +365,7 @@ class Instance(resource.Resource):
nic_dict['v4-fixed-ip'] = ip
nics.append(nic_dict)
- # create db instance
+ # create DB instance
instance = self.client().instances.create(
self._dbinstance_name(),
self.flavor,
@@ -501,9 +500,9 @@ class Instance(resource.Resource):
# we retrieve it and try to update it so check again
if self.client_plugin().is_over_limit(exc):
LOG.debug("API rate limit: %(ex)s. Retrying.",
- {'ex': six.text_type(exc)})
+ {'ex': str(exc)})
return False
- if "No change was requested" in six.text_type(exc):
+ if "No change was requested" in str(exc):
LOG.warning("Unexpected instance state change "
"during update. Retrying.")
return False
@@ -518,8 +517,8 @@ class Instance(resource.Resource):
def _update_flavor(self, instance, new_flavor):
if new_flavor:
- current_flav = six.text_type(instance.flavor['id'])
- new_flav = six.text_type(new_flavor)
+ current_flav = str(instance.flavor['id'])
+ new_flav = str(new_flavor)
if new_flav != current_flav:
dmsg = "Resizing instance flavor from %(old)s to %(new)s"
LOG.debug(dmsg % {"old": current_flav, "new": new_flav})
diff --git a/heat/engine/resources/openstack/vitrage/__init__.py b/heat/engine/resources/openstack/vitrage/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/engine/resources/openstack/vitrage/__init__.py
diff --git a/heat/engine/resources/openstack/vitrage/vitrage_template.py b/heat/engine/resources/openstack/vitrage/vitrage_template.py
new file mode 100644
index 000000000..6f9b6c125
--- /dev/null
+++ b/heat/engine/resources/openstack/vitrage/vitrage_template.py
@@ -0,0 +1,135 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+
+from heat.common import exception
+from heat.common.i18n import _
+from heat.engine import properties
+from heat.engine import resource
+from heat.engine import support
+
+LOG = logging.getLogger(__name__)
+
+
+class VitrageTemplate(resource.Resource):
+ """A resource for managing Vitrage templates.
+
+ A Vitrage template defines conditions and actions, based on the Vitrage
+ topology graph. For example, if there is an "instance down" alarm on an
+ instance, then execute a Mistral healing workflow.
+
+ The VitrageTemplate resource generates a template from the input parameters
+ and adds it to Vitrage.
+ """
+
+ default_client_name = "vitrage"
+
+ support_status = support.SupportStatus(version='16.0.0')
+
+ TEMPLATE_NAME = 'template_name'
+
+ PROPERTIES = (
+ TEMPLATE_FILE, TEMPLATE_PARAMS
+ ) = (
+ 'template_file', 'template_params'
+ )
+
+ properties_schema = {
+ TEMPLATE_FILE: properties.Schema(
+ properties.Schema.STRING,
+ _("Path of the Vitrage template to use."),
+ required=True,
+ ),
+ TEMPLATE_PARAMS: properties.Schema(
+ properties.Schema.MAP,
+ _("Input parameters for the Vitrage template."),
+ required=True,
+ ),
+ }
+
+ def handle_create(self):
+ """Create a Vitrage template."""
+
+ # Add the new template to Vitrage
+ params = self.properties[self.TEMPLATE_PARAMS]
+ params[self.TEMPLATE_NAME] = self.physical_resource_name()
+ params['description'] = self.properties.get('description')
+
+ LOG.debug('Vitrage params for template add: %s', params)
+
+ added_templates = self.client().template.add(
+ template_str=self.properties[self.TEMPLATE_FILE], params=params)
+
+ if added_templates and len(added_templates) > 0:
+ if added_templates[0].get('status') == 'LOADING':
+ self.resource_id_set(added_templates[0].get('uuid'))
+ LOG.debug('Added Vitrage template: %s',
+ str(added_templates[0].get('uuid')))
+ else:
+ LOG.warning("Failed to add template to Vitrage: %s",
+ added_templates[0].get('status details'))
+ else:
+ LOG.warning("Failed to add template to Vitrage")
+
+ def handle_delete(self):
+ """Delete the Vitrage template."""
+ if not self.resource_id:
+ return
+ LOG.debug('Deleting Vitrage template %s', self.resource_id)
+ self.client().template.delete(self.resource_id)
+
+ def validate(self):
+ """Validate a Vitrage template."""
+ super(VitrageTemplate, self).validate()
+
+ try:
+ params = self.properties[self.TEMPLATE_PARAMS]
+ params[self.TEMPLATE_NAME] = self.physical_resource_name()
+ params['description'] = self.properties.get('description')
+
+ for key, value in params.items():
+ if value is None:
+ # Some values depend on other objects that have not been
+ # created yet. Use temporary values for now.
+ params[key] = 'temp'
+
+ LOG.debug('Vitrage params for template validate: %s', params)
+
+ validation = self.client().template.validate(
+ template_str=self.properties[self.TEMPLATE_FILE],
+ params=params)
+
+ except Exception as e:
+ msg = _("Exception when calling Vitrage template validate: %s") % \
+ str(e)
+ raise exception.StackValidationFailed(message=msg)
+
+ if not validation or not validation.get('results') or \
+ len(validation['results']) != 1 or \
+ 'status code' not in validation['results'][0]:
+ msg = _("Failed to validate Vitrage template %s") % \
+ self.TEMPLATE_FILE
+ raise exception.StackValidationFailed(message=msg)
+
+ result = validation['results'][0]
+ if result['status code'] != 0:
+ msg = _("Failed to validate Vitrage template. Error: %s") % \
+ result.get('message')
+ raise exception.StackValidationFailed(message=msg)
+
+
+def resource_mapping():
+ return {
+ 'OS::Vitrage::Template': VitrageTemplate,
+ }
diff --git a/heat/engine/resources/openstack/zaqar/queue.py b/heat/engine/resources/openstack/zaqar/queue.py
index a242809cb..3ebaca846 100644
--- a/heat/engine/resources/openstack/zaqar/queue.py
+++ b/heat/engine/resources/openstack/zaqar/queue.py
@@ -20,7 +20,7 @@ from heat.engine import properties
from heat.engine import resource
from heat.engine import support
-from six.moves.urllib import parse as urlparse
+from urllib import parse
class ZaqarQueue(resource.Resource):
@@ -120,7 +120,7 @@ class ZaqarQueue(resource.Resource):
queue_name = self.physical_resource_name()
return '%s/v%s/queues/%s' % (client.api_url.rstrip('/'),
client.api_version,
- urlparse.quote(queue_name))
+ parse.quote(queue_name))
def _resolve_attribute(self, name):
if name == self.QUEUE_ID:
@@ -243,7 +243,7 @@ class ZaqarSignedQueueURL(resource.Resource):
'project_id': data[self.PROJECT],
'queue_name': self.properties[self.QUEUE],
}
- return urlparse.urlencode(query)
+ return parse.urlencode(query)
def handle_delete(self):
# We can't delete a signed URL
diff --git a/heat/engine/resources/openstack/zun/container.py b/heat/engine/resources/openstack/zun/container.py
index 9dd8842e1..d87dfcfc2 100644
--- a/heat/engine/resources/openstack/zun/container.py
+++ b/heat/engine/resources/openstack/zun/container.py
@@ -39,12 +39,12 @@ class Container(resource.Resource,
NAME, IMAGE, COMMAND, CPU, MEMORY,
ENVIRONMENT, WORKDIR, LABELS, IMAGE_PULL_POLICY,
RESTART_POLICY, INTERACTIVE, IMAGE_DRIVER, HINTS,
- HOSTNAME, SECURITY_GROUPS, MOUNTS, NETWORKS,
+ HOSTNAME, SECURITY_GROUPS, MOUNTS, NETWORKS, TTY,
) = (
'name', 'image', 'command', 'cpu', 'memory',
'environment', 'workdir', 'labels', 'image_pull_policy',
'restart_policy', 'interactive', 'image_driver', 'hints',
- 'hostname', 'security_groups', 'mounts', 'networks',
+ 'hostname', 'security_groups', 'mounts', 'networks', 'tty',
)
_NETWORK_KEYS = (
@@ -129,6 +129,11 @@ class Container(resource.Resource,
properties.Schema.BOOLEAN,
_('Keep STDIN open even if not attached.'),
),
+ TTY: properties.Schema(
+ properties.Schema.BOOLEAN,
+ _('Whether the container allocates a TTY for itself.'),
+ support_status=support.SupportStatus(version='14.0.0'),
+ ),
IMAGE_DRIVER: properties.Schema(
properties.Schema.STRING,
_('The image driver to use to pull container image.'),
@@ -320,7 +325,12 @@ class Container(resource.Resource,
command = args.pop(self.COMMAND, None)
if command:
args['command'] = shlex.split(command)
- container = self.client().containers.run(**args)
+
+ if self.TTY in args:
+ container = self.client(
+ version=self.client_plugin().V1_36).containers.run(**args)
+ else:
+ container = self.client().containers.run(**args)
self.resource_id_set(container.uuid)
return container.uuid
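
The handle_create() change above only requests the newer Zun API microversion when the new tty property is actually used, so existing templates keep talking to the default API version. A rough sketch of that gate, where get_client and V1_36 are stand-ins for the client plugin call and its version constant:

# Illustrative sketch only: version-gating the client for the 'tty' argument.
V1_36 = '1.36'   # assumed stand-in for the microversion constant

def run_container(get_client, **args):
    if 'tty' in args:
        client = get_client(version=V1_36)   # newer API needed for tty
    else:
        client = get_client()                # default client otherwise
    return client.containers.run(**args)

class _FakeContainers:
    def run(self, **args):
        return args

class _FakeClient:
    containers = _FakeContainers()

print(run_container(lambda version=None: _FakeClient(),
                    image='cirros', tty=True))
# -> {'image': 'cirros', 'tty': True}
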
diff --git a/heat/engine/resources/server_base.py b/heat/engine/resources/server_base.py
index 983efe298..50a04e0a2 100644
--- a/heat/engine/resources/server_base.py
+++ b/heat/engine/resources/server_base.py
@@ -61,14 +61,15 @@ class BaseServer(stack_user.StackUser):
return container_name, object_name
+ def _get_region_name(self):
+ return self.client_plugin()._get_region_name()
+
def _populate_deployments_metadata(self, meta, props):
meta['deployments'] = meta.get('deployments', [])
meta['os-collect-config'] = meta.get('os-collect-config', {})
occ = meta['os-collect-config']
collectors = list(self.default_collectors)
occ['collectors'] = collectors
- region_name = (self.context.region_name or
- cfg.CONF.region_name_for_services)
# set existing values to None to override any boot-time config
occ_keys = ('heat', 'zaqar', 'cfn', 'request')
@@ -84,11 +85,12 @@ class BaseServer(stack_user.StackUser):
occ.update({'heat': {
'user_id': self._get_user_id(),
'password': self.password,
- 'auth_url': self.context.auth_url,
+ 'auth_url': self.keystone().server_keystone_endpoint_url(
+ fallback_endpoint=self.context.auth_url),
'project_id': self.stack.stack_user_project_id,
'stack_id': self.stack.identifier().stack_path(),
'resource_name': self.name,
- 'region_name': region_name}})
+ 'region_name': self._get_region_name()}})
collectors.append('heat')
elif self.transport_zaqar_message(props):
@@ -96,10 +98,11 @@ class BaseServer(stack_user.StackUser):
occ.update({'zaqar': {
'user_id': self._get_user_id(),
'password': self.password,
- 'auth_url': self.context.auth_url,
+ 'auth_url': self.keystone().server_keystone_endpoint_url(
+ fallback_endpoint=self.context.auth_url),
'project_id': self.stack.stack_user_project_id,
'queue_id': queue_id,
- 'region_name': region_name}})
+ 'region_name': self._get_region_name()}})
collectors.append('zaqar')
elif self.transport_poll_server_cfn(props):
@@ -237,30 +240,12 @@ class BaseServer(stack_user.StackUser):
def _update_software_config_transport(self, prop_diff):
if not self.user_data_software_config():
return
- try:
- metadata = self.metadata_get(True) or {}
- self._create_transport_credentials(prop_diff)
- self._populate_deployments_metadata(metadata, prop_diff)
- # push new metadata to all sources by creating a dummy
- # deployment
- sc = self.rpc_client().create_software_config(
- self.context, 'ignored', 'ignored', '')
- sd = self.rpc_client().create_software_deployment(
- self.context, self.resource_id, sc['id'])
- self.rpc_client().delete_software_deployment(
- self.context, sd['id'])
- self.rpc_client().delete_software_config(
- self.context, sc['id'])
- except Exception:
- # Updating the software config transport is on a best-effort
- # basis as any raised exception here would result in the resource
- # going into an ERROR state, which will be replaced on the next
- # stack update. This is not desirable for a server. The old
- # transport will continue to work, and the new transport may work
- # despite exceptions in the above block.
- LOG.exception(
- 'Error while updating software config transport'
- )
+ self._delete_queue()
+ self._delete_temp_url()
+
+ metadata = self.metadata_get(True) or {}
+ self._create_transport_credentials(prop_diff)
+ self._populate_deployments_metadata(metadata, prop_diff)
def metadata_update(self, new_metadata=None):
"""Refresh the metadata if new_metadata is None."""
@@ -288,25 +273,33 @@ class BaseServer(stack_user.StackUser):
object_name = self.data().get('metadata_object_name')
if not object_name:
return
- with self.client_plugin('swift').ignore_not_found:
- container = self.properties[self.DEPLOYMENT_SWIFT_DATA].get(
- 'container')
- container = container or self.physical_resource_name()
- swift = self.client('swift')
- swift.delete_object(container, object_name)
- headers = swift.head_container(container)
- if int(headers['x-container-object-count']) == 0:
- swift.delete_container(container)
+ endpoint_exists = self.client_plugin().does_endpoint_exist(
+ 'swift', 'object-store')
+ if endpoint_exists:
+ with self.client_plugin('swift').ignore_not_found:
+ container = self.properties[self.DEPLOYMENT_SWIFT_DATA].get(
+ 'container')
+ container = container or self.physical_resource_name()
+ swift = self.client('swift')
+ swift.delete_object(container, object_name)
+ headers = swift.head_container(container)
+ if int(headers['x-container-object-count']) == 0:
+ swift.delete_container(container)
+ self.data_delete('metadata_object_name')
+ self.data_delete('metadata_put_url')
def _delete_queue(self):
queue_id = self.data().get('metadata_queue_id')
if not queue_id:
return
- client_plugin = self.client_plugin('zaqar')
- zaqar = client_plugin.create_for_tenant(
- self.stack.stack_user_project_id, self._user_token())
- with client_plugin.ignore_not_found:
- zaqar.queue(queue_id).delete()
+ endpoint_exists = self.client_plugin().does_endpoint_exist(
+ 'zaqar', 'messaging')
+ if endpoint_exists:
+ client_plugin = self.client_plugin('zaqar')
+ zaqar = client_plugin.create_for_tenant(
+ self.stack.stack_user_project_id, self._user_token())
+ with client_plugin.ignore_not_found:
+ zaqar.queue(queue_id).delete()
self.data_delete('metadata_queue_id')
def handle_snapshot_delete(self, state):
diff --git a/heat/engine/resources/signal_responder.py b/heat/engine/resources/signal_responder.py
index 23c1f9c76..d23730f6f 100644
--- a/heat/engine/resources/signal_responder.py
+++ b/heat/engine/resources/signal_responder.py
@@ -10,12 +10,13 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from urllib import parse
from keystoneclient.contrib.ec2 import utils as ec2_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
-from six.moves.urllib import parse as urlparse
+from oslo_utils import timeutils
from heat.common import exception
from heat.common.i18n import _
@@ -93,6 +94,9 @@ class SignalResponder(stack_user.StackUser):
return self.properties.get(
self.SIGNAL_TRANSPORT) == self.ZAQAR_SIGNAL
+ def _get_region_name(self):
+ return self.client_plugin('heat')._get_region_name()
+
def _get_heat_signal_credentials(self):
"""Return OpenStack credentials that can be used to send a signal.
@@ -103,16 +107,17 @@ class SignalResponder(stack_user.StackUser):
if self.password is None:
self.password = password_gen.generate_openstack_password()
self._create_user()
- return {'auth_url': self.keystone().v3_endpoint,
+ return {'auth_url': self.keystone().server_keystone_endpoint_url(
+ fallback_endpoint=self.keystone().v3_endpoint),
'username': self.physical_resource_name(),
'user_id': self._get_user_id(),
'password': self.password,
'project_id': self.stack.stack_user_project_id,
'domain_id': self.keystone().stack_domain_id,
- 'region_name': (self.context.region_name or
- cfg.CONF.region_name_for_services)}
+ 'region_name': self._get_region_name()}
- def _get_ec2_signed_url(self, signal_type=SIGNAL):
+ def _get_ec2_signed_url(self, signal_type=SIGNAL,
+ never_expire=False):
"""Create properly formatted and pre-signed URL.
This uses the created user for the credentials.
@@ -121,10 +126,6 @@ class SignalResponder(stack_user.StackUser):
:param signal_type: either WAITCONDITION or SIGNAL.
"""
- stored = self.data().get('ec2_signed_url')
- if stored is not None:
- return stored
-
access_key = self.data().get('access_key')
secret_key = self.data().get('secret_key')
@@ -151,7 +152,7 @@ class SignalResponder(stack_user.StackUser):
endpoint = heat_client_plugin.get_heat_cfn_url()
signal_url = ''.join([endpoint, signal_type])
- host_url = urlparse.urlparse(signal_url)
+ host_url = parse.urlparse(signal_url)
path = self.identifier().arn_url_path()
@@ -159,25 +160,25 @@ class SignalResponder(stack_user.StackUser):
# processing in the CFN API (ec2token.py) has an unquoted path, so we
# need to calculate the signature with the path component unquoted, but
# ensure the actual URL contains the quoted version...
- unquoted_path = urlparse.unquote(host_url.path + path)
+ unquoted_path = parse.unquote(host_url.path + path)
+ params = {'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'AWSAccessKeyId': access_key}
+ if not never_expire:
+ params['Timestamp'] = timeutils.utcnow().strftime(
+ "%Y-%m-%dT%H:%M:%SZ")
request = {'host': host_url.netloc.lower(),
'verb': SIGNAL_VERB[signal_type],
'path': unquoted_path,
- 'params': {'SignatureMethod': 'HmacSHA256',
- 'SignatureVersion': '2',
- 'AWSAccessKeyId': access_key,
- 'Timestamp':
- self.created_time.strftime("%Y-%m-%dT%H:%M:%SZ")
- }}
+ 'params': params}
# Sign the request
signer = ec2_utils.Ec2Signer(secret_key)
request['params']['Signature'] = signer.generate(request)
- qs = urlparse.urlencode(request['params'])
+ qs = parse.urlencode(request['params'])
url = "%s%s?%s" % (signal_url.lower(),
path, qs)
- self.data_set('ec2_signed_url', url)
return url
def _delete_ec2_signed_url(self):
@@ -205,7 +206,7 @@ class SignalResponder(stack_user.StackUser):
if project_id is not None:
path = project_id + path[path.find('/'):]
- url = urlparse.urljoin(url, '%s/signal' % path)
+ url = parse.urljoin(url, '%s/signal' % path)
self.data_set('heat_signal_url', url)
return url
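
With the change above, the EC2-style signed URL only carries a Timestamp when never_expire is False; omitting it keeps the signature valid indefinitely, which the Sahara job resource relies on. A simplified, non-authoritative sketch of how the query string is assembled, with sign() standing in for ec2_utils.Ec2Signer.generate():

# Illustrative sketch only: assembling the signed-URL query parameters.
import datetime
from urllib import parse

def build_signal_query(access_key, sign, never_expire=False):
    params = {'SignatureMethod': 'HmacSHA256',
              'SignatureVersion': '2',
              'AWSAccessKeyId': access_key}
    if not never_expire:
        # Only time-limited URLs carry a Timestamp.
        params['Timestamp'] = datetime.datetime.utcnow().strftime(
            '%Y-%m-%dT%H:%M:%SZ')
    params['Signature'] = sign(params)
    return parse.urlencode(params)

print(build_signal_query('AKIAEXAMPLE', sign=lambda p: 'fake-signature'))
print(build_signal_query('AKIAEXAMPLE', sign=lambda p: 'fake-signature',
                         never_expire=True))
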
diff --git a/heat/engine/resources/stack_resource.py b/heat/engine/resources/stack_resource.py
index 7def946c5..5b491888a 100644
--- a/heat/engine/resources/stack_resource.py
+++ b/heat/engine/resources/stack_resource.py
@@ -18,7 +18,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import reflection
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -397,7 +396,7 @@ class StackResource(resource.Resource):
if not class_name.endswith('_Remote'):
return False
- full_message = six.text_type(ex)
+ full_message = str(ex)
if full_message.find('\n') > -1:
message, msg_trace = full_message.split('\n', 1)
else:
@@ -425,27 +424,12 @@ class StackResource(resource.Resource):
if action != expected_action:
return False
- # Has the action really started?
- #
- # The rpc call to update does not guarantee that the stack will be
- # placed into IN_PROGRESS by the time it returns (it runs stack.update
- # in a thread) so you could also have a situation where we get into
- # this method and the update hasn't even started.
- #
- # So we are using a mixture of state (action+status) and updated_at
- # to see if the action has actually progressed.
- # - very fast updates (like something with one RandomString) we will
- # probably miss the state change, but we should catch the updated_at.
- # - very slow updates we won't see the updated_at for quite a while,
- # but should see the state change.
- if cookie is not None:
- prev_state = cookie['previous']['state']
- prev_updated_at = cookie['previous']['updated_at']
- if (prev_updated_at == updated_time and
- prev_state == (action, status)):
- return False
-
if status == self.IN_PROGRESS:
+ if cookie is not None and 'fail_count' in cookie:
+ prev_status_reason = cookie['previous']['status_reason']
+ if status_reason != prev_status_reason:
+ # State has changed, so fail on the next failure
+ cookie['fail_count'] = 1
return False
elif status == self.COMPLETE:
# For operations where we do not take a resource lock
@@ -459,6 +443,10 @@ class StackResource(resource.Resource):
self._nested = None
return done
elif status == self.FAILED:
+ if cookie is not None and 'fail_count' in cookie:
+ cookie['fail_count'] -= 1
+ if cookie['fail_count'] > 0:
+ raise resource.PollDelay(10)
raise exception.ResourceFailure(status_reason, self,
action=action)
else:
@@ -534,9 +522,6 @@ class StackResource(resource.Resource):
action, status, status_reason, updated_time = status_data
kwargs = self._stack_kwargs(user_params, child_template)
- cookie = {'previous': {
- 'updated_at': updated_time,
- 'state': (action, status)}}
kwargs.update({
'stack_identity': dict(self.nested_identifier()),
@@ -550,7 +535,6 @@ class StackResource(resource.Resource):
with excutils.save_and_reraise_exception():
raw_template.RawTemplate.delete(self.context,
kwargs['template_id'])
- return cookie
def check_update_complete(self, cookie=None):
if cookie is not None and 'target_action' in cookie:
@@ -561,9 +545,10 @@ class StackResource(resource.Resource):
return self._check_status_complete(target_action,
cookie=cookie)
- def handle_update_cancel(self, cookie):
+ def _handle_cancel(self):
stack_identity = self.nested_identifier()
if stack_identity is not None:
+ LOG.debug('Cancelling %s of %s' % (self.action, self))
try:
self.rpc_client().stack_cancel_update(
self.context,
@@ -573,6 +558,12 @@ class StackResource(resource.Resource):
LOG.debug('Nested stack %s not in cancellable state',
stack_identity.stack_name)
+ def handle_preempt(self):
+ self._handle_cancel()
+
+ def handle_update_cancel(self, cookie):
+ self._handle_cancel()
+
def handle_create_cancel(self, cookie):
return self.handle_update_cancel(cookie)
@@ -582,12 +573,33 @@ class StackResource(resource.Resource):
if stack_identity is None:
return
+ cookie = None
+ if not self.stack.convergence:
+ try:
+ status_data = stack_object.Stack.get_status(self.context,
+ self.resource_id)
+ except exception.NotFound:
+ return
+
+ action, status, status_reason, updated_time = status_data
+ if (action, status) == (self.stack.DELETE,
+ self.stack.IN_PROGRESS):
+ cookie = {
+ 'previous': {
+ 'state': (action, status),
+ 'status_reason': status_reason,
+ 'updated_at': None,
+ },
+ 'fail_count': 2,
+ }
+
with self.rpc_client().ignore_error_by_name('EntityNotFound'):
if self.abandon_in_progress:
self.rpc_client().abandon_stack(self.context, stack_identity)
else:
self.rpc_client().delete_stack(self.context, stack_identity,
cast=False)
+ return cookie
def handle_delete(self):
return self.delete_nested()
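
The delete_nested() and _check_status_complete() changes above give a nested stack that was already DELETE_IN_PROGRESS a small retry budget: the cookie starts with fail_count=2, each FAILED poll spends one attempt (after a delay) before the failure is raised, and a change of status_reason while IN_PROGRESS trims the budget to one. A simplified standalone sketch of that logic, with PollAgain and Failure standing in for PollDelay and ResourceFailure:

# Illustrative sketch only: the retry budget carried in the delete cookie.
class PollAgain(Exception):
    pass

class Failure(Exception):
    pass

def check_status(status, status_reason, cookie):
    if status == 'IN_PROGRESS':
        if cookie and 'fail_count' in cookie:
            if status_reason != cookie['previous']['status_reason']:
                # The nested stack has moved on; tolerate one more failure.
                cookie['fail_count'] = 1
        return False
    if status == 'FAILED':
        if cookie and 'fail_count' in cookie:
            cookie['fail_count'] -= 1
            if cookie['fail_count'] > 0:
                raise PollAgain()         # wait, then poll again
        raise Failure(status_reason)
    return True                           # COMPLETE

cookie = {'previous': {'status_reason': 'Stack DELETE started'},
          'fail_count': 2}
try:
    check_status('FAILED', 'Resource DELETE failed', cookie)
except PollAgain:
    print('retrying, fail_count =', cookie['fail_count'])   # -> 1
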
diff --git a/heat/engine/resources/template_resource.py b/heat/engine/resources/template_resource.py
index 44959e966..67fb1e01c 100644
--- a/heat/engine/resources/template_resource.py
+++ b/heat/engine/resources/template_resource.py
@@ -14,7 +14,6 @@
from oslo_log import log as logging
from oslo_serialization import jsonutils
from requests import exceptions
-import six
from heat.common import exception
from heat.common import grouputils
@@ -98,7 +97,7 @@ class TemplateResource(stack_resource.StackResource):
try:
return urlfetch.get(template_name, allowed_schemes=allowed_schemes)
except (IOError, exceptions.RequestException) as r_exc:
- args = {'name': template_name, 'exc': six.text_type(r_exc)}
+ args = {'name': template_name, 'exc': str(r_exc)}
msg = _('Could not fetch remote template '
'"%(name)s": %(exc)s') % args
raise exception.NotFound(msg_fmt=msg)
@@ -290,7 +289,7 @@ class TemplateResource(stack_resource.StackResource):
def validate_template(self):
if self.validation_exception is not None:
- msg = six.text_type(self.validation_exception)
+ msg = str(self.validation_exception)
raise exception.StackValidationFailed(message=msg)
return super(TemplateResource, self).validate_template()
@@ -317,7 +316,7 @@ class TemplateResource(stack_resource.StackResource):
def get_reference_id(self):
if self.resource_id is None:
- return six.text_type(self.name)
+ return str(self.name)
if STACK_ID_OUTPUT in self.attributes.cached_attrs:
return self.attributes.cached_attrs[STACK_ID_OUTPUT]
diff --git a/heat/engine/resources/wait_condition.py b/heat/engine/resources/wait_condition.py
index 87d797237..74de91819 100644
--- a/heat/engine/resources/wait_condition.py
+++ b/heat/engine/resources/wait_condition.py
@@ -14,7 +14,6 @@
import collections
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -41,6 +40,15 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
'SUCCESS',
)
+ def _get_ec2_signed_url(self, signal_type=signal_responder.WAITCONDITION):
+ stored = self.data().get('ec2_signed_url')
+ if stored is not None:
+ return stored
+ url = super(BaseWaitConditionHandle,
+ self)._get_ec2_signed_url(signal_type)
+ self.data_set('ec2_signed_url', url)
+ return url
+
def handle_create(self):
super(BaseWaitConditionHandle, self).handle_create()
self.resource_id_set(self._get_user_id())
@@ -49,7 +57,7 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
return status in self.WAIT_STATUSES
def _metadata_format_ok(self, metadata):
- if not isinstance(metadata, collections.Mapping):
+ if not isinstance(metadata, collections.abc.Mapping):
return False
if set(metadata) != set(self.METADATA_KEYS):
return False
@@ -70,7 +78,7 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
raise ValueError(_("Metadata format invalid"))
new_entry = signal_data.copy()
- unique_id = six.text_type(new_entry.pop(self.UNIQUE_ID))
+ unique_id = str(new_entry.pop(self.UNIQUE_ID))
new_rsrc_metadata = latest_rsrc_metadata.copy()
if unique_id in new_rsrc_metadata:
@@ -92,12 +100,12 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
def get_status(self):
"""Return a list of the Status values for the handle signals."""
return [v[self.STATUS]
- for v in six.itervalues(self.metadata_get(refresh=True))]
+ for v in self.metadata_get(refresh=True).values()]
def get_status_reason(self, status):
"""Return a list of reasons associated with a particular status."""
return [v[self.REASON]
- for v in six.itervalues(self.metadata_get(refresh=True))
+ for v in self.metadata_get(refresh=True).values()
if v[self.STATUS] == status]
diff --git a/heat/engine/rsrc_defn.py b/heat/engine/rsrc_defn.py
index 6de61c373..04e12eed3 100644
--- a/heat/engine/rsrc_defn.py
+++ b/heat/engine/rsrc_defn.py
@@ -12,13 +12,11 @@
import collections
import copy
+import functools
import itertools
import operator
-import six
-
from heat.common import exception
-from heat.common.i18n import repr_wrapper
from heat.engine import function
from heat.engine import properties
@@ -37,7 +35,6 @@ FIELDS = (
)
-@repr_wrapper
class ResourceDefinition(object):
"""A definition of a resource, independent of any template format."""
@@ -113,22 +110,22 @@ class ResourceDefinition(object):
self._dep_names = None
self._all_dep_attrs = None
- assert isinstance(self.description, six.string_types)
+ assert isinstance(self.description, str)
if properties is not None:
- assert isinstance(properties, (collections.Mapping,
+ assert isinstance(properties, (collections.abc.Mapping,
function.Function))
self._hash ^= _hash_data(properties)
if metadata is not None:
- assert isinstance(metadata, (collections.Mapping,
+ assert isinstance(metadata, (collections.abc.Mapping,
function.Function))
self._hash ^= _hash_data(metadata)
if depends is not None:
- assert isinstance(depends, (collections.Sequence,
+ assert isinstance(depends, (collections.abc.Sequence,
function.Function))
- assert not isinstance(depends, six.string_types)
+ assert not isinstance(depends, str)
self._hash ^= _hash_data(depends)
if deletion_policy is not None:
@@ -136,18 +133,18 @@ class ResourceDefinition(object):
self._hash ^= _hash_data(deletion_policy)
if update_policy is not None:
- assert isinstance(update_policy, (collections.Mapping,
+ assert isinstance(update_policy, (collections.abc.Mapping,
function.Function))
self._hash ^= _hash_data(update_policy)
if external_id is not None:
- assert isinstance(external_id, (six.string_types,
+ assert isinstance(external_id, (str,
function.Function))
self._hash ^= _hash_data(external_id)
self._deletion_policy = self.RETAIN
if condition is not None:
- assert isinstance(condition, (six.string_types, bool,
+ assert isinstance(condition, (str, bool,
function.Function))
self._hash ^= _hash_data(condition)
@@ -257,9 +254,9 @@ class ResourceDefinition(object):
path(PROPERTIES))
metadata_deps = function.dependencies(self._metadata,
path(METADATA))
- implicit_depends = six.moves.map(lambda rp: rp.name,
- itertools.chain(prop_deps,
- metadata_deps))
+ implicit_depends = map(lambda rp: rp.name,
+ itertools.chain(prop_deps,
+ metadata_deps))
# (ricolin) External resource should not depend on any other
# resources. This operation is not allowed for now.
@@ -289,9 +286,7 @@ class ResourceDefinition(object):
if getattr(res, 'strict_dependency', True):
return res
- return six.moves.filter(None,
- six.moves.map(get_resource,
- self.required_resource_names()))
+ return filter(None, map(get_resource, self.required_resource_names()))
def set_translation_rules(self, rules=None, client_resolve=True):
"""Helper method to update properties with translation rules."""
@@ -306,7 +301,8 @@ class ResourceDefinition(object):
"""
props = properties.Properties(schema, self._properties or {},
function.resolve, context=context,
- section=PROPERTIES)
+ section=PROPERTIES,
+ rsrc_description=self.description)
props.update_translation(self._rules, self._client_resolve)
return props
@@ -435,12 +431,12 @@ def _hash_data(data):
if isinstance(data, function.Function):
data = copy.deepcopy(data)
- if not isinstance(data, six.string_types):
- if isinstance(data, collections.Sequence):
+ if not isinstance(data, str):
+ if isinstance(data, collections.abc.Sequence):
return hash(tuple(_hash_data(d) for d in data))
- if isinstance(data, collections.Mapping):
+ if isinstance(data, collections.abc.Mapping):
item_hashes = (hash(k) ^ _hash_data(v) for k, v in data.items())
- return six.moves.reduce(operator.xor, item_hashes, 0)
+ return functools.reduce(operator.xor, item_hashes, 0)
return hash(data)
diff --git a/heat/engine/scheduler.py b/heat/engine/scheduler.py
index 5c72d13a1..1e97343b1 100644
--- a/heat/engine/scheduler.py
+++ b/heat/engine/scheduler.py
@@ -11,17 +11,17 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
import sys
import types
+import debtcollector
import eventlet
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
-import six
from heat.common.i18n import _
-from heat.common.i18n import repr_wrapper
from heat.common import timeutils
LOG = logging.getLogger(__name__)
@@ -40,9 +40,9 @@ def task_description(task):
if name is not None and isinstance(task, (types.MethodType,
types.FunctionType)):
if getattr(task, '__self__', None) is not None:
- return '%s from %s' % (six.text_type(name), task.__self__)
+ return '%s from %s' % (str(name), task.__self__)
else:
- return six.text_type(name)
+ return str(name)
return encodeutils.safe_decode(repr(task))
@@ -57,7 +57,7 @@ class Timeout(BaseException):
def __init__(self, task_runner, timeout):
"""Initialise with the TaskRunner and a timeout period in seconds."""
- message = _('%s Timed out') % six.text_type(task_runner)
+ message = _('%s Timed out') % str(task_runner)
super(Timeout, self).__init__(message)
self._duration = timeutils.Duration(timeout)
@@ -91,7 +91,6 @@ class TimedCancel(Timeout):
return False
-@six.python_2_unicode_compatible
class ExceptionGroup(Exception):
"""Container for multiple exceptions.
@@ -111,7 +110,6 @@ class ExceptionGroup(Exception):
return str([str(ex) for ex in self.exceptions])
-@six.python_2_unicode_compatible
class TaskRunner(object):
"""Wrapper for a resumable task (co-routine)."""
@@ -142,12 +140,12 @@ class TaskRunner(object):
def __str__(self):
"""Return a human-readable string representation of the task."""
text = 'Task %s' % self.name
- return six.text_type(text)
+ return str(text)
def _sleep(self, wait_time):
"""Sleep for the specified number of seconds."""
if ENABLE_SLEEP and wait_time is not None:
- LOG.debug('%s sleeping', six.text_type(self))
+ LOG.debug('%s sleeping', str(self))
eventlet.sleep(wait_time)
def __call__(self, wait_time=1, timeout=None, progress_callback=None):
@@ -174,7 +172,7 @@ class TaskRunner(object):
assert self._runner is None, "Task already started"
assert not self._done, "Task already cancelled"
- LOG.debug('%s starting', six.text_type(self))
+ LOG.debug('%s starting', str(self))
if timeout is not None:
self._timeout = Timeout(self, timeout)
@@ -186,7 +184,7 @@ class TaskRunner(object):
else:
self._runner = False
self._done = True
- LOG.debug('%s done (not resumable)', six.text_type(self))
+ LOG.debug('%s done (not resumable)', str(self))
def step(self):
"""Run another step of the task.
@@ -206,15 +204,15 @@ class TaskRunner(object):
self._timeout.trigger(self._runner)
else:
- LOG.debug('%s running', six.text_type(self))
+ LOG.debug('%s running', str(self))
try:
poll_period = next(self._runner)
except StopIteration:
self._done = True
- LOG.debug('%s complete', six.text_type(self))
+ LOG.debug('%s complete', str(self))
else:
- if isinstance(poll_period, six.integer_types):
+ if isinstance(poll_period, int):
self._poll_period = max(poll_period, 1)
else:
self._poll_period = 1
@@ -270,7 +268,7 @@ class TaskRunner(object):
return
if not self.started() or grace_period is None:
- LOG.debug('%s cancelled', six.text_type(self))
+ LOG.debug('%s cancelled', str(self))
self._done = True
if self.started():
self._runner.close()
@@ -296,6 +294,10 @@ class TaskRunner(object):
return self.__nonzero__()
+@debtcollector.removals.remove(message="Use the Python 3 'yield from' keyword "
+ "in place of 'yield', instead of "
+ "decorating with @wrappertask.",
+ stacklevel=1)
def wrappertask(task):
"""Decorator for a task that needs to drive a subtask.
@@ -312,8 +314,11 @@ def wrappertask(task):
self.cleanup()
"""
- @six.wraps(task)
+ @functools.wraps(task)
def wrapper(*args, **kwargs):
+ # This could be simplified by using 'yield from' for the parent loop
+ # as well, but not without adding yet another frame to the stack
+ # for the subtasks.
parent = task(*args, **kwargs)
try:
@@ -324,28 +329,7 @@ def wrappertask(task):
while True:
try:
if isinstance(subtask, types.GeneratorType):
- subtask_running = True
- try:
- step = next(subtask)
- except StopIteration:
- subtask_running = False
-
- while subtask_running:
- try:
- yield step
- except GeneratorExit:
- subtask.close()
- raise
- except: # noqa
- try:
- step = subtask.throw(*sys.exc_info())
- except StopIteration:
- subtask_running = False
- else:
- try:
- step = next(subtask)
- except StopIteration:
- subtask_running = False
+ yield from subtask
else:
yield subtask
except GeneratorExit:
@@ -365,7 +349,6 @@ def wrappertask(task):
return wrapper
-@repr_wrapper
class DependencyTaskGroup(object):
"""Task which manages group of subtasks that have ordering dependencies."""
@@ -401,7 +384,7 @@ class DependencyTaskGroup(object):
if name is None:
name = '(%s) %s' % (getattr(task, '__name__',
task_description(task)),
- six.text_type(dependencies))
+ str(dependencies))
self.name = name
def __repr__(self):
@@ -415,7 +398,7 @@ class DependencyTaskGroup(object):
thrown_exceptions = []
try:
- while any(six.itervalues(self._runners)):
+ while any(self._runners.values()):
try:
for k, r in self._ready():
r.start()
@@ -425,36 +408,31 @@ class DependencyTaskGroup(object):
if self._graph:
try:
yield
- except Exception:
- thrown_exceptions.append(sys.exc_info())
+ except Exception as err:
+ thrown_exceptions.append(err)
raise
for k, r in self._running():
if r.step():
del self._graph[k]
- except Exception:
- exc_info = None
- try:
- exc_info = sys.exc_info()
- if self.aggregate_exceptions:
- self._cancel_recursively(k, r)
- else:
- self.cancel_all(grace_period=self.error_wait_time)
- raised_exceptions.append(exc_info)
- finally:
- del exc_info
+ except Exception as err:
+ if self.aggregate_exceptions:
+ self._cancel_recursively(k, r)
+ else:
+ self.cancel_all(grace_period=self.error_wait_time)
+ raised_exceptions.append(err)
except: # noqa
with excutils.save_and_reraise_exception():
self.cancel_all()
if raised_exceptions:
if self.aggregate_exceptions:
- raise ExceptionGroup(v for t, v, tb in raised_exceptions)
+ raise ExceptionGroup(err for err in raised_exceptions)
else:
if thrown_exceptions:
- six.reraise(*thrown_exceptions[-1])
+ raise thrown_exceptions[-1]
else:
- six.reraise(*raised_exceptions[0])
+ raise raised_exceptions[0]
finally:
del raised_exceptions
del thrown_exceptions
@@ -466,7 +444,7 @@ class DependencyTaskGroup(object):
def get_grace_period(key):
return grace_period
- for k, r in six.iteritems(self._runners):
+ for k, r in self._runners.items():
if not r.started() or r.done():
gp = None
else:
@@ -474,13 +452,13 @@ class DependencyTaskGroup(object):
try:
r.cancel(grace_period=gp)
except Exception as ex:
- LOG.debug('Exception cancelling task: %s', six.text_type(ex))
+ LOG.debug('Exception cancelling task: %s', str(ex))
def _cancel_recursively(self, key, runner):
try:
runner.cancel()
except Exception as ex:
- LOG.debug('Exception cancelling task: %s', six.text_type(ex))
+ LOG.debug('Exception cancelling task: %s', str(ex))
node = self._graph[key]
for dependent_node in node.required_by():
node_runner = self._runners[dependent_node]
@@ -510,4 +488,4 @@ class DependencyTaskGroup(object):
def running(k_r):
return k_r[0] in self._graph and k_r[1].started()
- return six.moves.filter(running, six.iteritems(self._runners))
+ return filter(running, self._runners.items())
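The wrappertask change above drops the hand-written loop that drove each subtask and leans on 'yield from', which forwards next(), send(), throw() and close() to the delegate generator. A small sketch of that equivalence with throwaway generators:

    def subtask():
        yield 'sub step 1'
        yield 'sub step 2'

    def parent():
        yield 'parent step'
        # Delegation: the caller now drives subtask() directly through
        # parent(), including exception and close() propagation.
        yield from subtask()
        yield 'parent done'

    assert list(parent()) == ['parent step', 'sub step 1', 'sub step 2',
                              'parent done']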
diff --git a/heat/engine/service.py b/heat/engine/service.py
index a0e607aea..00c7f2c61 100644
--- a/heat/engine/service.py
+++ b/heat/engine/service.py
@@ -32,7 +32,6 @@ from oslo_service import threadgroup
from oslo_utils import timeutils
from oslo_utils import uuidutils
from osprofiler import profiler
-import six
import webob
from heat.common import context
@@ -219,7 +218,7 @@ class ThreadGroupManager(object):
if stack_id not in self.groups:
self.groups[stack_id] = threadgroup.ThreadGroup()
self.groups[stack_id].add_timer(cfg.CONF.periodic_interval,
- func, *args, **kwargs)
+ func, None, *args, **kwargs)
def add_msg_queue(self, stack_id, msg_queue):
self.msg_queues[stack_id].append(msg_queue)
@@ -251,7 +250,7 @@ class ThreadGroupManager(object):
for th in threads:
th.link(mark_done, th)
- while not all(six.itervalues(links_done)):
+ while not all(links_done.values()):
eventlet.sleep()
def send(self, stack_id, message):
@@ -674,6 +673,9 @@ class EngineService(service.ServiceBase):
raise exception.MissingCredentialError(required='X-Auth-Key')
def _validate_new_stack(self, cnxt, stack_name, parsed_template):
+ # We'll check that the stack name is unique in the tenant while
+ # storing it in the database to avoid races, but also check it here
+ # before validating so we can fail early.
if stack_object.Stack.get_by_name(cnxt, stack_name):
raise exception.StackExists(stack_name=stack_name)
@@ -693,7 +695,7 @@ class EngineService(service.ServiceBase):
except AssertionError:
raise
except Exception as ex:
- raise exception.StackValidationFailed(message=six.text_type(ex))
+ raise exception.StackValidationFailed(message=str(ex))
max_resources = cfg.CONF.max_resources_per_stack
if max_resources == -1:
@@ -832,7 +834,7 @@ class EngineService(service.ServiceBase):
stack.create_stack_user_project_id()
except exception.AuthorizationFailure as ex:
stack.state_set(stack.action, stack.FAILED,
- six.text_type(ex))
+ str(ex))
def _stack_create(stack, msg_queue=None):
# Create/Adopt a stack, and create the periodic task if successful
@@ -894,7 +896,7 @@ class EngineService(service.ServiceBase):
# stack definition. If PARAM_EXISTING is specified, we merge
# any environment provided into the existing one and attempt
# to use the existing stack template, if one is not provided.
- if args.get(rpc_api.PARAM_EXISTING):
+ if args.get(rpc_api.PARAM_EXISTING, False):
assert template_id is None, \
"Cannot specify template_id with PARAM_EXISTING"
@@ -974,9 +976,9 @@ class EngineService(service.ServiceBase):
common_params.setdefault(rpc_api.PARAM_CONVERGE,
current_stack.converge)
- if args.get(rpc_api.PARAM_EXISTING):
- common_params.setdefault(rpc_api.STACK_TAGS,
- current_stack.tags)
+ if args.get(rpc_api.PARAM_EXISTING, False):
+ if rpc_api.STACK_TAGS not in common_params:
+ common_params[rpc_api.STACK_TAGS] = current_stack.tags
current_kwargs.update(common_params)
updated_stack = parser.Stack(cnxt, stack_name, tmpl,
**current_kwargs)
@@ -1021,9 +1023,11 @@ class EngineService(service.ServiceBase):
LOG.info('Updating stack %s', db_stack.name)
if cfg.CONF.reauthentication_auth_method == 'trusts':
current_stack = parser.Stack.load(
- cnxt, stack=db_stack, use_stored_context=True)
+ cnxt, stack=db_stack, use_stored_context=True,
+ check_refresh_cred=True)
else:
- current_stack = parser.Stack.load(cnxt, stack=db_stack)
+ current_stack = parser.Stack.load(cnxt, stack=db_stack,
+ check_refresh_cred=True)
self.resource_enforcer.enforce_stack(current_stack,
is_registered_policy=True)
@@ -1286,7 +1290,7 @@ class EngineService(service.ServiceBase):
try:
self._validate_template(cnxt, tmpl)
except Exception as ex:
- return {'Error': six.text_type(ex)}
+ return {'Error': str(ex)}
stack_name = 'dummy'
stack = parser.Stack(cnxt, stack_name, tmpl,
@@ -1295,7 +1299,7 @@ class EngineService(service.ServiceBase):
stack.validate(ignorable_errors=ignorable_errors,
validate_res_tmpl_only=True)
except exception.StackValidationFailed as ex:
- return {'Error': six.text_type(ex)}
+ return {'Error': str(ex)}
def filter_parameter(p):
return p.name not in stack.parameters.PSEUDO_PARAMETERS
@@ -1640,7 +1644,7 @@ class EngineService(service.ServiceBase):
supported_funcs.update(tmpl_class.plugin.condition_functions)
functions = []
- for func_name, func in six.iteritems(supported_funcs):
+ for func_name, func in supported_funcs.items():
if func is not hot_functions.Removed:
desc = pydoc.splitdoc(pydoc.getdoc(func))[0]
functions.append(
@@ -1677,7 +1681,7 @@ class EngineService(service.ServiceBase):
raise exception.ResourceTypeUnavailable(
service_name=resource_class.default_client_name,
resource_type=type_name,
- reason=six.text_type(exc))
+ reason=str(exc))
else:
if not svc_available:
raise exception.ResourceTypeUnavailable(
@@ -2019,7 +2023,7 @@ class EngineService(service.ServiceBase):
stack = parser.Stack.load(cnxt, stack=s)
return [api.format_stack_resource(resource)
- for name, resource in six.iteritems(stack)
+ for name, resource in stack.items()
if resource_name is None or name == resource_name]
@context.request_context
@@ -2101,7 +2105,7 @@ class EngineService(service.ServiceBase):
if stack.status == stack.IN_PROGRESS:
LOG.info('%(stack)s is in state %(action)s_IN_PROGRESS, '
'snapshot is not permitted.', {
- 'stack': six.text_type(stack),
+ 'stack': str(stack),
'action': stack.action})
raise exception.ActionInProgress(stack_name=stack.name,
action=stack.action)
@@ -2294,15 +2298,15 @@ class EngineService(service.ServiceBase):
msg = _("Migration of nested stack %s") % stack_id
raise exception.NotSupported(feature=msg)
- if parent_stack.status != parent_stack.COMPLETE:
- raise exception.ActionNotComplete(stack_name=parent_stack.name,
- action=parent_stack.action)
-
if parent_stack.convergence:
LOG.info("Convergence was already enabled for stack %s",
stack_id)
return
+ if parent_stack.status != parent_stack.COMPLETE:
+ raise exception.ActionNotComplete(stack_name=parent_stack.name,
+ action=parent_stack.action)
+
db_stacks = stack_object.Stack.get_all_by_root_owner_id(
ctxt, parent_stack.id)
stacks = [parser.Stack.load(ctxt, stack_id=st.id,
@@ -2318,18 +2322,12 @@ class EngineService(service.ServiceBase):
try:
for st in stacks:
lock = stack_lock.StackLock(ctxt, st.id, self.engine_id)
- lock.acquire()
locks.append(lock)
- sess = ctxt.session
- sess.begin(subtransactions=True)
- try:
+ lock.acquire()
+ with ctxt.session.begin():
for st in stacks:
if not st.convergence:
st.migrate_to_convergence()
- sess.commit()
- except Exception:
- sess.rollback()
- raise
finally:
for lock in locks:
lock.release()
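The convergence-migration hunk above replaces the explicit begin/commit/rollback dance with the session's own context manager, which commits on success and rolls back on any exception. A rough sketch of that behaviour using contextlib and a hypothetical session object:

    import contextlib

    @contextlib.contextmanager
    def transaction(session):
        # Commit if the with-block finishes cleanly, roll back and re-raise
        # otherwise; this is what the removed try/commit/except/rollback did.
        try:
            yield session
            session.commit()
        except Exception:
            session.rollback()
            raise

    # Usage sketch:
    # with transaction(session):
    #     for st in stacks:
    #         st.migrate_to_convergence()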
diff --git a/heat/engine/service_software_config.py b/heat/engine/service_software_config.py
index 82cb0f0af..637d7874e 100644
--- a/heat/engine/service_software_config.py
+++ b/heat/engine/service_software_config.py
@@ -11,14 +11,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+import itertools
import uuid
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import requests
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse
from heat.common import crypt
from heat.common import exception
@@ -88,8 +88,7 @@ class SoftwareConfigService(object):
cnxt, server_id)
# filter out the sds with None config
- flt_sd = six.moves.filterfalse(lambda sd: sd.config is None,
- all_sd)
+ flt_sd = itertools.filterfalse(lambda sd: sd.config is None, all_sd)
# sort the configs by config name, to give the list of metadata a
# deterministic and controllable order.
flt_sd_s = sorted(flt_sd, key=lambda sd: sd.config.name)
@@ -153,7 +152,7 @@ class SoftwareConfigService(object):
raise exception.ConcurrentTransaction(action=action)
def _refresh_swift_software_deployment(self, cnxt, sd, deploy_signal_id):
- container, object_name = urlparse.urlparse(
+ container, object_name = parse.urlparse(
deploy_signal_id).path.split('/')[-2:]
swift_plugin = cnxt.clients.client_plugin('swift')
swift = swift_plugin.client()
@@ -281,7 +280,7 @@ class SoftwareConfigService(object):
'stack_user_project_id': stack_user_project_id,
'action': action,
'status': status,
- 'status_reason': six.text_type(status_reason)})
+ 'status_reason': str(status_reason)})
self._push_metadata_software_deployments(
cnxt, server_id, stack_user_project_id)
return api.format_software_deployment(sd)
@@ -332,7 +331,7 @@ class SoftwareConfigService(object):
if status == rpc_api.SOFTWARE_DEPLOYMENT_FAILED:
# build a status reason out of all of the values of outputs
# flagged as error_output
- status_reasons = [' : '.join((k, six.text_type(status_reasons[k])))
+ status_reasons = [' : '.join((k, str(status_reasons[k])))
for k in status_reasons]
status_reason = ', '.join(status_reasons)
else:
@@ -362,7 +361,7 @@ class SoftwareConfigService(object):
if status:
update_data['status'] = status
if status_reason:
- update_data['status_reason'] = six.text_type(status_reason)
+ update_data['status_reason'] = str(status_reason)
if updated_at:
update_data['updated_at'] = timeutils.normalize_time(
timeutils.parse_isotime(updated_at))
diff --git a/heat/engine/software_config_io.py b/heat/engine/software_config_io.py
index 8ca2d28f7..3e2906302 100644
--- a/heat/engine/software_config_io.py
+++ b/heat/engine/software_config_io.py
@@ -17,7 +17,6 @@ APIs for dealing with input and output definitions for Software Configurations.
import collections
import copy
-import six
from heat.common.i18n import _
@@ -104,7 +103,7 @@ class IOConfig(object):
try:
self._props.validate()
except exception.StackValidationFailed as exc:
- raise ValueError(six.text_type(exc))
+ raise ValueError(str(exc))
def name(self):
"""Return the name of the input or output."""
@@ -179,10 +178,14 @@ def check_io_schema_list(io_configs):
Raises TypeError if the list itself is not a list, or if any of the
members are not dicts.
"""
- if (not isinstance(io_configs, collections.Sequence) or
- isinstance(io_configs, collections.Mapping) or
- isinstance(io_configs, six.string_types)):
+ if (
+ not isinstance(io_configs, collections.abc.Sequence) or
+ isinstance(io_configs, collections.abc.Mapping) or
+ isinstance(io_configs, str)
+ ):
raise TypeError('Software Config I/O Schema must be in a list')
- if not all(isinstance(conf, collections.Mapping) for conf in io_configs):
+ if not all(
+ isinstance(conf, collections.abc.Mapping) for conf in io_configs
+ ):
raise TypeError('Software Config I/O Schema must be a dict')
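check_io_schema_list above accepts only a real sequence (not a string and not a mapping) whose members are all mappings. A quick sketch of how those abc-based checks behave on a few inputs:

    import collections.abc as abc

    def looks_like_io_schema_list(value):
        # Same shape test: a sequence that is neither a str nor a mapping,
        # with every member being a mapping.
        if (not isinstance(value, abc.Sequence) or
                isinstance(value, (abc.Mapping, str))):
            return False
        return all(isinstance(item, abc.Mapping) for item in value)

    assert looks_like_io_schema_list([{'name': 'foo'}])
    assert not looks_like_io_schema_list('not a list')
    assert not looks_like_io_schema_list({'name': 'foo'})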
diff --git a/heat/engine/stack.py b/heat/engine/stack.py
index ebaf1165b..829609f06 100644
--- a/heat/engine/stack.py
+++ b/heat/engine/stack.py
@@ -25,7 +25,6 @@ from oslo_utils import excutils
from oslo_utils import timeutils as oslo_timeutils
from oslo_utils import uuidutils
from osprofiler import profiler
-import six
from heat.common import context as common_context
from heat.common import environment_format as env_fmt
@@ -75,20 +74,20 @@ class ForcedCancel(Exception):
def reset_state_on_error(func):
- @six.wraps(func)
+ @functools.wraps(func)
def handle_exceptions(stack, *args, **kwargs):
errmsg = None
try:
return func(stack, *args, **kwargs)
except Exception as exc:
with excutils.save_and_reraise_exception():
- errmsg = six.text_type(exc)
+ errmsg = str(exc)
LOG.error('Unexpected exception in %(func)s: %(msg)s',
{'func': func.__name__, 'msg': errmsg})
except BaseException as exc:
with excutils.save_and_reraise_exception():
exc_type = type(exc).__name__
- errmsg = '%s(%s)' % (exc_type, six.text_type(exc))
+ errmsg = '%s(%s)' % (exc_type, str(exc))
LOG.info('Stopped due to %(msg)s in %(func)s',
{'func': func.__name__, 'msg': errmsg})
finally:
@@ -101,8 +100,7 @@ def reset_state_on_error(func):
return handle_exceptions
-@six.python_2_unicode_compatible
-class Stack(collections.Mapping):
+class Stack(collections.abc.Mapping):
ACTIONS = (
CREATE, DELETE, UPDATE, ROLLBACK, SUSPEND, RESUME, ADOPT,
@@ -128,7 +126,7 @@ class Stack(collections.Mapping):
nested_depth=0, strict_validate=True, convergence=False,
current_traversal=None, tags=None, prev_raw_template_id=None,
current_deps=None, cache_data=None,
- deleted_time=None, converge=False):
+ deleted_time=None, converge=False, refresh_cred=False):
"""Initialise the Stack.
@@ -173,6 +171,7 @@ class Stack(collections.Mapping):
self._access_allowed_handlers = {}
self._db_resources = None
self._tags = tags
+ self._tags_stored = False
self.adopt_stack_data = adopt_stack_data
self.stack_user_project_id = stack_user_project_id
self.created_time = created_time
@@ -182,7 +181,6 @@ class Stack(collections.Mapping):
self.nested_depth = nested_depth
self.convergence = convergence
self.current_traversal = current_traversal
- self.tags = tags
self.prev_raw_template_id = prev_raw_template_id
self.current_deps = current_deps
self._worker_client = None
@@ -190,6 +188,9 @@ class Stack(collections.Mapping):
self.thread_group_mgr = None
self.converge = converge
+        # This flag is used to check whether the credential needs to be
+        # refreshed or not
+ self.refresh_cred = refresh_cred
+
# strict_validate can be used to disable value validation
# in the resource properties schema, this is useful when
# performing validation when properties reference attributes
@@ -224,14 +225,17 @@ class Stack(collections.Mapping):
@property
def tags(self):
if self._tags is None:
- tags = stack_tag_object.StackTagList.get(
- self.context, self.id)
- if tags:
- self._tags = [t.tag for t in tags]
+ if self.id is not None:
+ tags = stack_tag_object.StackTagList.get(self.context, self.id)
+ self._tags = [t.tag for t in tags] if tags else []
+ else:
+ self._tags = []
+ self._tags_stored = True
return self._tags
@tags.setter
def tags(self, value):
+ self._tags_stored = (value == self._tags)
self._tags = value
@property
@@ -334,7 +338,7 @@ class Stack(collections.Mapping):
resources = self._db_resources_get()
stk_def_cache = {}
- for rsc in six.itervalues(resources):
+ for rsc in resources.values():
loaded_res = self._resource_from_db_resource(rsc, stk_def_cache)
if loaded_res is not None:
yield loaded_res
@@ -517,14 +521,14 @@ class Stack(collections.Mapping):
"""
if self._dependencies is None:
deps = dependencies.Dependencies()
- for res in six.itervalues(self.resources):
+ for res in self.resources.values():
res.add_explicit_dependencies(deps)
self._dependencies = deps
return self._dependencies
def _add_implicit_dependencies(self, deps, ignore_errors=True):
"""Augment the given dependencies with implicit ones from plugins."""
- for res in six.itervalues(self.resources):
+ for res in self.resources.values():
try:
res.add_dependencies(deps)
except Exception as exc:
@@ -537,13 +541,38 @@ class Stack(collections.Mapping):
else:
LOG.warning('Ignoring error adding implicit '
'dependencies for %(res)s: %(err)s',
- {'res': six.text_type(res),
- 'err': six.text_type(exc)})
+ {'res': str(res),
+ 'err': str(exc)})
+
+ @classmethod
+ def _check_refresh_cred(cls, context, stack):
+ if stack.user_creds_id:
+ creds_obj = ucreds_object.UserCreds.get_by_id(
+ context, stack.user_creds_id)
+ creds = creds_obj.obj_to_primitive()["versioned_object.data"]
+ stored_context = common_context.StoredContext.from_dict(creds)
+
+ if cfg.CONF.deferred_auth_method == 'trusts':
+ old_trustor_proj_id = stored_context.tenant_id
+ old_trustor_user_id = stored_context.trustor_user_id
+
+ trustor_user_id = context.auth_plugin.get_user_id(
+ context.clients.client('keystone').session)
+ trustor_proj_id = context.auth_plugin.get_project_id(
+ context.clients.client('keystone').session)
+ return False if (
+ old_trustor_user_id == trustor_user_id) and (
+ old_trustor_proj_id == trustor_proj_id
+ ) else True
+
+        # Do not raise an error or allow a credential refresh when the stack
+        # has no user_creds_id.
+ return False
@classmethod
def load(cls, context, stack_id=None, stack=None, show_deleted=True,
use_stored_context=False, force_reload=False, cache_data=None,
- load_template=True):
+ load_template=True, check_refresh_cred=False):
"""Retrieve a Stack from the database."""
if stack is None:
stack = stack_object.Stack.get_by_id(
@@ -554,13 +583,22 @@ class Stack(collections.Mapping):
message = _('No stack exists with id "%s"') % str(stack_id)
raise exception.NotFound(message)
+ refresh_cred = False
+ if check_refresh_cred and (
+ cfg.CONF.deferred_auth_method == 'trusts'
+ ):
+ if cls._check_refresh_cred(context, stack):
+ use_stored_context = False
+ refresh_cred = True
+
if force_reload:
stack.refresh()
return cls._from_db(context, stack,
use_stored_context=use_stored_context,
cache_data=cache_data,
- load_template=load_template)
+ load_template=load_template,
+ refresh_cred=refresh_cred)
@classmethod
def load_all(cls, context, limit=None, marker=None, sort_keys=None,
@@ -594,7 +632,7 @@ class Stack(collections.Mapping):
@classmethod
def _from_db(cls, context, stack,
use_stored_context=False, cache_data=None,
- load_template=True):
+ load_template=True, refresh_cred=False):
if load_template:
template = tmpl.Template.load(
context, stack.raw_template_id, stack.raw_template)
@@ -618,7 +656,8 @@ class Stack(collections.Mapping):
prev_raw_template_id=stack.prev_raw_template_id,
current_deps=stack.current_deps, cache_data=cache_data,
nested_depth=stack.nested_depth,
- deleted_time=stack.deleted_at)
+ deleted_time=stack.deleted_at,
+ refresh_cred=refresh_cred)
def get_kwargs_for_cloning(self, keep_status=False, only_db=False,
keep_tags=False):
@@ -652,7 +691,7 @@ class Stack(collections.Mapping):
stack.update({
'action': self.action,
'status': self.status,
- 'status_reason': six.text_type(self.status_reason)})
+ 'status_reason': str(self.status_reason)})
if only_db:
stack['parent_resource_name'] = self.parent_resource_name
@@ -686,6 +725,17 @@ class Stack(collections.Mapping):
s['raw_template_id'] = self.t.id
if self.id is not None:
+ if self.refresh_cred:
+ keystone = self.clients.client('keystone')
+ trust_ctx = keystone.regenerate_trust_context()
+ new_creds = ucreds_object.UserCreds.create(trust_ctx)
+ s['user_creds_id'] = new_creds.id
+
+ self._delete_user_cred(raise_keystone_exception=True)
+
+ self.user_creds_id = new_creds.id
+ self.refresh_cred = False
+
if exp_trvsl is None and not ignore_traversal_check:
exp_trvsl = self.current_traversal
@@ -713,21 +763,31 @@ class Stack(collections.Mapping):
self.user_creds_id = new_creds.id
if self.convergence:
- # create a traversal ID
- self.current_traversal = uuidutils.generate_uuid()
- s['current_traversal'] = self.current_traversal
+ # create a traversal ID
+ self.current_traversal = uuidutils.generate_uuid()
+ s['current_traversal'] = self.current_traversal
new_s = stack_object.Stack.create(self.context, s)
self.id = new_s.id
self.created_time = new_s.created_at
- if self.tags:
- stack_tag_object.StackTagList.set(self.context, self.id, self.tags)
+ self._store_tags()
self._set_param_stackid()
return self.id
+ def _store_tags(self):
+ if (self._tags is not None and
+ not self._tags_stored and
+ self.id is not None):
+ tags = self._tags
+ if tags:
+ stack_tag_object.StackTagList.set(self.context, self.id, tags)
+ else:
+ stack_tag_object.StackTagList.delete(self.context, self.id)
+ self._tags_stored = True
+
def _backup_name(self):
return '%s*' % self.name
@@ -788,7 +848,7 @@ class Stack(collections.Mapping):
def __str__(self):
"""Return a human-readable string representation of the stack."""
text = 'Stack "%s" [%s]' % (self.name, self.id)
- return six.text_type(text)
+ return str(text)
def resource_by_refid(self, refid):
"""Return the resource in this stack with the specified refid.
@@ -796,7 +856,7 @@ class Stack(collections.Mapping):
:returns: resource in this stack with the specified refid, or None if
not found.
"""
- for r in six.itervalues(self):
+ for r in self.values():
if r.state not in ((r.INIT, r.COMPLETE),
(r.CREATE, r.IN_PROGRESS),
(r.CREATE, r.COMPLETE),
@@ -893,7 +953,7 @@ class Stack(collections.Mapping):
else:
iter_rsc = self._explicit_dependencies()
- unique_defns = set(res.t for res in six.itervalues(resources))
+ unique_defns = set(res.t for res in resources.values())
unique_defn_names = set(defn.name for defn in unique_defns)
for res in iter_rsc:
@@ -926,7 +986,7 @@ class Stack(collections.Mapping):
raise exception.StackValidationFailed(message=result)
eventlet.sleep(0)
- for op_name, output in six.iteritems(self.outputs):
+ for op_name, output in self.outputs.items():
try:
output.validate()
except exception.StackValidationFailed as ex:
@@ -945,7 +1005,7 @@ class Stack(collections.Mapping):
during its lifecycle using the configured deferred authentication
method.
"""
- return any(res.requires_deferred_auth for res in six.itervalues(self))
+ return any(res.requires_deferred_auth for res in self.values())
def _add_event(self, action, status, reason):
"""Add a state change event to the database."""
@@ -1032,7 +1092,7 @@ class Stack(collections.Mapping):
if stack is not None:
values = {'action': self.action,
'status': self.status,
- 'status_reason': six.text_type(self.status_reason)}
+ 'status_reason': str(self.status_reason)}
self._send_notification_and_add_event()
if self.convergence:
# do things differently for convergence
@@ -1062,7 +1122,7 @@ class Stack(collections.Mapping):
if stack is not None:
values = {'action': self.action,
'status': self.status,
- 'status_reason': six.text_type(self.status_reason)}
+ 'status_reason': str(self.status_reason)}
self._send_notification_and_add_event()
stack.persist_state_and_release_lock(self.context, self.id,
engine_id, values)
@@ -1082,7 +1142,7 @@ class Stack(collections.Mapping):
def preview_resources(self):
"""Preview the stack with all of the resources."""
return [resource.preview()
- for resource in six.itervalues(self.resources)]
+ for resource in self.resources.values()]
def get_nested_parameters(self, filter_func):
"""Return nested parameters schema, if any.
@@ -1092,7 +1152,7 @@ class Stack(collections.Mapping):
stack.
"""
result = {}
- for name, rsrc in six.iteritems(self.resources):
+ for name, rsrc in self.resources.items():
nested = rsrc.get_nested_parameters_stack()
if nested is None:
continue
@@ -1144,7 +1204,6 @@ class Stack(collections.Mapping):
return {'resource_data': data['resources'].get(resource.name)}
- @scheduler.wrappertask
def stack_task(self, action, reverse=False, post_func=None,
aggregate_exceptions=False, pre_completion_func=None,
notify=None):
@@ -1169,7 +1228,7 @@ class Stack(collections.Mapping):
None, action)
except Exception as e:
self.state_set(action, self.FAILED, e.args[0] if e.args else
- 'Failed stack pre-ops: %s' % six.text_type(e))
+ 'Failed stack pre-ops: %s' % str(e))
if callable(post_func):
post_func()
if notify is not None:
@@ -1193,12 +1252,11 @@ class Stack(collections.Mapping):
lambda x: {})
@functools.wraps(getattr(resource.Resource, action_method))
- @scheduler.wrappertask
def resource_action(r):
# Find e.g resource.create and call it
handle = getattr(r, action_method)
- yield handle(**handle_kwargs(r))
+ yield from handle(**handle_kwargs(r))
if action == self.CREATE:
stk_defn.update_resource_data(self.defn, r.name, r.node_data())
@@ -1214,7 +1272,7 @@ class Stack(collections.Mapping):
aggregate_exceptions=aggregate_exceptions)
try:
- yield action_task()
+ yield from action_task()
except scheduler.Timeout:
stack_status = self.FAILED
reason = '%s timed out' % action.title()
@@ -1225,7 +1283,7 @@ class Stack(collections.Mapping):
# ExceptionGroup, but the raw exception.
# see scheduler.py line 395-399
stack_status = self.FAILED
- reason = 'Resource %s failed: %s' % (action, six.text_type(ex))
+ reason = 'Resource %s failed: %s' % (action, str(ex))
if pre_completion_func:
pre_completion_func(self, action, stack_status, reason)
@@ -1259,7 +1317,7 @@ class Stack(collections.Mapping):
return hasattr(res, 'handle_%s' % res.CHECK.lower())
all_supported = all(is_supported(res)
- for res in six.itervalues(self.resources))
+ for res in self.resources.values())
if not all_supported:
msg = ". '%s' not fully supported (see resources)" % self.CHECK
@@ -1303,7 +1361,7 @@ class Stack(collections.Mapping):
if not self.disable_rollback and self.state == (self.ADOPT,
self.FAILED):
# enter the same flow as abandon and just delete the stack
- for res in six.itervalues(self.resources):
+ for res in self.resources.values():
res.abandon_in_progress = True
self.delete(action=self.ROLLBACK, abandon=True)
@@ -1362,11 +1420,6 @@ class Stack(collections.Mapping):
self._set_param_stackid()
self.tags = new_stack.tags
- if new_stack.tags:
- stack_tag_object.StackTagList.set(self.context, self.id,
- new_stack.tags)
- else:
- stack_tag_object.StackTagList.delete(self.context, self.id)
self.action = action
self.status = self.IN_PROGRESS
@@ -1418,7 +1471,7 @@ class Stack(collections.Mapping):
try:
self.delete_all_snapshots()
except Exception as exc:
- self.state_set(self.action, self.FAILED, six.text_type(exc))
+ self.state_set(self.action, self.FAILED, str(exc))
self.purge_db()
return
@@ -1568,14 +1621,23 @@ class Stack(collections.Mapping):
return set(n.rsrc_id for n in dep_nodes if not n.is_update)
def reset_stack_and_resources_in_progress(self, reason):
- for name, rsrc in six.iteritems(self.resources):
+ for name, rsrc in self.resources.items():
if rsrc.status == rsrc.IN_PROGRESS:
rsrc.state_set(rsrc.action,
rsrc.FAILED,
- six.text_type(reason))
- self.state_set(self.action, self.FAILED, six.text_type(reason))
+ str(reason))
+ if self.action == self.UPDATE and not self.convergence:
+ backup_stack = self._backup_stack(False)
+ existing_params = environment.Environment({env_fmt.PARAMETERS:
+ self.t.env.params})
+ template = tmpl.Template.load(self.context,
+ self.prev_raw_template_id)
+ bkp_stack_template = backup_stack.t if backup_stack else None
+ self._merge_user_param_template(existing_params, template,
+ bkp_stack_template)
+
+ self.state_set(self.action, self.FAILED, str(reason))
- @scheduler.wrappertask
def update_task(self, newstack, action=UPDATE,
msg_queue=None, notify=None):
if action not in (self.UPDATE, self.ROLLBACK, self.RESTORE):
@@ -1591,7 +1653,7 @@ class Stack(collections.Mapping):
newstack, action)
except Exception as e:
self.state_set(action, self.FAILED, e.args[0] if e.args else
- 'Failed stack pre-ops: %s' % six.text_type(e))
+ 'Failed stack pre-ops: %s' % str(e))
if notify is not None:
notify.signal()
return
@@ -1651,17 +1713,15 @@ class Stack(collections.Mapping):
self._set_param_stackid()
self.tags = newstack.tags
- if newstack.tags:
- stack_tag_object.StackTagList.set(self.context, self.id,
- newstack.tags)
- else:
- stack_tag_object.StackTagList.delete(self.context, self.id)
+        # The stack has already been store()d in the IN_PROGRESS state, so
+        # write the tags now; otherwise the new set would not appear until
+        # COMPLETE/FAILED.
+ self._store_tags()
check_message = functools.partial(self._check_for_message,
msg_queue)
try:
- yield updater.as_task(timeout=self.timeout_secs(),
- progress_callback=check_message)
+ yield from updater.as_task(timeout=self.timeout_secs(),
+ progress_callback=check_message)
finally:
self.reset_dependencies()
@@ -1677,7 +1737,7 @@ class Stack(collections.Mapping):
# so we roll back to the original state
should_rollback = self._update_exception_handler(e, action)
if should_rollback:
- yield self.update_task(oldstack, action=self.ROLLBACK)
+ yield from self.update_task(oldstack, action=self.ROLLBACK)
except BaseException as e:
with excutils.save_and_reraise_exception():
self._update_exception_handler(e, action)
@@ -1701,20 +1761,8 @@ class Stack(collections.Mapping):
self._log_status()
self._send_notification_and_add_event()
if self.status == self.FAILED:
- # Since template was incrementally updated based on existing
- # and new stack resources, we should have user params of both.
- existing_params.load(newstack.t.env.user_env_as_dict())
- self.t.env = existing_params
- # Update the template version, in case new things were used
- self.t.t[newstack.t.version[0]] = max(
- newstack.t.version[1], self.t.version[1])
- self.t.merge_snippets(newstack.t)
- self.t.store(self.context)
- backup_stack.t.env = existing_params
- backup_stack.t.t[newstack.t.version[0]] = max(
- newstack.t.version[1], self.t.version[1])
- backup_stack.t.merge_snippets(newstack.t)
- backup_stack.t.store(self.context)
+ self._merge_user_param_template(existing_params, newstack.t,
+ backup_stack.t)
self.store()
if previous_template_id is not None:
@@ -1725,6 +1773,24 @@ class Stack(collections.Mapping):
newstack, action,
(self.status == self.FAILED))
+ def _merge_user_param_template(self, existing_params, new_template,
+ bkp_stack_template):
+ # Since template was incrementally updated based on existing
+ # and new stack resources, we should have user params of both.
+ existing_params.load(new_template.env.user_env_as_dict())
+ self.t.env = existing_params
+ # Update the template version, in case new things were used
+ self.t.t[new_template.version[0]] = max(new_template.version[1],
+ self.t.version[1])
+ self.t.merge_snippets(new_template)
+ self.t.store(self.context)
+ if bkp_stack_template:
+ bkp_stack_template.env = existing_params
+ bkp_stack_template.t[new_template.version[0]] = max(
+ new_template.version[1], self.t.version[1])
+ bkp_stack_template.merge_snippets(new_template)
+ bkp_stack_template.store(self.context)
+
def _update_exception_handler(self, exc, action):
"""Handle exceptions in update_task.
@@ -1734,7 +1800,7 @@ class Stack(collections.Mapping):
:returns: a boolean for require rollback flag.
"""
- self.status_reason = six.text_type(exc)
+ self.status_reason = str(exc)
self.status = self.FAILED
if action != self.UPDATE:
return False
@@ -1778,7 +1844,7 @@ class Stack(collections.Mapping):
def copy_data(source_res, destination_res):
if source_res.data():
- for key, val in six.iteritems(source_res.data()):
+ for key, val in source_res.data().items():
destination_res.data_set(key, val)
for key, backup_res in stack.resources.items():
@@ -1823,11 +1889,10 @@ class Stack(collections.Mapping):
LOG.exception("Failed to retrieve user_creds")
return None
- def _delete_credentials(self, stack_status, reason, abandon):
+ def _delete_user_cred(self, stack_status=None, reason=None,
+ raise_keystone_exception=False):
# Cleanup stored user_creds so they aren't accessible via
# the soft-deleted stack which remains in the DB
- # The stack_status and reason passed in are current values, which
- # may get rewritten and returned from this method
if self.user_creds_id:
user_creds = self._try_get_user_creds()
# If we created a trust, delete it
@@ -1850,13 +1915,15 @@ class Stack(collections.Mapping):
else:
self.clients.client('keystone').delete_trust(
trust_id)
- except Exception as ex:
+ except Exception:
# We want the admin to be able to delete the stack
# Do not FAIL a delete when we cannot delete a trust.
# We already carry through and delete the credentials
# Without this, they would need to issue
# an additional stack-delete
LOG.exception("Error deleting trust")
+ if raise_keystone_exception:
+ raise
# Delete the stored credentials
try:
@@ -1866,13 +1933,18 @@ class Stack(collections.Mapping):
LOG.info("Tried to delete user_creds that do not exist "
"(stack=%(stack)s user_creds_id=%(uc)s)",
{'stack': self.id, 'uc': self.user_creds_id})
+ self.user_creds_id = None
+ return stack_status, reason
- try:
- self.user_creds_id = None
- self.store()
- except exception.NotFound:
- LOG.info("Tried to store a stack that does not exist %s",
- self.id)
+ def _delete_credentials(self, stack_status, reason, abandon):
+ # The stack_status and reason passed in are current values, which
+ # may get rewritten and returned from this method
+ stack_status, reason = self._delete_user_cred(stack_status, reason)
+ try:
+ self.store()
+ except exception.NotFound:
+ LOG.info("Tried to store a stack that does not exist %s",
+ self.id)
# If the stack has a domain project, delete it
if self.stack_user_project_id and not abandon:
@@ -1883,7 +1955,7 @@ class Stack(collections.Mapping):
except Exception as ex:
LOG.exception("Error deleting project")
stack_status = self.FAILED
- reason = "Error deleting project: %s" % six.text_type(ex)
+ reason = "Error deleting project: %s" % str(ex)
return stack_status, reason
@@ -1911,8 +1983,8 @@ class Stack(collections.Mapping):
stack_status = self.COMPLETE
reason = 'Stack %s completed successfully' % action
- self.state_set(action, self.IN_PROGRESS, 'Stack %s started' %
- action)
+ self.state_set(action, self.IN_PROGRESS, 'Stack %s started at %s' %
+ (action, oslo_timeutils.utcnow().isoformat()))
if notify is not None:
notify.signal()
@@ -1935,7 +2007,7 @@ class Stack(collections.Mapping):
except Exception as e:
self.state_set(action, self.FAILED,
e.args[0] if e.args else
- 'Failed stack pre-ops: %s' % six.text_type(e))
+ 'Failed stack pre-ops: %s' % str(e))
return
action_task = scheduler.DependencyTaskGroup(self.dependencies,
@@ -1945,7 +2017,7 @@ class Stack(collections.Mapping):
scheduler.TaskRunner(action_task)(timeout=self.timeout_secs())
except exception.ResourceFailure as ex:
stack_status = self.FAILED
- reason = 'Resource %s failed: %s' % (action, six.text_type(ex))
+ reason = 'Resource %s failed: %s' % (action, str(ex))
except scheduler.Timeout:
stack_status = self.FAILED
reason = '%s timed out' % action.title()
@@ -2070,7 +2142,7 @@ class Stack(collections.Mapping):
ss_defn = self.defn.clone_with_new_template(template,
self.identifier())
resources = self._resources_for_defn(ss_defn)
- for name, rsrc in six.iteritems(resources):
+ for name, rsrc in resources.items():
data = snapshot.data['resources'].get(name)
if data:
scheduler.TaskRunner(rsrc.delete_snapshot, data)()
@@ -2137,7 +2209,7 @@ class Stack(collections.Mapping):
'status': self.status,
'template': self.t.t,
'resources': dict((res.name, res.prepare_abandon())
- for res in six.itervalues(self.resources)),
+ for res in self.resources.values()),
'project_id': self.tenant_id,
'stack_user_project_id': self.stack_user_project_id,
'tags': self.tags,
@@ -2257,9 +2329,9 @@ class Stack(collections.Mapping):
requires = set(res_id_dep.requires(db_res.id))
r = self.resources.get(db_res.name)
if r is None:
- # delete db resources not in current_template_id
+ # delete DB resources not in current_template_id
LOG.warning("Resource %(res)s not found in template "
- "for stack %(st)s, deleting from db.",
+ "for stack %(st)s, deleting from DB.",
{'res': db_res.name, 'st': self.id})
resource_objects.Resource.delete(self.context, db_res.id)
else:
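The tag handling above becomes a lazy property plus a _tags_stored flag, so tags are loaded from the database only when first read and written back only when they actually changed. A compact sketch of that dirty-flag pattern, with a hypothetical in-memory backend standing in for StackTagList:

    class TagHolder:
        def __init__(self, tags=None):
            self._tags = tags
            self._stored = False

        @property
        def tags(self):
            if self._tags is None:
                self._tags = []        # the real code loads from the DB here
                self._stored = True
            return self._tags

        @tags.setter
        def tags(self, value):
            # Only stays "clean" if the new value equals what is already held.
            self._stored = (value == self._tags)
            self._tags = value

        def store_tags(self, backend, key):
            if self._tags is not None and not self._stored:
                backend[key] = list(self._tags)
                self._stored = True

    backend = {}
    holder = TagHolder()
    holder.tags = ['prod']
    holder.store_tags(backend, 'stack-1')   # writes once
    holder.store_tags(backend, 'stack-1')   # no-op, nothing has changed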
diff --git a/heat/engine/stk_defn.py b/heat/engine/stk_defn.py
index 286ab5b1a..eb8e1334c 100644
--- a/heat/engine/stk_defn.py
+++ b/heat/engine/stk_defn.py
@@ -12,7 +12,6 @@
# under the License.
import itertools
-import six
from heat.common import exception
from heat.engine import attributes
@@ -232,7 +231,7 @@ class ResourceProxy(status.ResourceStatus):
the "show" attribute.
"""
all_attrs = self._res_data().attributes()
- return dict((k, v) for k, v in six.iteritems(all_attrs)
+ return dict((k, v) for k, v in all_attrs.items()
if k != attributes.SHOW_ATTR)
@@ -253,8 +252,8 @@ def update_resource_data(stack_definition, resource_name, resource_data):
res_defns = stack_definition._resource_defns or {}
op_defns = stack_definition._output_defns or {}
- all_defns = itertools.chain(six.itervalues(res_defns),
- six.itervalues(op_defns))
+ all_defns = itertools.chain(res_defns.values(),
+ op_defns.values())
for defn in all_defns:
if resource_name in defn.required_resource_names():
defn._all_dep_attrs = None
diff --git a/heat/engine/support.py b/heat/engine/support.py
index fd765e31d..d654a7150 100644
--- a/heat/engine/support.py
+++ b/heat/engine/support.py
@@ -54,11 +54,11 @@ class SupportStatus(object):
self.previous_status = None
def to_dict(self):
- return {'status': self.status,
- 'message': self.message,
- 'version': self.version,
- 'previous_status': self.previous_status.to_dict()
- if self.previous_status is not None else None}
+ return {'status': self.status,
+ 'message': self.message,
+ 'version': self.version,
+ 'previous_status': self.previous_status.to_dict()
+ if self.previous_status is not None else None}
def is_substituted(self, substitute_class):
if self.substitute_class is None:
diff --git a/heat/engine/sync_point.py b/heat/engine/sync_point.py
index 99e83c74a..fb5ecb0f5 100644
--- a/heat/engine/sync_point.py
+++ b/heat/engine/sync_point.py
@@ -13,7 +13,6 @@
# limitations under the License.
import ast
-import six
import tenacity
from oslo_log import log as logging
@@ -84,7 +83,7 @@ def _str_unpack_tuple(s):
def _deserialize(d):
d2 = {}
for k, v in d.items():
- if isinstance(k, six.string_types) and k.startswith(u'tuple:('):
+ if isinstance(k, str) and k.startswith(u'tuple:('):
k = _str_unpack_tuple(k)
if isinstance(v, dict):
v = _deserialize(v)
diff --git a/heat/engine/template.py b/heat/engine/template.py
index 83d3d6f25..eece1d004 100644
--- a/heat/engine/template.py
+++ b/heat/engine/template.py
@@ -17,7 +17,6 @@ import copy
import functools
import hashlib
-import six
from stevedore import extension
from heat.common import exception
@@ -37,8 +36,8 @@ _template_classes = None
def get_version(template_data, available_versions):
version_keys = set(key for key, version in available_versions)
- candidate_keys = set(k for k, v in six.iteritems(template_data) if
- isinstance(v, six.string_types))
+ candidate_keys = set(k for k, v in template_data.items() if
+ isinstance(v, str))
keys_present = version_keys & candidate_keys
@@ -61,7 +60,7 @@ def _get_template_extension_manager():
def raise_extension_exception(extmanager, ep, err):
- raise TemplatePluginNotRegistered(name=ep.name, error=six.text_type(err))
+ raise TemplatePluginNotRegistered(name=ep.name, error=str(err))
class TemplatePluginNotRegistered(exception.HeatException):
@@ -90,7 +89,7 @@ def get_template_class(template_data):
raise exception.InvalidTemplateVersion(explanation=explanation)
-class Template(collections.Mapping):
+class Template(collections.abc.Mapping):
"""Abstract base class for template format plugins.
All template formats (both internal and third-party) should derive from
@@ -296,7 +295,7 @@ class Template(collections.Mapping):
sections (e.g. parameters are check by parameters schema class).
"""
t_digest = hashlib.sha256(
- six.text_type(self.t).encode('utf-8')).hexdigest()
+ str(self.t).encode('utf-8')).hexdigest()
# TODO(kanagaraj-manickam) currently t_digest is stored in self. which
# is used to check whether already template is validated or not.
@@ -315,7 +314,7 @@ class Template(collections.Mapping):
raise exception.InvalidTemplateSection(section=k)
# check resources
- for res in six.itervalues(self[self.RESOURCES]):
+ for res in self[self.RESOURCES].values():
try:
if not res or not res.get('Type'):
message = _('Each Resource must contain '
@@ -356,12 +355,12 @@ class Template(collections.Mapping):
def parse(functions, stack, snippet, path='', template=None):
recurse = functools.partial(parse, functions, stack, template=template)
- if isinstance(snippet, collections.Mapping):
+ if isinstance(snippet, collections.abc.Mapping):
def mkpath(key):
- return '.'.join([path, six.text_type(key)])
+ return '.'.join([path, str(key)])
if len(snippet) == 1:
- fn_name, args = next(six.iteritems(snippet))
+ fn_name, args = next(iter(snippet.items()))
Func = functions.get(fn_name)
if Func is not None:
try:
@@ -376,12 +375,12 @@ def parse(functions, stack, snippet, path='', template=None):
except (ValueError, TypeError, KeyError) as e:
raise exception.StackValidationFailed(
path=path,
- message=six.text_type(e))
+ message=str(e))
return dict((k, recurse(v, mkpath(k)))
- for k, v in six.iteritems(snippet))
- elif (not isinstance(snippet, six.string_types) and
- isinstance(snippet, collections.Iterable)):
+ for k, v in snippet.items())
+ elif (not isinstance(snippet, str) and
+ isinstance(snippet, collections.abc.Iterable)):
def mkpath(idx):
return ''.join([path, '[%d]' % idx])
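parse() above walks a snippet by treating mappings, non-string iterables and scalars differently. A stripped-down sketch of that dispatch, with a hypothetical resolver in place of the template functions:

    import collections.abc as abc

    def walk(snippet, resolve, path=''):
        # Recurse into mappings and sequences, resolving scalars at the
        # leaves; strings are scalars even though they are iterable.
        if isinstance(snippet, abc.Mapping):
            return {k: walk(v, resolve, '%s.%s' % (path, k))
                    for k, v in snippet.items()}
        if not isinstance(snippet, str) and isinstance(snippet, abc.Iterable):
            return [walk(v, resolve, '%s[%d]' % (path, i))
                    for i, v in enumerate(snippet)]
        return resolve(snippet)

    assert walk({'a': ['x', {'b': 'y'}]}, str.upper) == {'a': ['X', {'b': 'Y'}]}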
diff --git a/heat/engine/template_common.py b/heat/engine/template_common.py
index 63afd2dc1..f5d80a509 100644
--- a/heat/engine/template_common.py
+++ b/heat/engine/template_common.py
@@ -15,8 +15,6 @@ import collections
import functools
import weakref
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.engine import conditions
@@ -77,30 +75,30 @@ class CommonTemplate(template.Template):
yield ('resource_type',
self._parse_resource_field(self.RES_TYPE,
- six.string_types, 'string',
+ str, 'string',
name, data, parse))
yield ('properties',
self._parse_resource_field(self.RES_PROPERTIES,
- (collections.Mapping,
+ (collections.abc.Mapping,
function.Function), 'object',
name, data, parse))
yield ('metadata',
self._parse_resource_field(self.RES_METADATA,
- (collections.Mapping,
+ (collections.abc.Mapping,
function.Function), 'object',
name, data, parse))
depends = self._parse_resource_field(self.RES_DEPENDS_ON,
- collections.Sequence,
+ collections.abc.Sequence,
'list or string',
name, data, no_parse)
- if isinstance(depends, six.string_types):
+ if isinstance(depends, str):
depends = [depends]
elif depends:
for dep in depends:
- if not isinstance(dep, six.string_types):
+ if not isinstance(dep, str):
msg = _('Resource %(name)s %(key)s '
'must be a list of strings') % {
'name': name, 'key': self.RES_DEPENDS_ON}
@@ -109,7 +107,7 @@ class CommonTemplate(template.Template):
yield 'depends', depends
del_policy = self._parse_resource_field(self.RES_DELETION_POLICY,
- (six.string_types,
+ (str,
function.Function),
'string',
name, data, parse)
@@ -124,13 +122,13 @@ class CommonTemplate(template.Template):
yield ('update_policy',
self._parse_resource_field(self.RES_UPDATE_POLICY,
- (collections.Mapping,
+ (collections.abc.Mapping,
function.Function), 'object',
name, data, parse))
yield ('description',
self._parse_resource_field(self.RES_DESCRIPTION,
- six.string_types, 'string',
+ str, 'string',
name, data, no_parse))
def _get_condition_definitions(self):
@@ -145,7 +143,7 @@ class CommonTemplate(template.Template):
return cached_conds
raw_defs = self._get_condition_definitions()
- if not isinstance(raw_defs, collections.Mapping):
+ if not isinstance(raw_defs, collections.abc.Mapping):
message = _('Condition definitions must be a map. Found a '
'%s instead') % type(raw_defs).__name__
raise exception.StackValidationFailed(
@@ -168,7 +166,7 @@ class CommonTemplate(template.Template):
def get_outputs():
for key, val in outputs.items():
- if not isinstance(val, collections.Mapping):
+ if not isinstance(val, collections.abc.Mapping):
message = _('Output definitions must be a map. Found a '
'%s instead') % type(val).__name__
raise exception.StackValidationFailed(
@@ -195,7 +193,7 @@ class CommonTemplate(template.Template):
enabled = conds.is_enabled(function.resolve(cond))
except ValueError as exc:
path = [self.OUTPUTS, key, self.OUTPUT_CONDITION]
- message = six.text_type(exc)
+ message = str(exc)
raise exception.StackValidationFailed(path=path,
message=message)
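The depends_on parsing above allows either a single resource name or a list of names and rejects anything else. A hypothetical helper (not in the tree) that captures the same normalisation:

    def normalize_depends_on(value):
        # A bare string is shorthand for a one-element list.
        if isinstance(value, str):
            return [value]
        deps = list(value or [])
        if not all(isinstance(dep, str) for dep in deps):
            raise TypeError('depends_on must be a string or a list of strings')
        return deps

    assert normalize_depends_on('server') == ['server']
    assert normalize_depends_on(['server', 'volume']) == ['server', 'volume']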
diff --git a/heat/engine/template_files.py b/heat/engine/template_files.py
index 0e7f6e77e..55a54e0d6 100644
--- a/heat/engine/template_files.py
+++ b/heat/engine/template_files.py
@@ -12,7 +12,6 @@
# under the License.
import collections
-import six
import weakref
from heat.common import context
@@ -29,7 +28,7 @@ class ReadOnlyDict(dict):
raise ValueError("Attempted to write to internal TemplateFiles cache")
-class TemplateFiles(collections.Mapping):
+class TemplateFiles(collections.abc.Mapping):
def __init__(self, files):
self.files = None
@@ -40,7 +39,7 @@ class TemplateFiles(collections.Mapping):
self.files_id = files.files_id
self.files = files.files
return
- if isinstance(files, six.integer_types):
+ if isinstance(files, int):
self.files_id = files
if self.files_id in _d:
self.files = _d[self.files_id]
@@ -50,7 +49,7 @@ class TemplateFiles(collections.Mapping):
'(value is %(val)s)') %
{'cname': files.__class__,
'val': str(files)})
- # the dict has not been persisted as a raw_template_files db obj
+ # the dict has not been persisted as a raw_template_files DB obj
# yet, so no self.files_id
self.files = ReadOnlyDict(files)
@@ -82,7 +81,7 @@ class TemplateFiles(collections.Mapping):
return iter(self.files)
def _refresh_if_needed(self):
- # retrieve files from db if needed
+ # retrieve files from DB if needed
if self.files_id is None:
return
if self.files_id in _d:
@@ -112,13 +111,13 @@ class TemplateFiles(collections.Mapping):
def update(self, files):
# Sets up the next call to store() to create a new
- # raw_template_files db obj. It seems like we *could* just
+ # raw_template_files DB obj. It seems like we *could* just
# update the existing raw_template_files obj, but the problem
# with that is other heat-engine processes' _d dictionaries
# would have stale data for a given raw_template_files.id with
# no way of knowing whether that data should be refreshed or
# not. So, just avoid the potential for weird race conditions
- # and create another db obj in the next store().
+ # and create another DB obj in the next store().
if len(files) == 0:
return
if not isinstance(files, dict):
diff --git a/heat/engine/translation.py b/heat/engine/translation.py
index 3e5b2bf3b..7fe5d1804 100644
--- a/heat/engine/translation.py
+++ b/heat/engine/translation.py
@@ -14,7 +14,6 @@
import functools
from oslo_log import log as logging
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -354,6 +353,6 @@ def resolve_and_find(value, cplugin, finder, entity=None,
except Exception as ex:
if ignore_resolve_error:
LOG.info("Ignoring error in RESOLVE translation: %s",
- six.text_type(ex))
+ str(ex))
return value
raise
diff --git a/heat/engine/update.py b/heat/engine/update.py
index cd8baba62..913f49a88 100644
--- a/heat/engine/update.py
+++ b/heat/engine/update.py
@@ -12,10 +12,8 @@
# under the License.
from oslo_log import log as logging
-import six
from heat.common import exception
-from heat.common.i18n import repr_wrapper
from heat.engine import dependencies
from heat.engine import resource
from heat.engine import scheduler
@@ -25,7 +23,6 @@ from heat.objects import resource as resource_objects
LOG = logging.getLogger(__name__)
-@repr_wrapper
class StackUpdate(object):
"""A Task to perform the update of an existing stack to a new template."""
@@ -39,7 +36,8 @@ class StackUpdate(object):
self.rollback = rollback
self.existing_snippets = dict((n, r.frozen_definition())
- for n, r in self.existing_stack.items())
+ for n, r in self.existing_stack.items()
+ if n in self.new_stack)
def __repr__(self):
if self.rollback:
@@ -47,7 +45,6 @@ class StackUpdate(object):
else:
return '%s Update' % str(self.existing_stack)
- @scheduler.wrappertask
def __call__(self):
"""Return a co-routine that updates the stack."""
@@ -65,10 +62,10 @@ class StackUpdate(object):
error_wait_time=get_error_wait_time)
if not self.rollback:
- yield cleanup_prev()
+ yield from cleanup_prev()
try:
- yield updater()
+ yield from updater()
finally:
self.previous_stack.reset_dependencies()
@@ -78,12 +75,11 @@ class StackUpdate(object):
else:
return self._process_existing_resource_update(res)
- @scheduler.wrappertask
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
LOG.debug("Deleting backup resource %s", prev_res.name)
- yield prev_res.destroy()
+ yield from prev_res.destroy()
@staticmethod
def _exchange_stacks(existing_res, prev_res):
@@ -93,7 +89,6 @@ class StackUpdate(object):
prev_stack.add_resource(existing_res)
existing_stack.add_resource(prev_res)
- @scheduler.wrappertask
def _create_resource(self, new_res):
res_name = new_res.name
@@ -112,7 +107,7 @@ class StackUpdate(object):
return
LOG.debug("Deleting backup Resource %s", res_name)
- yield prev_res.destroy()
+ yield from prev_res.destroy()
# Back up existing resource
if res_name in self.existing_stack:
@@ -133,7 +128,7 @@ class StackUpdate(object):
self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context)
- yield new_res.create()
+ yield from new_res.create()
self._update_resource_data(new_res)
@@ -146,7 +141,7 @@ class StackUpdate(object):
failure = exception.ResourceFailure(ex, existing_res,
existing_res.UPDATE)
existing_res._add_event(existing_res.UPDATE, existing_res.FAILED,
- six.text_type(ex))
+ str(ex))
raise failure
def _update_resource_data(self, resource):
@@ -162,7 +157,6 @@ class StackUpdate(object):
stk_defn.update_resource_data(self.new_stack.defn,
resource.name, node_data)
- @scheduler.wrappertask
def _process_new_resource_update(self, new_res):
res_name = new_res.name
@@ -171,9 +165,9 @@ class StackUpdate(object):
is_substituted = existing_res.check_is_substituted(type(new_res))
if type(existing_res) is type(new_res) or is_substituted:
try:
- yield self._update_in_place(existing_res,
- new_res,
- is_substituted)
+ yield from self._update_in_place(existing_res,
+ new_res,
+ is_substituted)
except resource.UpdateReplace:
pass
else:
@@ -197,7 +191,7 @@ class StackUpdate(object):
else:
self._check_replace_restricted(new_res)
- yield self._create_resource(new_res)
+ yield from self._create_resource(new_res)
def _update_in_place(self, existing_res, new_res, is_substituted=False):
existing_snippet = self.existing_snippets[existing_res.name]
@@ -216,15 +210,15 @@ class StackUpdate(object):
existing_res.stack.resources[existing_res.name] = substitute
existing_res = substitute
existing_res.converge = self.new_stack.converge
- return existing_res.update(new_snippet, existing_snippet,
- prev_resource=prev_res)
+ yield from existing_res.update(new_snippet, existing_snippet,
+ prev_resource=prev_res)
- @scheduler.wrappertask
def _process_existing_resource_update(self, existing_res):
res_name = existing_res.name
if res_name in self.previous_stack:
- yield self._remove_backup_resource(self.previous_stack[res_name])
+ backup_res = self.previous_stack[res_name]
+ yield from self._remove_backup_resource(backup_res)
if res_name in self.new_stack:
new_res = self.new_stack[res_name]
@@ -233,7 +227,7 @@ class StackUpdate(object):
return
if existing_res.stack is not self.previous_stack:
- yield existing_res.destroy()
+ yield from existing_res.destroy()
if res_name not in self.new_stack:
self.existing_stack.remove_resource(res_name)
@@ -256,7 +250,7 @@ class StackUpdate(object):
for e in existing_deps.graph(reverse=True).edges():
yield e
# Don't cleanup old resources until after they have been replaced
- for name, res in six.iteritems(self.existing_stack):
+ for name, res in self.existing_stack.items():
if name in self.new_stack:
yield (res, self.new_stack[name])
diff --git a/heat/engine/worker.py b/heat/engine/worker.py
index 274dd436f..28e2424e0 100644
--- a/heat/engine/worker.py
+++ b/heat/engine/worker.py
@@ -110,6 +110,9 @@ class WorkerService(object):
Marks the stack as FAILED due to cancellation, but, allows all
in_progress resources to complete normally; no worker is stopped
abruptly.
+
+ Any in-progress traversals are also stopped on all nested stacks that
+ are descendants of the one passed.
"""
_stop_traversal(stack)
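The docstring lines added above state that cancellation also stops traversals on every nested stack descended from the one passed in. A hedged, stand-alone sketch of that cascade (toy classes for illustration, not Heat code):

    class Stack:
        def __init__(self, name, children=()):
            self.name = name
            self.children = list(children)

    def _stop_traversal(stack):
        print('stopping traversal of', stack.name)

    def stop_traversal_tree(stack):
        # Stop this stack's traversal, then recurse into its descendants.
        _stop_traversal(stack)
        for child in stack.children:
            stop_traversal_tree(child)

    stop_traversal_tree(Stack('parent', [Stack('child', [Stack('grandchild')])]))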
@@ -154,9 +157,8 @@ class WorkerService(object):
db_api.resource_update_and_save(stack.context, rsrc.id, values)
# The old resource might be in the graph (a rollback case);
# just re-trigger it.
- key = parser.ConvergenceNode(rsrc.replaces, is_update)
- check_resource.retrigger_check_resource(stack.context, is_update,
- key.rsrc_id, stack)
+ check_resource.retrigger_check_resource(stack.context,
+ rsrc.replaces, stack)
@context.request_context
@log_exceptions
diff --git a/heat/hacking/checks.py b/heat/hacking/checks.py
index dade30bb2..4c3943538 100644
--- a/heat/hacking/checks.py
+++ b/heat/hacking/checks.py
@@ -15,6 +15,8 @@
import re
+from hacking import core
+
"""
Guidelines for writing new hacking checks
@@ -31,6 +33,7 @@ Guidelines for writing new hacking checks
"""
+@core.flake8ext
def no_log_warn(logical_line):
"""Disallow 'LOG.warn('
@@ -42,6 +45,7 @@ def no_log_warn(logical_line):
yield(0, 'Heat301 Use LOG.warning() rather than LOG.warn()')
+@core.flake8ext
def check_python3_no_iteritems(logical_line):
msg = ("Heat302: Use dict.items() instead of dict.iteritems().")
@@ -49,6 +53,7 @@ def check_python3_no_iteritems(logical_line):
yield(0, msg)
+@core.flake8ext
def check_python3_no_iterkeys(logical_line):
msg = ("Heat303: Use dict.keys() instead of dict.iterkeys().")
@@ -56,15 +61,9 @@ def check_python3_no_iterkeys(logical_line):
yield(0, msg)
+@core.flake8ext
def check_python3_no_itervalues(logical_line):
msg = ("Heat304: Use dict.values() instead of dict.itervalues().")
if re.search(r".*\.itervalues\(\)", logical_line):
yield(0, msg)
-
-
-def factory(register):
- register(no_log_warn)
- register(check_python3_no_iteritems)
- register(check_python3_no_iterkeys)
- register(check_python3_no_itervalues)
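With the factory(register) hook removed, the checks above are discovered as flake8 extensions: each function carries @core.flake8ext and its code is mapped to it in flake8's local-plugins configuration. A hedged sketch of what that wiring typically looks like (the config file location and paths value are assumptions, not copied from this patch):

    [flake8:local-plugins]
    extension =
        Heat301 = checks:no_log_warn
        Heat302 = checks:check_python3_no_iteritems
        Heat303 = checks:check_python3_no_iterkeys
        Heat304 = checks:check_python3_no_itervalues
    paths = ./heat/hacking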
diff --git a/heat/httpd/heat_api.py b/heat/httpd/heat_api.py
index 64f7e2c73..285894dbe 100644
--- a/heat/httpd/heat_api.py
+++ b/heat/httpd/heat_api.py
@@ -27,21 +27,26 @@ from heat.common import profiler
from heat import version as hversion
+CONF = cfg.CONF
+
+
def init_application():
i18n.enable_lazy()
- LOG = logging.getLogger('heat.api')
-
- logging.register_options(cfg.CONF)
+ # NOTE(hberaud): Call reset to ensure the ConfigOpts object doesn't
+ # already contain registered options if the app is reloaded.
+ CONF.reset()
+ logging.register_options(CONF)
version = hversion.version_info.version_string()
- cfg.CONF(project='heat', prog='heat-api', version=version)
- logging.setup(cfg.CONF, 'heat-api')
+ CONF(project='heat', prog='heat-api', version=version)
+ logging.setup(CONF, CONF.prog)
+ LOG = logging.getLogger(CONF.prog)
config.set_config_defaults()
messaging.setup()
- port = cfg.CONF.heat_api.bind_port
- host = cfg.CONF.heat_api.bind_host
- profiler.setup('heat-api', host)
+ port = CONF.heat_api.bind_port
+ host = CONF.heat_api.bind_host
+ profiler.setup(CONF.prog, host)
LOG.info('Starting Heat REST API on %(host)s:%(port)s',
{'host': host, 'port': port})
return config.load_paste_app()
diff --git a/heat/httpd/heat_api_cfn.py b/heat/httpd/heat_api_cfn.py
index 5e8cfcec3..67da8ddb9 100644
--- a/heat/httpd/heat_api_cfn.py
+++ b/heat/httpd/heat_api_cfn.py
@@ -27,24 +27,29 @@ from heat.common import profiler
from heat import version
+CONF = cfg.CONF
+
+
def init_application():
i18n.enable_lazy()
- LOG = logging.getLogger('heat.api.cfn')
-
- logging.register_options(cfg.CONF)
- cfg.CONF(project='heat',
- prog='heat-api-cfn',
- version=version.version_info.version_string())
- logging.setup(cfg.CONF, 'heat-api-cfn')
+ # NOTE(hberaud): Call reset to ensure the ConfigOpts object doesn't
+ # already contain registered options if the app is reloaded.
+ CONF.reset()
+ logging.register_options(CONF)
+ CONF(project='heat',
+ prog='heat-api-cfn',
+ version=version.version_info.version_string())
+ logging.setup(CONF, CONF.prog)
logging.set_defaults()
+ LOG = logging.getLogger(CONF.prog)
config.set_config_defaults()
messaging.setup()
- port = cfg.CONF.heat_api_cfn.bind_port
- host = cfg.CONF.heat_api_cfn.bind_host
+ port = CONF.heat_api_cfn.bind_port
+ host = CONF.heat_api_cfn.bind_host
LOG.info('Starting Heat API on %(host)s:%(port)s',
{'host': host, 'port': port})
- profiler.setup('heat-api-cfn', host)
+ profiler.setup(CONF.prog, host)
return config.load_paste_app()
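heat_api.py and heat_api_cfn.py above now share the same WSGI init pattern: a module-level CONF, CONF.reset() before option registration so a reloaded application does not trip over already-registered options, and logger/profiler names derived from CONF.prog. A minimal stand-alone sketch of that pattern (project name, prog, and the final app factory are placeholders, not Heat's real values):

    from oslo_config import cfg
    from oslo_log import log as logging

    CONF = cfg.CONF

    def build_wsgi_app():
        # Placeholder for config.load_paste_app() in the real services.
        def app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [b'ok']
        return app

    def init_application():
        # Reset first: a reloaded WSGI app must not re-register options
        # on a ConfigOpts object that is already populated.
        CONF.reset()
        logging.register_options(CONF)
        CONF(project='example', prog='example-api', version='1.0')
        logging.setup(CONF, CONF.prog)
        LOG = logging.getLogger(CONF.prog)
        LOG.info('%s configured', CONF.prog)
        return build_wsgi_app()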
diff --git a/heat/locale/de/LC_MESSAGES/heat.po b/heat/locale/de/LC_MESSAGES/heat.po
index 66b29e784..eb6f723be 100644
--- a/heat/locale/de/LC_MESSAGES/heat.po
+++ b/heat/locale/de/LC_MESSAGES/heat.po
@@ -8,16 +8,18 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Robert Simai <robert.simai@suse.com>, 2016. #zanata
# Frank Kloeker <eumel@arcor.de>, 2018. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2019. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2020. #zanata
msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2020-10-10 08:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2018-03-06 07:22+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
+"PO-Revision-Date: 2020-04-25 08:04+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
@@ -227,10 +229,6 @@ msgstr ""
"und können nicht gleichzeitig angegeben werden."
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s wurde angehalten, bis Hook %(h)s gelöscht wurde"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "%(action)s wird für die Ressource nicht unterstützt."
@@ -375,6 +373,11 @@ msgid "%(type)s not in valid format: %(error)s"
msgstr "%(type)s nicht im gültigen Format: %(error)s"
#, python-format
+msgid "%(value)s is out of range (min: %(min)s, max: %(max)s)"
+msgstr ""
+"%(value)s ist außerhalb des gültigen Bereichs (min: %(min)s, max: %(max)s)"
+
+#, python-format
msgid "%s Key Name must be a string"
msgstr "%s Schlüsselname muss eine Zeichenfolge sein"
@@ -1175,11 +1178,6 @@ msgstr "Eine Adressbereichs-ID, die dem Subnetzpool zugewiesen werden soll."
msgid "An application health check for the instances."
msgstr "Eine Anwendungszustandsprüfung für die Instanzen."
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr ""
-"Eine geordnete Liste von Firewallregeln, die auf die Firewall angewendet "
-"werden."
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -1390,8 +1388,9 @@ msgstr "Authentifizierungs-Hash-Algorithmus für die Ike-Richtlinie."
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "Authentifizierungs-Hash-Algorithmus für die IPSec-Richtlinie."
-msgid "Authorization failed."
-msgstr "Autorisation fehlgeschlagen."
+#, python-format
+msgid "Authorization failed.%(failure_reason)s"
+msgstr "Authorisierung fehlgeschlagen. %(failure_reason)s"
msgid "AutoScaling group ID to apply policy to."
msgstr "AutoScaling-Gruppen-ID, auf die die Richtlinie angewendet werden soll."
@@ -1554,14 +1553,6 @@ msgid "Cannot define the following properties at the same time: %s"
msgstr "Folgende Eigenschaften können nicht gleichzeitig definiert werden: %s"
#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"Verbindung zum Heat-Endpunkt in der Region \"%(region)s\" kann aufgrund von "
-"\"%(exc)s\" nicht hergestellt werden"
-
-#, python-format
msgid "Cannot get console url: %s"
msgstr "Die Konsolen-URL kann nicht abgerufen werden: %s"
@@ -1842,18 +1833,12 @@ msgstr ""
msgid "DB instance restore point."
msgstr "DB-Instanzwiederherstellungspunkt"
-msgid "DNS Domain id or name."
-msgstr "DNS-Domänen-ID oder -Name"
-
msgid "DNS IP address used inside tenant's network."
msgstr "DNS-IP-Adresse, die im Netzwerk des Mandanten verwendet wird."
msgid "DNS Name for the zone."
msgstr "DNS-Name für die Zone."
-msgid "DNS Record type."
-msgstr "DNS-Aufnahmetyp"
-
msgid "DNS RecordSet type."
msgstr "DNS-Datensatztyp."
@@ -1866,29 +1851,12 @@ msgstr "DNS-Domäne mit Floating-IP verbunden."
msgid "DNS domain associated with this network."
msgstr "DNS-Domäne, die diesem Netzwerk zugeordnet ist."
-msgid "DNS domain serial."
-msgstr "DNS-Domänen-Serial"
-
msgid "DNS name associated with floating ip."
msgstr "DNS-Name mit Floating-IP verbunden."
msgid "DNS name associated with the port."
msgstr "DNS-Name, der dem Port zugeordnet ist."
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"DNS-Datensatzdaten variieren je nach Art des Datensatzes. Weitere "
-"Informationen finden Sie in RFC 1035."
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr ""
-"DNS-Eintragspriorität Es wird nur für MX- und SRV-Typen berücksichtigt, "
-"andernfalls wird es ignoriert."
-
msgid "DNS zone serial number."
msgstr "DNS-Zonen-Seriennummer"
@@ -2142,9 +2110,6 @@ msgstr "Beschreibung für diese Schnittstelle"
msgid "Description of RecordSet."
msgstr "Beschreibung von RecordSet."
-msgid "Description of domain."
-msgstr "Beschreibung der Domäne"
-
msgid "Description of keystone domain."
msgstr "Beschreibung der Keystone-Domäne"
@@ -2163,9 +2128,6 @@ msgstr "Beschreibung des Keystone-Service."
msgid "Description of keystone user."
msgstr "Beschreibung des Keystone-Benutzers"
-msgid "Description of record."
-msgstr "Beschreibung des Datensatzes"
-
msgid "Description of the Node Group Template."
msgstr "Beschreibung der Knotengruppenvorlage"
@@ -2364,18 +2326,12 @@ msgid "Does not contain a valid AWS Access Key or certificate"
msgstr ""
"Enthält keinen gültigen AWS-Zugriffsschlüssel oder ein gültiges Zertifikat"
-msgid "Domain email."
-msgstr "Domänen-E-Mail"
-
msgid "Domain id for project."
msgstr "Domänen-ID für das Projekt"
msgid "Domain id for user."
msgstr "Domänen-ID für den Benutzer"
-msgid "Domain name."
-msgstr "Domänenname"
-
#, python-format
msgid "Duplicate names %s"
msgstr "Doppelte Namen %s"
@@ -2769,14 +2725,6 @@ msgstr "Fehler beim Aktualisieren der Clustervorlage '%(name)s' - %(reason)s"
msgid "Failed to update, can not found port info."
msgstr "Fehler beim Aktualisieren, keine Port-Informationen gefunden."
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"Fehler beim Überprüfen der Stapelvorlage mit dem Heat-Endpunkt in der Region "
-"\"%(region)s\" aufgrund von \"%(exc)s\""
-
msgid "Fake attribute !a."
msgstr "Gefälschtes Attribut !a."
@@ -2792,6 +2740,9 @@ msgstr "Gefälschte Eigenschaft !c."
msgid "Fake property a."
msgstr "Gefälschte Eigenschaft a."
+msgid "Fake property b."
+msgstr "Gefälschte Eigenschaft b."
+
msgid "Fake property c."
msgstr "Gefälschte Eigenschaft c."
@@ -3008,21 +2959,6 @@ msgstr ""
"COMPLETE übergeht, ohne auf ein Signal zu warten."
msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"Wie die user_data für den Server formatiert werden soll. Für HEAT_CFNTOOLS "
-"wird die Benutzerdaten als Teil der Konfigurationsdaten für die Cloud-"
-"Initialisierung von heat-cfntools gebündelt. Für RAW werden die "
-"Benutzerdaten unverändert an Nova übergeben. Für SOFTWARE_CONFIG werden "
-"Benutzerdaten als Teil der Softwarekonfigurationsdaten gebündelt, und "
-"Metadaten werden von allen zugehörigen SoftwareDeployment-Ressourcen "
-"abgeleitet."
-
-msgid ""
"How to handle changes to removal_policies on update. The default \"append\" "
"mode appends to the internal list, \"update\" replaces it on update."
msgstr ""
@@ -3484,9 +3420,6 @@ msgstr ""
msgid "Incorrect arguments to \"%(fn_name)s\" should be: %(example)s"
msgstr "Falsche Argumente zu \"%(fn_name)s\" sollten sein: %(example)s"
-msgid "Incorrect arguments: Items to concat must be lists."
-msgstr "Falsche Argumente: Zu concat gehörende Elemente müssen Listen sein."
-
msgid "Incorrect arguments: Items to merge must be maps."
msgstr "Falsche Argumente: Zu vereinende Elemente müssen Maps sein."
@@ -4440,6 +4373,13 @@ msgstr "Maximale Anzahl von Ressourcen im Cluster -1 bedeutet unbegrenzt."
msgid "Maximum number of resources in the group."
msgstr "Maximale Anzahl von Ressourcen in der Gruppe."
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr ""
+"Maximale Anzahl an Sekunden, die ein Monitor darauf wartet, dass eine "
+"Verbindung hergestellt wird, bevor das Zeitlimit überschritten wird. "
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr "Maximale Anzahl von Stapeln, die ein Mieter gleichzeitig haben darf."
@@ -5244,14 +5184,6 @@ msgid "Number of seconds for the DPD timeout."
msgstr "Anzahl der Sekunden für das DPD-Zeitlimit."
msgid ""
-"Number of stacks to delete at a time (per transaction). Note that a single "
-"stack may have many db rows (events, etc.) associated with it."
-msgstr ""
-"Anzahl der zu löschenden Stapel (pro Transaktion). Beachten Sie, dass einem "
-"einzelnen Stapel möglicherweise mehrere DB-Zeilen (Ereignisse usw.) "
-"zugeordnet sind."
-
-msgid ""
"Number of times to check whether an interface has been attached or detached."
msgstr ""
"Häufigkeit, mit der überprüft wird, ob eine Schnittstelle angehängt oder "
@@ -5578,6 +5510,9 @@ msgstr "Zeitraum (Sekunden) für die Auswertung."
msgid "Physical ID of the VPC. Not implemented."
msgstr "Physische ID der VPC. Nicht implementiert."
+msgid "Placeholder"
+msgstr "Platzhalter"
+
msgid "Please use OS::Heat::SoftwareDeploymentGroup instead."
msgstr "Bitte verwenden Sie stattdessen OS::Heat::SoftwareDeploymentGroup."
@@ -6136,9 +6071,6 @@ msgstr "RX / TX-Faktor."
msgid "Rebuilding server failed, status '%s'"
msgstr "Neuaufbau des Servers fehlgeschlagen, Status ' %s'"
-msgid "Record name."
-msgstr "Namen aufzeichnen"
-
msgid "RecordSet name."
msgstr "RecordSet-Name"
@@ -8121,6 +8053,11 @@ msgstr ""
"Die Mindestzeit in Millisekunden zwischen regulären Verbindungen des "
"Mitglieds."
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr ""
+"Die Mindestzeit in Sekunden zwischen den normalen Verbindungen des "
+"Mitglieds. "
+
msgid "The name for the QoS policy."
msgstr "Der Name für die QoS-Richtlinie."
@@ -8628,13 +8565,6 @@ msgstr ""
"Der Server konnte der Anforderung nicht entsprechen, da sie entweder "
"fehlerhaft oder auf andere Weise falsch ist."
-msgid ""
-"The servers to slave from to get DNS information and is mandatory for zone "
-"type SECONDARY, otherwise ignored."
-msgstr ""
-"Die Server, von denen aus die DNS-Informationen abgerufen werden sollen, "
-"sind obligatorisch für den Zonentyp SECONDARY, andernfalls ignoriert."
-
msgid "The set of parameters passed to this nested stack."
msgstr ""
"Die Menge der Parameter, die an diesen verschachtelten Stapel übergeben "
@@ -9116,6 +9046,20 @@ msgstr ""
"in der Gruppe [cache]) aktiviert sein muss, um diese Funktion zu verwenden."
msgid ""
+"Toggle to enable/disable caching when Orchestration Engine validates "
+"property constraints of stack. During property validation with constraints "
+"Orchestration Engine caches requests to other OpenStack services. Please "
+"note that the global toggle for oslo.cache(enabled=True in [cache] group) "
+"must be enabled to use this feature."
+msgstr ""
+"Umschalten zum Aktivieren/Deaktivieren des Caching, wenn die Orchestrierungs-"
+"Engine die Eigenschafteneinschränkungen von stack überprüft. Bei der "
+"Überprüfung der Eigenschaften mit Einschränkungen speichert die "
+"Orchestrierung Engine Anfragen an andere OpenStack-Dienste zwischen. Bitte "
+"beachten Sie, dass der globale Schalter für oslo.cache (enables=True in der "
+"Gruppe [cache]) aktiviert sein muss, um diese Funktion zu verwenden."
+
+msgid ""
"Token for stack-user which can be used for signalling handle when "
"signal_transport is set to TOKEN_SIGNAL. None for all other signal "
"transports."
@@ -9196,13 +9140,6 @@ msgid "Type of the volume to create on Cinder backend."
msgstr "Typ des Datenträgers, das auf dem Cinder-Backend erstellt werden soll."
msgid ""
-"Type of zone. PRIMARY is controlled by Designate, SECONDARY zones are slaved "
-"from another DNS Server."
-msgstr ""
-"Art der Zone PRIMARY wird von Designate gesteuert, SECONDARY-Zonen werden "
-"von einem anderen DNS-Server verwaltet."
-
-msgid ""
"URI of the subscriber which will be notified. Must be in the format: <TYPE>:"
"<VALUE>."
msgstr ""
@@ -9501,12 +9438,6 @@ msgstr "Aktualisieren eines Stapels, wenn dieser angehalten ist"
msgid "Use LBaaS V2 instead."
msgstr "Verwenden Sie stattdessen LBaaS V2."
-msgid "Use OS::Designate::RecordSet instead."
-msgstr "Verwenden Sie stattdessen OS::Designate::RecordSet."
-
-msgid "Use OS::Designate::Zone instead."
-msgstr "Verwenden Sie stattdessen OS::Designate::Zone."
-
msgid ""
"Use get_resource|Ref command instead. For example: { get_resource : "
"<resource_name> }"
@@ -9559,16 +9490,6 @@ msgstr "Benutzer %s in ungültigem Projekt"
msgid "User ID for API authentication"
msgstr "Benutzer-ID für die API-Authentifizierung"
-msgid ""
-"User data script to be executed by cloud-init. Changes cause replacement of "
-"the resource by default, but can be ignored altogether by setting the "
-"`user_data_update_policy` property."
-msgstr ""
-"Benutzerdatenskript, das von cloud-init ausgeführt wird. Änderungen "
-"verursachen standardmäßig das Ersetzen der Ressource, können jedoch durch "
-"Festlegen der Eigenschaft 'user_data_update_policy' vollständig ignoriert "
-"werden."
-
msgid "User data to pass to instance."
msgstr "Benutzerdaten, die an die Instanz übergeben werden."
@@ -10068,6 +9989,12 @@ msgid "key replacement %s collides with a key in the output map"
msgstr ""
"Schlüsselersetzung %s kollidiert mit einem Schlüssel in der Ausgabekarte"
+#, python-format
+msgid "length (%(length)d) is out of range (min: %(min)s, max: %(max)s)"
+msgstr ""
+"Länge (%(length)d) ist außerhalb des gültigen Bereichs (min: %(min)s, max: "
+"%(max)s)"
+
msgid "limit cannot be less than 4"
msgstr "Limit kann nicht weniger als 4 sein"
diff --git a/heat/locale/es/LC_MESSAGES/heat.po b/heat/locale/es/LC_MESSAGES/heat.po
index 11d43d1a4..894600c60 100644
--- a/heat/locale/es/LC_MESSAGES/heat.po
+++ b/heat/locale/es/LC_MESSAGES/heat.po
@@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -151,10 +151,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "La sintaxis de \"repeat\" debe ser %s"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s se ha detenido hasta que se borre el gancho %(h)s"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "%(action)s no está soportada para el recurso."
@@ -779,10 +775,6 @@ msgstr "Un ID de ámbito de dirección a asignar a la agrupación de subred."
msgid "An application health check for the instances."
msgstr "Una comprobación de estado de aplicación para las instancias."
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr ""
-"Una lista ordenada de reglas de cortafuegos que se aplican al cortafuegos."
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -927,9 +919,6 @@ msgstr "El algoritmo hash de autenticación para la política ike."
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "El algoritmo hash de autenticación para la política ipsec."
-msgid "Authorization failed."
-msgstr "Ha fallado la autorización."
-
msgid "AutoScaling group ID to apply policy to."
msgstr "ID de grupo de escalado automático al que aplicar la política."
@@ -1046,14 +1035,6 @@ msgid "Cannot define the following properties at the same time: %(props)s."
msgstr ""
"No se puede definir las siguientes propiedades al mismo tiempo: %(props)s."
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"No se puede establecer la conexión con el punto final de Heat en la región "
-"\"%(region)s\" debido a \"%(exc)s\""
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1273,32 +1254,9 @@ msgstr "No se puede encontrar plantilla de equilibrador de carga personalizada"
msgid "DB instance restore point."
msgstr "Punto de restauración de instancia de base de datos."
-msgid "DNS Domain id or name."
-msgstr "ID o nombre de dominio DNS."
-
msgid "DNS IP address used inside tenant's network."
msgstr "Dirección IP DNS utilizada dentro de la red del inquilino."
-msgid "DNS Record type."
-msgstr "Tipo de registro DNS."
-
-msgid "DNS domain serial."
-msgstr "Serial de dominio DNS."
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"Los datos de registro DNS varían según el tipo de recurso. Para obtener "
-"información más detallada, consulte rfc 1035."
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr ""
-"Prioridad del registro DNS. Solo se tiene en cuenta para los tipos MX y SRV, "
-"en otros casos se ignora."
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "Datos proporcionados no son válidos: %(reason)s"
@@ -1510,9 +1468,6 @@ msgstr "Descripción para el servicio vpn."
msgid "Description for this interface."
msgstr "Descripción para esta interfaz."
-msgid "Description of domain."
-msgstr "Descripción del dominio."
-
msgid "Description of keystone group."
msgstr "Descripción del grupo de keystone."
@@ -1528,9 +1483,6 @@ msgstr "Descripción del servicio de keystone."
msgid "Description of keystone user."
msgstr "Descripción del usuario de keystone."
-msgid "Description of record."
-msgstr "Descripción del registro."
-
msgid "Description of the Node Group Template."
msgstr "Descripción de la plantilla del grupo de nodos."
@@ -1677,12 +1629,6 @@ msgstr "Formato de disco de la imagen."
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "No contiene una clave AWS Access Key o un certificádo válido"
-msgid "Domain email."
-msgstr "Correo electrónico del dominio."
-
-msgid "Domain name."
-msgstr "Nombre de dominio."
-
#, python-format
msgid "Duplicate names %s"
msgstr "Nombres duplicados %s"
@@ -1967,14 +1913,6 @@ msgstr "No se ha podido actualizar la bahía '%(name)s' - %(reason)s"
msgid "Failed to update, can not found port info."
msgstr "Ha fallado la actualización, no se encuentra información de puerto."
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"No se ha podido validar la plantilla de pila utilizando el punto final de "
-"Heat en la región \"%(region)s\" debido a \"%(exc)s\""
-
msgid "Fake attribute !a."
msgstr "Atributo ficticio !a."
@@ -2180,20 +2118,6 @@ msgstr ""
"dará como resultado que el recurso se ponga en estado COMPLETO, sin esperar "
"ninguna señal."
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"La manera en que los datos de usuario se deben formatear para el servidor. "
-"Para HEAT_CFNTOOLS, los datos de usuario se empaquetan como parte de los "
-"datos de configuración de arranque heat-cfntools cloud-init. Para RAW, los "
-"datos de usuario se pasan a Nova sin modificarlos. Para SOFTWARE_CONFIG, los "
-"datos de usuario se empaquetan como parte de los datos de configuración de "
-"software y los metadatos se obtienen de los recursos SoftwareDeployment ."
-
msgid "Human readable name for the secret."
msgstr "El nombre del secreto legible por humanos."
@@ -3218,6 +3142,13 @@ msgstr "Número máximo de recursos del clúster. -1 significa ilimitado."
msgid "Maximum number of resources in the group."
msgstr "Número máximo de recursos del grupo."
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr ""
+"Número máximo de segundos que un supervisor debe esperar a que se establezca "
+"una conexión antes de que exceda el tiempo de espera."
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr ""
"Número máximo de pilas que cualquier arrendatario puede tener activas "
@@ -4392,9 +4323,6 @@ msgstr "Factor RX/TX."
msgid "Rebuilding server failed, status '%s'"
msgstr "La reconstrucción del servidor ha fallado, estado '%s'"
-msgid "Record name."
-msgstr "Nombre de registro."
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "La profundidad de recursión excede %d."
@@ -5973,6 +5901,10 @@ msgstr ""
"igual al valor del atributo port_range_max. Si el protocolo es ICMP, este "
"valor debe ser un tipo ICMP."
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr ""
+"El tiempo mínimo en segundos entre las conexiones regulares del miembro."
+
msgid "The name for the QoS policy."
msgstr "El nombre de la política de QoS."
diff --git a/heat/locale/fr/LC_MESSAGES/heat.po b/heat/locale/fr/LC_MESSAGES/heat.po
index 8fe5c801b..e381b54d5 100644
--- a/heat/locale/fr/LC_MESSAGES/heat.po
+++ b/heat/locale/fr/LC_MESSAGES/heat.po
@@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -154,10 +154,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "la syntaxe de \"repeat\" doit être %s"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s mis en pause jusqu'à ce que le point d'ancrage %(h)s soit effacé"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "%(action)s n'est pas supporté par la ressource."
@@ -774,9 +770,6 @@ msgstr "ID de portée d'adresse à affecter au pool de sous-réseau."
msgid "An application health check for the instances."
msgstr "Un diagnostic d'intégrité d'application pour les instances."
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr "Liste ordonnée de règles de pare-feu à appliquer au pare-feu."
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -923,9 +916,6 @@ msgstr "Algorithme de hachage d'authentification pour la stratégie IKE."
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "Algorithme de hachage d'authentification pour la stratégie IPSec."
-msgid "Authorization failed."
-msgstr "Echec de l'autorisation."
-
msgid "AutoScaling group ID to apply policy to."
msgstr "ID de groupe AutoScaling auquel appliquer la règle."
@@ -1041,14 +1031,6 @@ msgid "Cannot define the following properties at the same time: %(props)s."
msgstr ""
"Impossible de définir simultanément les propriétés suivantes : %(props)s."
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"Impossible d'établir une connexion au noeud final Heat dans la région "
-"\"%(region)s\". Cause : \"%(exc)s\""
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1262,32 +1244,9 @@ msgstr "Le modèle LoadBalancer personnalisé est introuvable"
msgid "DB instance restore point."
msgstr "Point de restauration de l'instance de base de données."
-msgid "DNS Domain id or name."
-msgstr "ID ou nom du domaine DNS."
-
msgid "DNS IP address used inside tenant's network."
msgstr "Adresse IP DNS utilisée dans le réseau du locataire."
-msgid "DNS Record type."
-msgstr "Type d'enregistrement DNS."
-
-msgid "DNS domain serial."
-msgstr "Numéro de série du domaine DNS."
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"Données d'enregistrement DNS, varient en fonction du type d'enregistrement. "
-"Pour plus de détails, voir rfc 1035."
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr ""
-"Priorité de l'enregistrement DNS. Prise compte uniquement pour les types MX "
-"et SRV (sinon est ignorée)."
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "Les données fournies n'étaient pas valides: %(reason)s "
@@ -1504,9 +1463,6 @@ msgstr "Description du service vpn."
msgid "Description for this interface."
msgstr "Description pour cette interface."
-msgid "Description of domain."
-msgstr "Description du domaine."
-
msgid "Description of keystone group."
msgstr "Description du groupe Keystone."
@@ -1522,9 +1478,6 @@ msgstr "Description du service Keystone."
msgid "Description of keystone user."
msgstr "Description de l'utilisateur Keystone."
-msgid "Description of record."
-msgstr "Description de l'enregistrement."
-
msgid "Description of the Node Group Template."
msgstr "Description du modèle de groupe de noeuds."
@@ -1672,12 +1625,6 @@ msgstr "Format de disque de l'image."
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "Ne contient pas de clé d'accès ou de certificat AWS valide"
-msgid "Domain email."
-msgstr "E-mail du domaine."
-
-msgid "Domain name."
-msgstr "Nom du domaine."
-
#, python-format
msgid "Duplicate names %s"
msgstr "Noms en double %s"
@@ -1956,14 +1903,6 @@ msgstr "Echec de mise à jour de la baie '%(name)s' - %(reason)s"
msgid "Failed to update, can not found port info."
msgstr "Echec de la mise à jour, les informations de port sont introuvables."
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"Echec de la validation du modèle de pile à l'aide du noeud final Heat dans "
-"la région \"%(region)s\". Cause : \"%(exc)s\""
-
msgid "Fake attribute !a."
msgstr "Attribut factice !a."
@@ -2171,20 +2110,6 @@ msgstr ""
"d'identification Keystone fournies. NO_SIGNAL va se produire dans la "
"ressource qui passe à l'état COMPLETE sans attendre aucun signal."
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"Indique comment les user_data doivent être formatées pour le serveur. Pour "
-"HEAT_CFNTOOLS, les user_data sont regroupées en tant qu'élément des données "
-"de configuration d'amorçage heat-cfntools cloud-init. Pour RAW, les "
-"user_data sont transmises à Nova non modifiées. Pour SOFTWARE_CONFIG, les "
-"user_data font partie des données de configuration et les métadonnées sont "
-"dérivées des ressources SoftwareDeployment associées."
-
msgid "Human readable name for the secret."
msgstr "Nom lisible du secret."
@@ -3199,6 +3124,13 @@ msgstr "Nombre maximum de ressources dans le cluster. -1 signifie illimité."
msgid "Maximum number of resources in the group."
msgstr "Nombre maximal de ressources dans le groupe."
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr ""
+"Nombre maximal de secondes pendant lequel le moniteur attend qu'une "
+"connexion soit établie."
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr ""
"Nombre maximum de piles pouvant être actives en même temps pour n'importe "
@@ -4371,9 +4303,6 @@ msgstr "Facteur RX/TX."
msgid "Rebuilding server failed, status '%s'"
msgstr "Echec de la régénération du serveur, statut '%s'"
-msgid "Record name."
-msgstr "Nom de l'enregistrement."
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "La profondeur de récursivité dépasse %d."
@@ -5934,6 +5863,9 @@ msgstr ""
"doit être inférieure ou égale à la valeur de l'attribut port_range_max. Si "
"le protocole est ICMP, cette valeur doit être un type ICMP."
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr "Délai minimal en secondes entre les connexions normales du membre."
+
msgid "The name for the QoS policy."
msgstr "Nom de la stratégie de qualité de service."
diff --git a/heat/locale/it/LC_MESSAGES/heat.po b/heat/locale/it/LC_MESSAGES/heat.po
index 7cbf4b10d..a8282f90d 100644
--- a/heat/locale/it/LC_MESSAGES/heat.po
+++ b/heat/locale/it/LC_MESSAGES/heat.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -148,10 +148,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "la sintassi \"repeat\" deve essere %s"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s messo in pausa fino a quando non è annullato hook %(h)s"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "%(action)s non è supportata per la risorsa."
@@ -775,9 +771,6 @@ msgstr "Un ID dell'ambito di indirizzi da assegnare al pool di sottorete."
msgid "An application health check for the instances."
msgstr "Una verifica stato dell'applicazione per le istanze."
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr "Un elenco ordinato di regole del firewall da applicare al firewall."
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -922,9 +915,6 @@ msgstr "Algoritmo hash di autenticazione per la politica ike."
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "Algoritmo hash di autenticazione per la politica ipsec."
-msgid "Authorization failed."
-msgstr "Autorizzazione non riuscita."
-
msgid "AutoScaling group ID to apply policy to."
msgstr "ID gruppo AutoScaling su cui applicare la politica."
@@ -1041,14 +1031,6 @@ msgid "Cannot define the following properties at the same time: %(props)s."
msgstr ""
"Impossibile definire le seguenti proprietà contemporaneamente: %(props)s."
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"Impossibile stabilire una connessione all'endpoint Heat nella regione "
-"\"%(region)s\" a causa di \"%(exc)s\""
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1261,32 +1243,9 @@ msgstr "Impossibile trovare il template LoadBalancer personalizzato"
msgid "DB instance restore point."
msgstr "Punto di ripristino dell'istanza DB."
-msgid "DNS Domain id or name."
-msgstr "ID o nome del dominio DNS."
-
msgid "DNS IP address used inside tenant's network."
msgstr "Indirizzo IP DNS utilizzato all'interno della rete del tenant."
-msgid "DNS Record type."
-msgstr "Tipo di record DNS."
-
-msgid "DNS domain serial."
-msgstr "Numero di serie del dominio DNS."
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"Dati del record DNS, variano in base al tipo di record. Per maggiori "
-"dettagli, fare riferimento a rfc 1035."
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr ""
-"Priorità del record DNS. Viene considerato solo per i tipi MX e SRV, "
-"altrimenti, viene ignorato."
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "I dati forniti non erano validi: %(reason)s"
@@ -1501,9 +1460,6 @@ msgstr "Descrizione per il servizio vpn."
msgid "Description for this interface."
msgstr "Descrizione per questa interfaccia."
-msgid "Description of domain."
-msgstr "Descrizione del dominio."
-
msgid "Description of keystone group."
msgstr "Descrizione del gruppo keystone."
@@ -1519,9 +1475,6 @@ msgstr "Descrizione del servizio keystone."
msgid "Description of keystone user."
msgstr "Descrizione dell'utente keystone."
-msgid "Description of record."
-msgstr "Descrizione del record."
-
msgid "Description of the Node Group Template."
msgstr "Descrizione del template del gruppo di nodi."
@@ -1668,12 +1621,6 @@ msgstr "Formato disco dell'immagine."
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "Non contiene un certificato o una chiave di accesso AWS valida"
-msgid "Domain email."
-msgstr "Email del dominio."
-
-msgid "Domain name."
-msgstr "Nome dominio."
-
#, python-format
msgid "Duplicate names %s"
msgstr "Nomi duplicati %s"
@@ -1956,14 +1903,6 @@ msgid "Failed to update, can not found port info."
msgstr ""
"Impossibile aggiornare, non è possibile reperire le informazioni sulla porta."
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"Impossibile convalidare il template dello stack utilizzando l'endpoint Heat "
-"nella regione \"%(region)s\" a causa di \"%(exc)s\""
-
msgid "Fake attribute !a."
msgstr "Attributo falso !a."
@@ -2166,19 +2105,6 @@ msgstr ""
"keystone fornite. NO_SIGNAL fa in modo che la risorsa passi nello stato "
"COMPLETE senza attendere segnali."
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"Il modo in cui user_data deve essere formattato per il server. Per "
-"HEAT_CFNTOOLS, user_data viene fornito come parte dei dati di configurazione "
-"di avvio Per RAW user_data viene passato a Nova senza modifiche. Per "
-"SOFTWARE_CONFIG user_data è fornito come parte dei dati della configurazione "
-"software e i metadati derivano dalle risorse SoftwareDeployment associato."
-
msgid "Human readable name for the secret."
msgstr "Nome leggibile dall'uomo del segreto."
@@ -3182,6 +3108,13 @@ msgstr "Numero massimo di risorse nel cluster. -1 indica illimitato."
msgid "Maximum number of resources in the group."
msgstr "Numero massimo di risorse nel gruppo."
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr ""
+"Numero massimo di secondi che un monitor deve attendere per stabilire una "
+"connessione prima che venga raggiunto il timeout."
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr ""
"Numero massimo di stack un qualsiasi tenant può avere attivo "
@@ -4347,9 +4280,6 @@ msgstr "Fattore RX/TX."
msgid "Rebuilding server failed, status '%s'"
msgstr "Nuova creazione del server non riuscita, stato '%s'"
-msgid "Record name."
-msgstr "Nome record."
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "La profondità di ricorsività ha superato %d."
@@ -5910,6 +5840,10 @@ msgstr ""
"port_range_max. Se il protocollo è ICMP, questo valore deve essere di tipo "
"ICMP."
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr ""
+"Il tempo minimo espresso in secondi tra connessioni regolari del membro."
+
msgid "The name for the QoS policy."
msgstr "Il nome della politica QoS."
diff --git a/heat/locale/ja/LC_MESSAGES/heat.po b/heat/locale/ja/LC_MESSAGES/heat.po
index 9be7a20b3..d639207ae 100644
--- a/heat/locale/ja/LC_MESSAGES/heat.po
+++ b/heat/locale/ja/LC_MESSAGES/heat.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -153,10 +153,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "\"repeat\" 構文は %s でなければなりません"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s はフック %(h)s がクリアされるまで休止されます"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "%(action)s はリソースではサポートされていません。"
@@ -737,9 +733,6 @@ msgstr "サブネットプールに割り当てる必要があるアドレスス
msgid "An application health check for the instances."
msgstr "インスタンスのアプリケーションヘルスチェック。"
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr "ファイアウォールに適用するファイアウォールルールの番号付きリスト。"
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -882,9 +875,6 @@ msgstr "IKE ポリシーの認証ハッシュアルゴリズム。"
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "ipsec ポリシーの認証ハッシュアルゴリズム。"
-msgid "Authorization failed."
-msgstr "認証に失敗しました。"
-
msgid "AutoScaling group ID to apply policy to."
msgstr "ポリシーを適用するオートスケールグループ ID。"
@@ -998,14 +988,6 @@ msgstr "%s を検査できません。スタックは作成されません"
msgid "Cannot define the following properties at the same time: %(props)s."
msgstr "次のプロパティーは同時に定義できません: %(props)s。"
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"\"%(exc)s\" が原因でリージョン \"%(region)s\" での heat エンドポイントへの接"
-"続を確立できません"
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1227,32 +1209,9 @@ msgstr "カスタムロードバランサーテンプレートが見つかりま
msgid "DB instance restore point."
msgstr "DB インスタンスの復元ポイント。"
-msgid "DNS Domain id or name."
-msgstr "DNS ドメインの ID または名前。"
-
msgid "DNS IP address used inside tenant's network."
msgstr "テナントのネットワーク内で使用されている DNS の IP アドレス。"
-msgid "DNS Record type."
-msgstr "DNS のレコードタイプ。"
-
-msgid "DNS domain serial."
-msgstr "DNS のドメインシリアル。"
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"レコードのタイプによって異なる DNS レコードのデータ。詳細については、rfc "
-"1035 を参照してください。"
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr ""
-"DNS レコードの優先順位。これは MX と SRV のタイプの場合のみ考慮され、その他の"
-"タイプの場合は無視されます。"
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "指定されたデータは無効でした: %(reason)s"
@@ -1450,9 +1409,6 @@ msgstr "VPN サービスの説明。"
msgid "Description for this interface."
msgstr "このインターフェースの説明。"
-msgid "Description of domain."
-msgstr "ドメインの説明。"
-
msgid "Description of keystone group."
msgstr "keystone グループの説明。"
@@ -1468,9 +1424,6 @@ msgstr "keystone サービスの説明。"
msgid "Description of keystone user."
msgstr "keystone ユーザーの説明。"
-msgid "Description of record."
-msgstr "レコードの説明。"
-
msgid "Description of the Node Group Template."
msgstr "ノードグループテンプレートの説明。"
@@ -1613,12 +1566,6 @@ msgstr "イメージのディスク形式。"
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "有効な AWS アクセスキーまたは証明書が含まれていません"
-msgid "Domain email."
-msgstr "ドメインの E メール。"
-
-msgid "Domain name."
-msgstr "ドメイン名。"
-
#, python-format
msgid "Duplicate names %s"
msgstr "名前 %s が重複しています"
@@ -1914,14 +1861,6 @@ msgstr "ベイ '%(name)s' の更新に失敗しました: %(reason)s"
msgid "Failed to update, can not found port info."
msgstr "更新できませんでした。ポート情報が見つかりません。"
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"\"%(exc)s\" が原因でリージョン \"%(region)s\" での heat エンドポイントを使用"
-"したスタックテンプレートの検証に失敗しました"
-
msgid "Fake attribute !a."
msgstr "フェイク属性 !a。"
@@ -2117,19 +2056,6 @@ msgstr ""
"成できます。NO_SIGNAL を設定すると、リソースは信号を待機せずに COMPLETE 状態"
"に達します。"
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"user_data のサーバー用のフォーマット方法。heat_CFNTOOLS の場合、user_data は "
-"heat-cfntools cloud-init ブート設定データの一部として組み込まれます。RAW の場"
-"合、user_data は未変更のまま Nova に渡されます。SOFTWARE_CONFIG の場合、"
-"user_data はソフトウェア設定データの一部として組み込まれ、関連付けられたすべ"
-"ての SoftwareDeployment リソースからメタデータが派生します。"
-
msgid "Human readable name for the secret."
msgstr "人間が読むことができる秘密の名前。"
@@ -3124,6 +3050,12 @@ msgstr "クラスター内のリソースの最小数。-1 は無制限を指し
msgid "Maximum number of resources in the group."
msgstr "グループ内のリソースの最大数。"
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr ""
+"これを超えるとタイムアウトになる、モニターが接続の確立を待機する最大秒数。"
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr "任意の 1 つのテナントが同時にアクティブにできるスタックの最大数。"
@@ -4249,9 +4181,6 @@ msgstr "RX/TX 係数。"
msgid "Rebuilding server failed, status '%s'"
msgstr "サーバーの再ビルドに失敗しました。状況 '%s'"
-msgid "Record name."
-msgstr "レコード名。"
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "再帰深度が %d を超えています。"
@@ -5777,6 +5706,9 @@ msgstr ""
"なければなりません。プロトコルがICMP の場合、この値は ICMP タイプでなければな"
"りません。"
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr "メンバーの定期接続間の最小時間 (秒)。"
+
msgid "The name for the QoS policy."
msgstr "QoS ポリシーの名前。"
diff --git a/heat/locale/ko_KR/LC_MESSAGES/heat.po b/heat/locale/ko_KR/LC_MESSAGES/heat.po
index e7f0ec1dd..8ed3c0e9e 100644
--- a/heat/locale/ko_KR/LC_MESSAGES/heat.po
+++ b/heat/locale/ko_KR/LC_MESSAGES/heat.po
@@ -11,7 +11,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -154,10 +154,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "\"repeat\" 구문은 %s이어야 함"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "후크 %(h)s을(를) 지울 때까지 %(a)s을(를) 일시정지함"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "자원에 대해 %(action)s이(가) 지원되지 않습니다."
@@ -716,9 +712,6 @@ msgstr "서브넷 풀에 할당할 주소 범위 ID입니다."
msgid "An application health check for the instances."
msgstr "인스턴스의 애플리케이션 상태 검사입니다."
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr "방화벽에 적용할 방화벽 규칙의 순서 지정된 목록입니다."
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -852,9 +845,6 @@ msgstr "ike 정책의 인증 해시 알고리즘입니다."
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "ipsec 정책의 인증 해시 알고리즘입니다."
-msgid "Authorization failed."
-msgstr "권한 부여에 실패했습니다. "
-
msgid "AutoScaling group ID to apply policy to."
msgstr "정책을 적용할 AutoScaling 그룹 ID입니다."
@@ -966,14 +956,6 @@ msgstr "%s을(를) 검사할 수 없음. 스택이 작성되지 않음"
msgid "Cannot define the following properties at the same time: %(props)s."
msgstr "다음 특성을 동시에 정의할 수 없음: %(props)s."
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"다음 이유로 리젼 \"%(region)s\"에서 히트 엔드포인트에 연결을 설정할 수 없음: "
-"\"%(exc)s\""
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1180,32 +1162,9 @@ msgstr "사용자 정의 로드 밸런서 템플리트를 찾을 수 없음"
msgid "DB instance restore point."
msgstr "DB 인스턴스 복원 지점입니다."
-msgid "DNS Domain id or name."
-msgstr "DNS 도메인 id 또는 이름."
-
msgid "DNS IP address used inside tenant's network."
msgstr "테넌트의 네트워크 내에서 사용하는 DNS IP 주소입니다."
-msgid "DNS Record type."
-msgstr "DNS 레코드 유형."
-
-msgid "DNS domain serial."
-msgstr "DNS 도메인 일련 번호."
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"DNS 레코드 데이터는 레코드 타입에 따라 다양합니다. 자세한 내용은 rfc 1035를 "
-"참조하십시오."
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr ""
-"DNS 레코드 우선순위입니다. MX 및 SRV 타입에만 고려되며, 그렇지 않은 경우 무시"
-"합니다."
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "제공된 날짜가 올바르지 않음: %(reason)s"
@@ -1398,9 +1357,6 @@ msgstr "vpn 서비스에 대한 설명입니다."
msgid "Description for this interface."
msgstr "이 인터페이스에 대한 설명입니다."
-msgid "Description of domain."
-msgstr "도메인의 설명입니다."
-
msgid "Description of keystone group."
msgstr "keystone 그룹의 설명입니다."
@@ -1416,9 +1372,6 @@ msgstr "keystone 서비스의 설명입니다."
msgid "Description of keystone user."
msgstr "keystone 사용자의 설명입니다."
-msgid "Description of record."
-msgstr "레코드 설명."
-
msgid "Description of the Node Group Template."
msgstr "노드 그룹 템플리트의 설명입니다."
@@ -1561,12 +1514,6 @@ msgstr "이미지의 디스크 형식입니다."
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "올바른 AWS 액세스 키 또는 인증서를 포함하고 있지 않음"
-msgid "Domain email."
-msgstr "도메인 이메일."
-
-msgid "Domain name."
-msgstr "도메인 이름."
-
#, python-format
msgid "Duplicate names %s"
msgstr "중복된 이름 %s"
@@ -1833,14 +1780,6 @@ msgstr "Bay '%(name)s' - %(reason)s 업데이트 실패"
msgid "Failed to update, can not found port info."
msgstr "업데이트에 실패했고 포트 정보를 찾을 수 없습니다."
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"다음 리젼에서 히트 엔드포인트를 사용하여 스택 템플리트의 유효성을 검증하지 못"
-"함: \"%(region)s\", 이유: \"%(exc)s\""
-
msgid "Fake attribute !a."
msgstr "Fake 속성 !a."
@@ -2034,19 +1973,6 @@ msgstr ""
"를 작성합니다. NO_SIGNAL을 사용하면 신호를 기다리지 않고 자원이 COMPLETE 단계"
"로 이동합니다."
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"서버에 대해 user_data가 형식화되어야 하는 방법입니다. HEAT_CFNTOOLS의 경우 "
-"user_data는 heat-cfntools cloud-init 부트 구성 데이터의 파트로 번들화됩니다. "
-"RAW의 경우 user_data는 수정되지 않은 Nova로 전달됩니다.SOFTWARE_CONFIG의 경"
-"우 user_data는 소프트웨어 구성 데이터의 일부로 번들되며 메타데이터는 연관된 "
-"SoftwareDeployment 자원에서 간격입니다."
-
msgid "Human readable name for the secret."
msgstr "시크릿의 판독 가능한 이름입니다."
@@ -2980,6 +2906,13 @@ msgstr "클러스터의 최대 자원 수입니다. -1은 무제한을 나타냅
msgid "Maximum number of resources in the group."
msgstr "그룹의 최대 자원 수입니다."
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr ""
+"제한시간 초과되기 전에 연결이 설정될 때까지 대기하는 모니터에 대한 최대 시간"
+"(초)입니다."
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr "하나의 테넌트가 한 번에 활성으로 가질 수 있는 최대 스택 수입니다."
@@ -4073,9 +4006,6 @@ msgstr "RX/TX 요인입니다."
msgid "Rebuilding server failed, status '%s'"
msgstr "서버 다시 빌드 실패. 상태 '%s'"
-msgid "Record name."
-msgstr "레코드 이름."
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "순환 깊이가 %d을(를) 초과합니다."
@@ -5541,6 +5471,9 @@ msgstr ""
"UDP인 경우 이 값은 port_range_max 속성과 같거나 작아야 합니다. 프로토콜이 "
"ICMP인 경우 이 값은 ICMP 유형이어야 합니다."
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr "일반 멤버 연결 간의 최소 시간(초)입니다."
+
msgid "The name for the QoS policy."
msgstr "QoS 정책의 이름입니다."
diff --git a/heat/locale/pt_BR/LC_MESSAGES/heat.po b/heat/locale/pt_BR/LC_MESSAGES/heat.po
index e4fe2629a..b30c6e657 100644
--- a/heat/locale/pt_BR/LC_MESSAGES/heat.po
+++ b/heat/locale/pt_BR/LC_MESSAGES/heat.po
@@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -147,10 +147,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "a sintaxe \"repeat\" deve ser %s"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s pausado até que o Hook %(h)s seja limpo"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "%(action)s não é suportada para o recurso."
@@ -770,10 +766,6 @@ msgstr "ID do escopo de endereço para designar ao conjunto de sub-redes."
msgid "An application health check for the instances."
msgstr "Uma verificação de funcionamento do aplicativo para as instâncias."
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr ""
-"Uma lista ordenada de regras do firewall a serem aplicadas no firewall."
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -915,9 +907,6 @@ msgstr "Algoritmo hash de autenticação para a política ike."
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "Algoritmo hash de autenticação para a política ipsec."
-msgid "Authorization failed."
-msgstr "Falha de autorização."
-
msgid "AutoScaling group ID to apply policy to."
msgstr "ID do grupo AutoScaling para aplicar à política."
@@ -1033,14 +1022,6 @@ msgid "Cannot define the following properties at the same time: %(props)s."
msgstr ""
"Não é possível definir as propriedades a seguir ao mesmo tempo: %(props)s."
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"Não é possível estabelecer conexão com o terminal Heat na região \"%(region)s"
-"\" devido a \"%(exc)s\""
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1249,32 +1230,9 @@ msgstr "Modelo de balanceador de carga customizado não pôde ser localizado"
msgid "DB instance restore point."
msgstr "Ponto de restauração da instância do BD."
-msgid "DNS Domain id or name."
-msgstr "ID ou nome do Domínio do DNS"
-
msgid "DNS IP address used inside tenant's network."
msgstr "Endereço IP do DNS usado dentro da rede do locatário. "
-msgid "DNS Record type."
-msgstr "Tipo de Registro do DNS"
-
-msgid "DNS domain serial."
-msgstr "Serial do domínio DNS."
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"Os dados de registro do DNS variam com base no tipo de registro. Para obter "
-"mais detalhes, consulte rfc 1035"
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr ""
-"A prioridade do registro do DNS. Ela é considerada para tipos MX e SRV, caso "
-"contrário, é ignorada."
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "Dados fornecidos não eram válidos: %(reason)s"
@@ -1481,9 +1439,6 @@ msgstr "Descrição para o serviço de vpn."
msgid "Description for this interface."
msgstr "Descrição para esta interface."
-msgid "Description of domain."
-msgstr "Descrição do domínio."
-
msgid "Description of keystone group."
msgstr "Descrição do grupo de keystone."
@@ -1499,9 +1454,6 @@ msgstr "Descrição do serviço do keystone."
msgid "Description of keystone user."
msgstr "Descrição do usuário do keystone."
-msgid "Description of record."
-msgstr "Descrição do registro."
-
msgid "Description of the Node Group Template."
msgstr "Descrição do Modelo de Grupo de Nós."
@@ -1647,12 +1599,6 @@ msgstr "Formato do disco da imagem."
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "Não contém uma chave de acesso AWS ou um certificado válidos"
-msgid "Domain email."
-msgstr "E-mail do domínio."
-
-msgid "Domain name."
-msgstr "Nome do domínio."
-
#, python-format
msgid "Duplicate names %s"
msgstr "Nomes duplicados %s"
@@ -1932,14 +1878,6 @@ msgid "Failed to update, can not found port info."
msgstr ""
"Falha na atualização, não é possível localizar as informações da porta."
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"Falha ao validar o modelo de pilha usando o terminal Heat na região "
-"\"%(region)s\" devido a \"%(exc)s\""
-
msgid "Fake attribute !a."
msgstr "Atributo falso !a."
@@ -2138,20 +2076,6 @@ msgstr ""
"fornecidas. NO_SIGNAL resultará no recurso entrando no estado COMPLETE sem "
"aguardar nenhum sinal."
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"Como o user_data deve ser formatado para o servidor. Para HEAT_CFNTOOLS, o "
-"user_data é empacotado como parte dos dados de configuração de inicialização "
-"heat-cfntools cloud-init. Para RAW, o user_data é transmitido para Nova sem "
-"modificações. Por SOFTWARE_CONFIG, user_data é empacotado como parte dos "
-"dados de configuração de software e os metadados são derivados de qualquer "
-"recurso SoftwareDeployment associado."
-
msgid "Human readable name for the secret."
msgstr "Nome legível para o segredo."
@@ -3144,6 +3068,13 @@ msgstr "Número máximo de recursos no cluster. O valor -1 significa ilimitado.
msgid "Maximum number of resources in the group."
msgstr "Número máximo de recursos no grupo."
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr ""
+"Número máximo de segundos para um monitor aguardar que uma conexão seja "
+"estabelecida antes de atingir o tempo limite."
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr ""
"Número máximo de pilhas que um locatário pode ter ativas ao mesmo tempo."
@@ -4290,9 +4221,6 @@ msgstr "Fator RX/TX."
msgid "Rebuilding server failed, status '%s'"
msgstr "Falha ao reconstruir servidor, status '%s'"
-msgid "Record name."
-msgstr "Nome do registro."
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "A espessura de recursão excede %d."
@@ -5834,6 +5762,9 @@ msgstr ""
"ou igual ao valor do atributo port_range_max. Se o protocolo for ICMP, esse "
"valor deve ser um tipo ICMP."
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr "O tempo mínimo, em segundos, entre as conexões comuns do membro."
+
msgid "The name for the QoS policy."
msgstr "O nome da política do QoS."
diff --git a/heat/locale/ru/LC_MESSAGES/heat.po b/heat/locale/ru/LC_MESSAGES/heat.po
index f6dec4749..4dd1fc649 100644
--- a/heat/locale/ru/LC_MESSAGES/heat.po
+++ b/heat/locale/ru/LC_MESSAGES/heat.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -151,10 +151,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "Формат \"repeat\" должен быть %s"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s приостанавливается до очистки Hook %(h)s"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "%(action)s не поддерживается для ресурса."
@@ -757,9 +753,6 @@ msgstr "ИД адресной области для присвоения пул
msgid "An application health check for the instances."
msgstr "Проверка работоспособности приложений для экземпляров."
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr "Упорядоченный список правил брандмауэра для применения к брандмауэру."
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -901,9 +894,6 @@ msgstr "Хэш-алгоритм идентификации для стратег
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "Хэш-алгоритм идентификации для стратегии ipsec."
-msgid "Authorization failed."
-msgstr "Доступ не предоставлен."
-
msgid "AutoScaling group ID to apply policy to."
msgstr "ИД группы AutoScaling, к которой применяется стратегия."
@@ -1017,14 +1007,6 @@ msgstr "Проверить %s невозможно, стек не создан"
msgid "Cannot define the following properties at the same time: %(props)s."
msgstr "Не удается определить следующие свойства одновременно: %(props)s."
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"Не удалось установить соединение с конечной точкой Heat в области "
-"\"%(region)s\", причина: \"%(exc)s\""
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1230,30 +1212,9 @@ msgstr "Не найден пользовательский шаблон LoadBala
msgid "DB instance restore point."
msgstr "Точка восстановления экземпляра БД."
-msgid "DNS Domain id or name."
-msgstr "ИД домена или имя DNS."
-
msgid "DNS IP address used inside tenant's network."
msgstr "IP-адрес сервера DNS, используемый в сети арендатора."
-msgid "DNS Record type."
-msgstr "Тип записи DNS."
-
-msgid "DNS domain serial."
-msgstr "Серийный номер домена в DNS."
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr "Данные записи DNS, зависят от типа записи. См. rfc 1035."
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr ""
-"Приоритет записи DNS. Действует только для записей типа MX и SRV, для прочих "
-"- игнорируется."
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "Предоставленные данные недопустимы: %(reason)s"
@@ -1459,9 +1420,6 @@ msgstr "Описание службы vpn."
msgid "Description for this interface."
msgstr "Описание этого интерфейса."
-msgid "Description of domain."
-msgstr "Описание домена."
-
msgid "Description of keystone group."
msgstr "Описание группы keystone."
@@ -1477,9 +1435,6 @@ msgstr "Описание службы keystone."
msgid "Description of keystone user."
msgstr "Описание пользователя keystone."
-msgid "Description of record."
-msgstr "Описание записи."
-
msgid "Description of the Node Group Template."
msgstr "Описание шаблона группы узлов."
@@ -1624,12 +1579,6 @@ msgstr "Дисковый формат образа."
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "Не содержится верный ключ доступа AWS или сертификат"
-msgid "Domain email."
-msgstr "Электронная почта домена."
-
-msgid "Domain name."
-msgstr "Имя домена."
-
#, python-format
msgid "Duplicate names %s"
msgstr "Повторяющиеся имена %s"
@@ -1906,14 +1855,6 @@ msgstr "Не удается обновить отсек '%(name)s' - %(reason)s"
msgid "Failed to update, can not found port info."
msgstr "Не удалось обновить: не найдена информация о портах."
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"Не удалось проверить шаблон стека с помощью конечной точки Heat в области "
-"\"%(region)s\", причина: \"%(exc)s\""
-
msgid "Fake attribute !a."
msgstr "Поддельный атрибут !a."
@@ -2114,20 +2055,6 @@ msgstr ""
"позволяет ресурсу перейти в состояние COMPLETE, не дожидаясь никакого "
"сигнала."
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"Способ форматирования user_data should для сервера. Для HEAT_CFNTOOLS "
-"user_data объединяется в комплект в составе данных конфигурации загрузки "
-"heat-cfntools cloud-init. Для RAW user_data передается в Nova без изменений. "
-"Для SOFTWARE_CONFIG user_data объединяется в комплект в составе данных "
-"конфигурации загрузки, а метаданные получают из любых связанных ресурсов "
-"SoftwareDeployment."
-
msgid "Human readable name for the secret."
msgstr "Описательное имя секретного ключа."
@@ -3117,6 +3044,13 @@ msgstr ""
msgid "Maximum number of resources in the group."
msgstr "Максимальное число ресурсов в группе."
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr ""
+"Максимальное время ожидания соединения монитором, в секундах, до наступления "
+"тайм-аута."
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr ""
"Максимальное число стеков, которые могут быть активны одновременно для "
@@ -4241,9 +4175,6 @@ msgstr "Фактор RX/TX."
msgid "Rebuilding server failed, status '%s'"
msgstr "Изменение конфигурации сервера не выполнено, состояние '%s'"
-msgid "Record name."
-msgstr "Имя записи."
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "Глубина рекурсии превышает %d."
@@ -5758,6 +5689,9 @@ msgstr ""
"меньше или равно значению атрибута port_range_max. Если используется "
"протокол ICMP, это значение должно иметь тип ICMP."
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr "Минимальное время в секундах между регулярными соединениями участника."
+
msgid "The name for the QoS policy."
msgstr "Имя стратегии QoS."
diff --git a/heat/locale/zh_CN/LC_MESSAGES/heat.po b/heat/locale/zh_CN/LC_MESSAGES/heat.po
index fdd494fec..2e0336eea 100644
--- a/heat/locale/zh_CN/LC_MESSAGES/heat.po
+++ b/heat/locale/zh_CN/LC_MESSAGES/heat.po
@@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -145,10 +145,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "“repeat”语法应该是 %s"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s 暂停到清除挂钩 %(h)s 为止"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "%(action)s 对于资源而言不受支持。"
@@ -685,9 +681,6 @@ msgstr "要分配给子网池的地址范围标识。"
msgid "An application health check for the instances."
msgstr "针对实例的应用程序运行状况检查。"
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr "要对防火墙应用的防火墙规则的有序列表。"
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -815,9 +808,6 @@ msgstr "ike 策略的认证散列算法。"
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "IPSec 策略的认证散列算法。"
-msgid "Authorization failed."
-msgstr "授权失败。"
-
msgid "AutoScaling group ID to apply policy to."
msgstr "要对其应用策略的 AutoScaling 组标识。"
@@ -925,13 +915,6 @@ msgstr "无法检查 %s,未创建堆栈"
msgid "Cannot define the following properties at the same time: %(props)s."
msgstr "无法同时定义下列属性:%(props)s。"
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr ""
-"由于发生以下异常,无法与区域“%(region)s”上 Heat 端点建立连接:“%(exc)s”"
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1127,29 +1110,9 @@ msgstr "找不到定制负载均衡器模板"
msgid "DB instance restore point."
msgstr "数据库实例复原点。"
-msgid "DNS Domain id or name."
-msgstr "DNS 域标识或名称。"
-
msgid "DNS IP address used inside tenant's network."
msgstr "租户网络中使用的 DNS IP 地址。"
-msgid "DNS Record type."
-msgstr "DNS 记录类型。"
-
-msgid "DNS domain serial."
-msgstr "DNS 域序列号。"
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"DNS 记录数据,根据记录类型不同而变化。有关更多详细信息,请参阅 rfc 1035。"
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr "DNS 记录优先级。仅针对 MX 和 SRV 类型考虑此项,否则它被忽略。"
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "提供的数据无效:%(reason)s"
@@ -1331,9 +1294,6 @@ msgstr "VPN 服务的描述。"
msgid "Description for this interface."
msgstr "此接口的描述。"
-msgid "Description of domain."
-msgstr "域的描述。"
-
msgid "Description of keystone group."
msgstr "keystone 组的描述。"
@@ -1349,9 +1309,6 @@ msgstr "keystone 服务的描述。"
msgid "Description of keystone user."
msgstr "keystone 用户的描述。"
-msgid "Description of record."
-msgstr "记录的描述。"
-
msgid "Description of the Node Group Template."
msgstr "节点组模板的描述。"
@@ -1492,12 +1449,6 @@ msgstr "映像的磁盘格式。"
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "未包含有效 AWS 访问密钥或证书"
-msgid "Domain email."
-msgstr "域电子邮件。"
-
-msgid "Domain name."
-msgstr "域名。"
-
#, python-format
msgid "Duplicate names %s"
msgstr "名称 %s 重复"
@@ -1754,13 +1705,6 @@ msgstr "无法更新支架“%(name)s” - %(reason)s"
msgid "Failed to update, can not found port info."
msgstr "未能更新,找不到端口信息。"
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"由于发生以下异常,未能使用区域“%(region)s”上 Heat 端点验证堆栈模板:“%(exc)s”"
-
msgid "Fake attribute !a."
msgstr "伪属性 !a。"
@@ -1945,18 +1889,6 @@ msgstr ""
"resource-signal。ZAQAR_SIGNAL 指示将创建使用所提供的 keystone 证书通告的专用 "
"zaqar 队列。NO_SIGNAL 将导致资源进入 COMPLETE 状态而不等待任何信号。"
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"应该为服务器对 user_data 进行格式设置的方式。对于 HEAT_CFNTOOLS,user_data 作"
-"为 heat-cfntools cloud-init 引导配置数据的一部分捆绑。对于 RAW,会将 "
-"user_data 按原样传递至 Nova。对于 SOFTWARE_CONFIG,user_data 作为软件配置数据"
-"的一部分捆绑,并且元数据派生自任何关联的 SoftwareDeployment 资源。"
-
msgid "Human readable name for the secret."
msgstr "密钥的人类可读名称。"
@@ -2857,6 +2789,11 @@ msgstr "集群中的最大资源数。-1 表示无限制。"
msgid "Maximum number of resources in the group."
msgstr "组中的最大资源数。"
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr "在连接超时之前供监视器等待该连接建立的最长时间,以秒计。"
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr "一个租户可同时持有的最大活动堆栈数。"
@@ -3899,9 +3836,6 @@ msgstr "RX/TX 因子。"
msgid "Rebuilding server failed, status '%s'"
msgstr "重建服务器失败,状态为“%s”"
-msgid "Record name."
-msgstr "记录名称。"
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "递归深度超过 %d。"
@@ -5311,6 +5245,9 @@ msgstr ""
"通过安全组规则匹配的范围中的最小端口号。如果协议为 TCP 或 UDP,那么此值必须小"
"于或等于 port_range_max 属性的值。如果协议为 ICMP,那么此值必须为 ICMP 类型。"
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr "对成员进行常规连接的最短间隔时间,以秒计。"
+
msgid "The name for the QoS policy."
msgstr "QoS 策略的名称。"
diff --git a/heat/locale/zh_TW/LC_MESSAGES/heat.po b/heat/locale/zh_TW/LC_MESSAGES/heat.po
index 5e50707b8..5ec103824 100644
--- a/heat/locale/zh_TW/LC_MESSAGES/heat.po
+++ b/heat/locale/zh_TW/LC_MESSAGES/heat.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: heat VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-01-24 04:18+0000\n"
+"POT-Creation-Date: 2019-12-20 05:37+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -144,10 +144,6 @@ msgid "\"repeat\" syntax should be %s"
msgstr "\"repeat\" 語法應該為 %s"
#, python-format
-msgid "%(a)s paused until Hook %(h)s is cleared"
-msgstr "%(a)s 已暫停,直到清除連結鉤 %(h)s 為止"
-
-#, python-format
msgid "%(action)s is not supported for resource."
msgstr "資源不支援 %(action)s。"
@@ -684,9 +680,6 @@ msgstr "要指派給子網路儲存區的位址範圍 ID。"
msgid "An application health check for the instances."
msgstr "實例的應用程式性能檢查。"
-msgid "An ordered list of firewall rules to apply to the firewall."
-msgstr "要套用至防火牆的防火牆規則有序清單。"
-
msgid ""
"An ordered list of nics to be added to this server, with information about "
"connected networks, fixed ips, port etc."
@@ -815,9 +808,6 @@ msgstr "IKE 原則的鑑別雜湊演算法。"
msgid "Authentication hash algorithm for the ipsec policy."
msgstr "IPSec 原則的鑑別雜湊演算法。"
-msgid "Authorization failed."
-msgstr "授權失敗。"
-
msgid "AutoScaling group ID to apply policy to."
msgstr "要套用原則的 AutoScaling 群組 ID。"
@@ -925,12 +915,6 @@ msgstr "無法檢查 %s,未建立堆疊"
msgid "Cannot define the following properties at the same time: %(props)s."
msgstr "無法同時定義下列內容:%(props)s。"
-#, python-format
-msgid ""
-"Cannot establish connection to Heat endpoint at region \"%(region)s\" due to "
-"\"%(exc)s\""
-msgstr "無法建立與區域 \"%(region)s\" 中 Heat 端點的連線,因為\"%(exc)s\""
-
msgid ""
"Cannot get stack domain user token, no stack domain id configured, please "
"fix your heat.conf"
@@ -1127,29 +1111,9 @@ msgstr "找不到自訂負載平衡器範本"
msgid "DB instance restore point."
msgstr "DB 實例還原點。"
-msgid "DNS Domain id or name."
-msgstr "DNS 網域 ID 或名稱。"
-
msgid "DNS IP address used inside tenant's network."
msgstr "租戶網路內使用的 DNS IP 位址。"
-msgid "DNS Record type."
-msgstr "DNS 記錄類型。"
-
-msgid "DNS domain serial."
-msgstr "DNS 網域序列。"
-
-msgid ""
-"DNS record data, varies based on the type of record. For more details, "
-"please refer rfc 1035."
-msgstr ""
-"DNS 記錄資料,根據記錄的類型而有所不同。 如需更多詳細資料,請參閱 RFC 1035。"
-
-msgid ""
-"DNS record priority. It is considered only for MX and SRV types, otherwise, "
-"it is ignored."
-msgstr "DNS 記錄優先順序。只將其視為 MX 和 SRV 類型,否則會將其忽略。"
-
#, python-format
msgid "Data supplied was not valid: %(reason)s"
msgstr "提供的資料無效:%(reason)s"
@@ -1331,9 +1295,6 @@ msgstr "VPN 服務的說明。"
msgid "Description for this interface."
msgstr "此介面的說明。"
-msgid "Description of domain."
-msgstr "網域的說明。"
-
msgid "Description of keystone group."
msgstr "Keystone 群組的說明。"
@@ -1349,9 +1310,6 @@ msgstr "Keystone 服務的說明。"
msgid "Description of keystone user."
msgstr "Keystone 使用者的說明。"
-msgid "Description of record."
-msgstr "記錄的說明。"
-
msgid "Description of the Node Group Template."
msgstr "節點群組範本的說明。"
@@ -1492,12 +1450,6 @@ msgstr "映像檔的磁碟格式。"
msgid "Does not contain a valid AWS Access Key or certificate"
msgstr "不包含有效的「AWS 存取金鑰」或憑證"
-msgid "Domain email."
-msgstr "網域電子郵件。"
-
-msgid "Domain name."
-msgstr "地域名稱。"
-
#, python-format
msgid "Duplicate names %s"
msgstr "重複名稱 %s"
@@ -1756,13 +1708,6 @@ msgstr "無法更新機架 '%(name)s' - %(reason)s"
msgid "Failed to update, can not found port info."
msgstr "無法更新,找不到埠資訊。"
-#, python-format
-msgid ""
-"Failed validating stack template using Heat endpoint at region \"%(region)s"
-"\" due to \"%(exc)s\""
-msgstr ""
-"使用區域 \"%(region)s\" 中的 Heat 端點來驗證堆疊範本失敗,因為 \"%(exc)s\""
-
msgid "Fake attribute !a."
msgstr "偽造屬性 !a。"
@@ -1948,18 +1893,6 @@ msgstr ""
"號的專用 zaqar 佇列。NO_SIGNAL 將導致資源跳至「完成」狀態,而不會等待任何信"
"號。"
-msgid ""
-"How the user_data should be formatted for the server. For HEAT_CFNTOOLS, the "
-"user_data is bundled as part of the heat-cfntools cloud-init boot "
-"configuration data. For RAW the user_data is passed to Nova unmodified. For "
-"SOFTWARE_CONFIG user_data is bundled as part of the software config data, "
-"and metadata is derived from any associated SoftwareDeployment resources."
-msgstr ""
-"應該如何給伺服器格式化 user_data。對於 HEAT_CFNTOOLS,user_data 會組合到 "
-"heat-cfntools cloud-init 啟動配置資料,成為其一部分。對於 RAW,user_data 會按"
-"原狀傳遞給 Nova。對於 SOFTWARE_CONFIG,user_data 會組合到軟體配置資料,成為其"
-"一部分,而 meta 資料衍生自任何相關聯的 SoftwareDeployment 資源。"
-
msgid "Human readable name for the secret."
msgstr "人類可讀的密碼名稱。"
@@ -2865,6 +2798,11 @@ msgstr "叢集中的資源數目上限。-1 表示無限。"
msgid "Maximum number of resources in the group."
msgstr "群組中的資源數目上限。"
+msgid ""
+"Maximum number of seconds for a monitor to wait for a connection to be "
+"established before it times out."
+msgstr "監視器等待建立連線發生逾時之前的秒數上限。"
+
msgid "Maximum number of stacks any one tenant may have active at one time."
msgstr "任何一個承租人一次可具有的作用中堆疊數目上限。"
@@ -3907,9 +3845,6 @@ msgstr "RX/TX 因數。"
msgid "Rebuilding server failed, status '%s'"
msgstr "重建伺服器時失敗,狀態為 '%s'"
-msgid "Record name."
-msgstr "記錄名稱。"
-
#, python-format
msgid "Recursion depth exceeds %d."
msgstr "遞迴深度超過 %d。"
@@ -5322,6 +5257,9 @@ msgstr ""
"範圍中安全群組規則所符合的埠號下限。如果通訊協定是 TCP 或 UDP,此值必須小於或"
"等於 port_range_max 屬性的值。如果通訊協定是ICMP,則此值必須是 ICMP 類型。"
+msgid "The minimum time in seconds between regular connections of the member."
+msgstr "成員的兩次定期連線之間的最短時間(以秒為單位)。"
+
msgid "The name for the QoS policy."
msgstr "服務品質原則的名稱。"
diff --git a/heat/objects/event.py b/heat/objects/event.py
index eb4f7587c..f51a6ac8b 100644
--- a/heat/objects/event.py
+++ b/heat/objects/event.py
@@ -51,7 +51,7 @@ class Event(
event._resource_properties = None
for field in event.fields:
if field == 'resource_status_reason':
- # this works whether db_event is a dict or db ref
+ # this works whether db_event is a dict or DB ref
event[field] = db_event['_resource_status_reason']
else:
event[field] = db_event[field]
diff --git a/heat/objects/fields.py b/heat/objects/fields.py
index 88290150d..19680f6d9 100644
--- a/heat/objects/fields.py
+++ b/heat/objects/fields.py
@@ -15,12 +15,11 @@
from oslo_serialization import jsonutils as json
from oslo_versionedobjects import fields
-import six
class Json(fields.FieldType):
def coerce(self, obj, attr, value):
- if isinstance(value, six.string_types):
+ if isinstance(value, str):
loaded = json.loads(value)
return loaded
return value
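The hunk above drops six from heat/objects/fields.py; on Python 3, str covers everything six.string_types used to. A minimal standalone sketch of the resulting coerce() behaviour (not part of the patch, it just mirrors the class shown above):

from oslo_serialization import jsonutils as json
from oslo_versionedobjects import fields


class Json(fields.FieldType):
    def coerce(self, obj, attr, value):
        # JSON-encoded text is decoded; already-parsed values pass through.
        if isinstance(value, str):
            return json.loads(value)
        return value


f = Json()
print(f.coerce(None, 'data', '{"answer": 42}'))  # -> {'answer': 42}
print(f.coerce(None, 'data', {'answer': 42}))    # -> {'answer': 42}, unchanged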
diff --git a/heat/objects/resource.py b/heat/objects/resource.py
index 3aba3405f..c99e572dc 100644
--- a/heat/objects/resource.py
+++ b/heat/objects/resource.py
@@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
-import six
import tenacity
from heat.common import crypt
@@ -41,7 +40,7 @@ LOG = logging.getLogger(__name__)
def retry_on_conflict(func):
wrapper = tenacity.retry(
stop=tenacity.stop_after_attempt(11),
- wait=tenacity.wait_random(max=2),
+ wait=tenacity.wait_random_exponential(multiplier=0.5, max=60),
retry=tenacity.retry_if_exception_type(
exception.ConcurrentTransaction),
reraise=True)
@@ -57,7 +56,7 @@ class ResourceCache(object):
self.by_stack_id_name = collections.defaultdict(dict)
def set_by_stack_id(self, resources):
- for res in six.itervalues(resources):
+ for res in resources.values():
self.by_stack_id_name[res.stack_id][res.name] = res
@@ -190,7 +189,7 @@ class Resource(
resource_name,
cls._from_db_object(cls(context), context, resource_db)
)
- for resource_name, resource_db in six.iteritems(resources_db)
+ for resource_name, resource_db in resources_db.items()
]
return dict(resources)
@@ -246,7 +245,7 @@ class Resource(
resource_name,
cls._from_db_object(cls(context), context, resource_db)
)
- for resource_name, resource_db in six.iteritems(resources_db)
+ for resource_name, resource_db in resources_db.items()
]
return dict(resources)
@@ -259,7 +258,7 @@ class Resource(
resource_id,
cls._from_db_object(cls(context), context, resource_db)
)
- for resource_id, resource_db in six.iteritems(resources_db)
+ for resource_id, resource_db in resources_db.items()
]
return dict(resources)
@@ -280,7 +279,7 @@ class Resource(
context,
stack_id,
stack_id_only=True)
- return {db_res.stack_id for db_res in six.itervalues(resources_db)}
+ return {db_res.stack_id for db_res in resources_db.values()}
@classmethod
def purge_deleted(cls, context, stack_id):
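The retry_on_conflict change above swaps a flat 0-2 second random wait for random exponential backoff capped at 60 seconds. A minimal sketch of the new behaviour, using a stand-in exception in place of heat.common.exception.ConcurrentTransaction (assumption: only the tenacity library is needed):

import tenacity


class ConflictError(Exception):
    """Stand-in for heat.common.exception.ConcurrentTransaction."""


state = {'attempts': 0}


@tenacity.retry(
    stop=tenacity.stop_after_attempt(11),
    wait=tenacity.wait_random_exponential(multiplier=0.5, max=60),
    retry=tenacity.retry_if_exception_type(ConflictError),
    reraise=True)
def update_row():
    # Fail twice to show the decorator retrying with growing random waits.
    state['attempts'] += 1
    if state['attempts'] < 3:
        raise ConflictError('simulated write conflict')
    return 'updated'


print(update_row())  # succeeds on the third attempt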
diff --git a/heat/objects/stack.py b/heat/objects/stack.py
index a6904865d..349aaada8 100644
--- a/heat/objects/stack.py
+++ b/heat/objects/stack.py
@@ -18,7 +18,6 @@
from oslo_log import log as logging
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -117,7 +116,7 @@ class Stack(
def get_by_name_and_owner_id(cls, context, stack_name, owner_id):
db_stack = db_api.stack_get_by_name_and_owner_id(
context,
- six.text_type(stack_name),
+ str(stack_name),
owner_id
)
if not db_stack:
@@ -127,7 +126,7 @@ class Stack(
@classmethod
def get_by_name(cls, context, stack_name):
- db_stack = db_api.stack_get_by_name(context, six.text_type(stack_name))
+ db_stack = db_api.stack_get_by_name(context, str(stack_name))
if not db_stack:
return None
stack = cls._from_db_object(context, cls(context), db_stack)
diff --git a/heat/objects/user_creds.py b/heat/objects/user_creds.py
index 718e46c95..abfa3610a 100644
--- a/heat/objects/user_creds.py
+++ b/heat/objects/user_creds.py
@@ -49,7 +49,7 @@ class UserCreds(
return db_ucreds
ucreds._context = context
for field in ucreds.fields:
- # TODO(Shao HE Feng), now the db layer delete the decrypt_method
+ # TODO(Shao HE Feng), now the DB layer delete the decrypt_method
# field, just skip it here. and will add an encrypted_field later.
if field == "decrypt_method":
continue
diff --git a/heat/policies/actions.py b/heat/policies/actions.py
index 4dd45fcb0..1aa53fd70 100644
--- a/heat/policies/actions.py
+++ b/heat/policies/actions.py
@@ -10,25 +10,140 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
POLICY_ROOT = 'actions:%s'
+DEPRECATED_REASON = """
+The actions API now supports system scope and default roles.
+"""
+
+deprecated_action = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'action',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_snapshot = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_suspend = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'suspend',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_resume = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'resume',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_check = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'check',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_cancel_update = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'cancel_update',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_cancel_without_rollback = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'cancel_without_rollback',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
+
actions_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'action',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
description='Performs non-lifecycle operations on the stack '
- '(Snapshot, Resume, Cancel update, or check stack resources).',
- operations=[
- {
- 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/'
- 'actions',
- 'method': 'POST'
- }
- ]
+ '(Snapshot, Resume, Cancel update, or check stack resources). '
+ 'This is the default for all actions but can be overridden by more '
+ 'specific policies for individual actions.',
+ operations=[{
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
+ 'method': 'POST',
+ }],
+ deprecated_rule=deprecated_action
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'snapshot',
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+        description='Create stack snapshot.',
+ operations=[{
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
+ 'method': 'POST',
+ }],
+ deprecated_rule=deprecated_snapshot
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'suspend',
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ description='Suspend a stack.',
+ operations=[{
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
+ 'method': 'POST',
+ }],
+ deprecated_rule=deprecated_suspend
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'resume',
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ description='Resume a suspended stack.',
+ operations=[{
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
+ 'method': 'POST',
+ }],
+ deprecated_rule=deprecated_resume
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'check',
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ description='Check stack resources.',
+ operations=[{
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
+ 'method': 'POST',
+ }],
+ deprecated_rule=deprecated_check
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'cancel_update',
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ description='Cancel stack operation and roll back.',
+ operations=[{
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
+ 'method': 'POST',
+ }],
+ deprecated_rule=deprecated_cancel_update
+ ),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'cancel_without_rollback',
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ description='Cancel stack operation without rolling back.',
+ operations=[{
+ 'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}/actions',
+ 'method': 'POST',
+ }],
+ deprecated_rule=deprecated_cancel_without_rollback
)
]
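For context on how these defaults are consumed, here is a sketch under assumptions (a development checkout where heat.policies.actions is importable, throwaway dict credentials), not part of the patch: rules registered with an oslo.policy Enforcer keep honouring the attached DeprecatedRule during the deprecation window, so policy files written against RULE_DENY_STACK_USER keep working while new deployments get the scoped defaults.

from oslo_config import cfg
from oslo_policy import policy as oslo_policy

from heat.policies import actions

cfg.CONF([], project='heat')               # minimal config initialisation
enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.register_defaults(actions.actions_policies)

creds = {'roles': ['member'], 'project_id': 'p1'}   # fake token data
target = {'project_id': 'p1'}

# Allowed: a project member satisfies SYSTEM_ADMIN_OR_PROJECT_MEMBER.
print(enforcer.authorize('actions:snapshot', target, creds))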
diff --git a/heat/policies/base.py b/heat/policies/base.py
index 7f4d8643d..cdc3b9f7d 100644
--- a/heat/policies/base.py
+++ b/heat/policies/base.py
@@ -18,11 +18,45 @@ RULE_DENY_STACK_USER = 'rule:deny_stack_user'
RULE_DENY_EVERYBODY = 'rule:deny_everybody'
RULE_ALLOW_EVERYBODY = 'rule:allow_everybody'
+# Check strings that embody common personas
+SYSTEM_ADMIN = 'role:admin and system_scope:all'
+SYSTEM_READER = 'role:reader and system_scope:all'
+PROJECT_MEMBER = 'role:member and project_id:%(project_id)s'
+PROJECT_READER = 'role:reader and project_id:%(project_id)s'
+
+# Heat personas
+PROJECT_ADMIN = 'role:admin and project_id:%(project_id)s'
+PROJECT_STACK_USER = 'role:heat_stack_user and project_id:%(project_id)s'
+
+# Composite check strings that are useful for policies that protect APIs that
+# operate at different scopes.
+SYSTEM_ADMIN_OR_PROJECT_MEMBER = (
+ '(' + SYSTEM_ADMIN + ')'
+ ' or (' + PROJECT_MEMBER + ')'
+)
+SYSTEM_OR_PROJECT_READER = (
+ '(' + SYSTEM_READER + ')'
+ ' or (' + PROJECT_READER + ')'
+)
+SYSTEM_ADMIN_OR_PROJECT_MEMBER_OR_STACK_USER = (
+ '(' + SYSTEM_ADMIN + ')'
+ ' or (' + PROJECT_MEMBER + ')'
+ ' or (' + PROJECT_STACK_USER + ')'
+)
+SYSTEM_OR_PROJECT_READER_OR_STACK_USER = (
+ '(' + SYSTEM_READER + ')'
+ ' or (' + PROJECT_READER + ')'
+ ' or (' + PROJECT_STACK_USER + ')'
+)
+
rules = [
policy.RuleDefault(
name="context_is_admin",
- check_str="role:admin and is_admin_project:True",
+ check_str=(
+ "(role:admin and is_admin_project:True) OR "
+ "(" + SYSTEM_ADMIN + ")"
+ ),
description="Decides what is required for the 'is_admin:True' check "
"to succeed."),
policy.RuleDefault(
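The personas added to heat/policies/base.py above are plain string composition; a quick standalone sketch (not part of the patch) of what one composite expands to, which is the exact check string oslo.policy later parses:

SYSTEM_ADMIN = 'role:admin and system_scope:all'
PROJECT_MEMBER = 'role:member and project_id:%(project_id)s'

SYSTEM_ADMIN_OR_PROJECT_MEMBER = (
    '(' + SYSTEM_ADMIN + ')'
    ' or (' + PROJECT_MEMBER + ')'
)

print(SYSTEM_ADMIN_OR_PROJECT_MEMBER)
# (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)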
diff --git a/heat/policies/build_info.py b/heat/policies/build_info.py
index 066bf7bdb..5bc8e21da 100644
--- a/heat/policies/build_info.py
+++ b/heat/policies/build_info.py
@@ -10,23 +10,38 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
+DEPRECATED_REASON = """
+The build API now supports system scope and default roles.
+"""
+
POLICY_ROOT = 'build_info:%s'
+deprecated_build_info = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'build_info',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
+
build_info_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'build_info',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show build information.',
operations=[
{
'path': '/v1/{tenant_id}/build_info',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_build_info
)
]
diff --git a/heat/policies/cloudformation.py b/heat/policies/cloudformation.py
index aa61fa9a0..2508d8d92 100644
--- a/heat/policies/cloudformation.py
+++ b/heat/policies/cloudformation.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
@@ -17,48 +18,170 @@ from heat.policies import base
# These policies are for AWS CloudFormation-like APIs, so we won't list out
# the URI paths in rules.
+DEPRECATED_REASON = """
+The CloudFormation API now supports system scope and default roles.
+"""
+
POLICY_ROOT = 'cloudformation:%s'
+deprecated_list_stacks = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'ListStacks',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_create_stack = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'CreateStack',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_describe_stacks = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'DescribeStacks',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_delete_stack = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'DeleteStack',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_update_stack = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'UpdateStack',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_cancel_update_stack = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'CancelUpdateStack',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_describe_stack_events = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'DescribeStackEvents',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_validate_template = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'ValidateTemplate',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_get_template = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'GetTemplate',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_estimate_template_cost = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'EstimateTemplateCost',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_describe_stack_resource = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'DescribeStackResource',
+ check_str=base.RULE_ALLOW_EVERYBODY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_describe_stack_resources = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'DescribeStackResources',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_list_stack_resources = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'ListStackResources',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
cloudformation_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'ListStacks',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_list_stacks
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'CreateStack',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_create_stack
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'DescribeStacks',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_describe_stacks
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'DeleteStack',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_delete_stack
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'UpdateStack',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_update_stack
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'CancelUpdateStack',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_cancel_update_stack
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'DescribeStackEvents',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_describe_stack_events
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'ValidateTemplate',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_validate_template
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'GetTemplate',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_get_template
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'EstimateTemplateCost',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_estimate_template_cost
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'DescribeStackResource',
- check_str=base.RULE_ALLOW_EVERYBODY),
+ check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_describe_stack_resource
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'DescribeStackResources',
- check_str=base.RULE_DENY_STACK_USER),
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_describe_stack_resources
+ ),
policy.RuleDefault(
name=POLICY_ROOT % 'ListStackResources',
- check_str=base.RULE_DENY_STACK_USER)
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
+ deprecated_rule=deprecated_list_stack_resources
+ )
]
diff --git a/heat/policies/events.py b/heat/policies/events.py
index b6c1f21fa..b314e7003 100644
--- a/heat/policies/events.py
+++ b/heat/policies/events.py
@@ -10,16 +10,36 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
POLICY_ROOT = 'events:%s'
+DEPRECATED_REASON = """
+The events API now supports system scope and default roles.
+"""
+
+deprecated_index = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_show = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
+
events_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List events.',
operations=[
{
@@ -27,11 +47,13 @@ events_policies = [
'events',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_index
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show event.',
operations=[
{
@@ -39,7 +61,8 @@ events_policies = [
'resources/{resource_name}/events/{event_id}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_show
)
]
diff --git a/heat/policies/resource.py b/heat/policies/resource.py
index 8be1c2a40..85f582155 100644
--- a/heat/policies/resource.py
+++ b/heat/policies/resource.py
@@ -10,16 +10,53 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
POLICY_ROOT = 'resource:%s'
+DEPRECATED_REASON = """
+The resources API now supports system scope and default roles.
+"""
+
+deprecated_list_resources = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_mark_unhealthy = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'mark_unhealthy',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_show_resource = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY,
+)
+deprecated_metadata = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'metadata',
+ check_str=base.RULE_ALLOW_EVERYBODY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY,
+)
+deprecated_signal = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'signal',
+ check_str=base.RULE_ALLOW_EVERYBODY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY,
+)
+
resource_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List resources.',
operations=[
{
@@ -27,11 +64,13 @@ resource_policies = [
'resources',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_list_resources
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'metadata',
- check_str=base.RULE_ALLOW_EVERYBODY,
+ check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
+ scope_types=['system', 'project'],
description='Show resource metadata.',
operations=[
{
@@ -39,11 +78,13 @@ resource_policies = [
'resources/{resource_name}/metadata',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_metadata
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'signal',
- check_str=base.RULE_ALLOW_EVERYBODY,
+ check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
+ scope_types=['system', 'project'],
description='Signal resource.',
operations=[
{
@@ -51,11 +92,13 @@ resource_policies = [
'resources/{resource_name}/signal',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_signal
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'mark_unhealthy',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Mark resource as unhealthy.',
operations=[
{
@@ -63,11 +106,13 @@ resource_policies = [
'resources/{resource_name_or_physical_id}',
'method': 'PATCH'
}
- ]
+ ],
+ deprecated_rule=deprecated_mark_unhealthy
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show resource.',
operations=[
{
@@ -75,7 +120,8 @@ resource_policies = [
'resources/{resource_name}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_show_resource
)
]
diff --git a/heat/policies/resource_types.py b/heat/policies/resource_types.py
index 27b067c28..39e6d2596 100644
--- a/heat/policies/resource_types.py
+++ b/heat/policies/resource_types.py
@@ -36,6 +36,9 @@ resource_types_policies = [
name=POLICY_ROOT % 'OS::Nova::Quota',
check_str=base.RULE_PROJECT_ADMIN),
policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Octavia::Quota',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
name=POLICY_ROOT % 'OS::Manila::ShareType',
check_str=base.RULE_PROJECT_ADMIN),
policy.RuleDefault(
@@ -48,6 +51,12 @@ resource_types_policies = [
name=POLICY_ROOT % 'OS::Neutron::QoSBandwidthLimitRule',
check_str=base.RULE_PROJECT_ADMIN),
policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Neutron::QoSDscpMarkingRule',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Neutron::QoSMinimumBandwidthRule',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
name=POLICY_ROOT % 'OS::Neutron::Segment',
check_str=base.RULE_PROJECT_ADMIN),
policy.RuleDefault(
@@ -64,6 +73,12 @@ resource_types_policies = [
check_str=base.RULE_PROJECT_ADMIN),
policy.RuleDefault(
name=POLICY_ROOT % 'OS::Blazar::Host',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Octavia::Flavor',
+ check_str=base.RULE_PROJECT_ADMIN),
+ policy.RuleDefault(
+ name=POLICY_ROOT % 'OS::Octavia::FlavorProfile',
check_str=base.RULE_PROJECT_ADMIN)
]
diff --git a/heat/policies/service.py b/heat/policies/service.py
index 9bf86a696..3c3f42279 100644
--- a/heat/policies/service.py
+++ b/heat/policies/service.py
@@ -10,16 +10,30 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
+DEPRECATED_REASON = """
+The service API now supports system scope and default roles.
+"""
+
POLICY_ROOT = 'service:%s'
+deprecated_index = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_CONTEXT_IS_ADMIN,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
service_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.RULE_CONTEXT_IS_ADMIN)
+ check_str=base.SYSTEM_READER,
+ deprecated_rule=deprecated_index
+ )
]
diff --git a/heat/policies/software_configs.py b/heat/policies/software_configs.py
index 72f6f2c99..be5ca0441 100644
--- a/heat/policies/software_configs.py
+++ b/heat/policies/software_configs.py
@@ -10,67 +10,113 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
+DEPRECATED_REASON = """
+The software configuration API now supports system scope and default roles.
+"""
+
POLICY_ROOT = 'software_configs:%s'
+deprecated_global_index = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'global_index',
+ check_str=base.RULE_DENY_EVERYBODY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_index = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_create = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'create',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_show = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_delete = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'delete',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
software_configs_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'global_index',
- check_str=base.RULE_DENY_EVERYBODY,
+ check_str=base.SYSTEM_READER,
+ scope_types=['system', 'project'],
description='List configs globally.',
operations=[
{
'path': '/v1/{tenant_id}/software_configs',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_global_index
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List configs.',
operations=[
{
'path': '/v1/{tenant_id}/software_configs',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_index
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Create config.',
operations=[
{
'path': '/v1/{tenant_id}/software_configs',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_create
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show config details.',
operations=[
{
'path': '/v1/{tenant_id}/software_configs/{config_id}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_show
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Delete config.',
operations=[
{
'path': '/v1/{tenant_id}/software_configs/{config_id}',
'method': 'DELETE'
}
- ]
+ ],
+ deprecated_rule=deprecated_delete
)
]
diff --git a/heat/policies/software_deployments.py b/heat/policies/software_deployments.py
index 05f73d586..08e59c6eb 100644
--- a/heat/policies/software_deployments.py
+++ b/heat/policies/software_deployments.py
@@ -10,71 +10,119 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
+DEPRECATED_REASON = """
+The software deployment API now supports system scope and default roles.
+"""
+
POLICY_ROOT = 'software_deployments:%s'
+deprecated_index = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_create = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'create',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_show = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_update = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'update',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_delete = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'delete',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
+
software_deployments_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List deployments.',
operations=[
{
'path': '/v1/{tenant_id}/software_deployments',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_index
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Create deployment.',
operations=[
{
'path': '/v1/{tenant_id}/software_deployments',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_create
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show deployment details.',
operations=[
{
'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_show
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Update deployment.',
operations=[
{
'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
'method': 'PUT'
}
- ]
+ ],
+ deprecated_rule=deprecated_update
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Delete deployment.',
operations=[
{
'path': '/v1/{tenant_id}/software_deployments/{deployment_id}',
'method': 'DELETE'
}
- ]
+ ],
+ deprecated_rule=deprecated_delete
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'metadata',
- check_str=base.RULE_ALLOW_EVERYBODY,
+ check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
+ scope_types=['system', 'project'],
description='Show server configuration metadata.',
operations=[
{
diff --git a/heat/policies/stacks.py b/heat/policies/stacks.py
index 7332a69a3..5591ba5ff 100644
--- a/heat/policies/stacks.py
+++ b/heat/policies/stacks.py
@@ -10,16 +10,204 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import versionutils
from oslo_policy import policy
from heat.policies import base
+DEPRECATED_REASON = """
+The stack API now supports system scope and default roles.
+"""
+
POLICY_ROOT = 'stacks:%s'
+deprecated_abandon = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'abandon',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_create = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'create',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_delete = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'delete',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_detail = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'detail',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_export = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'export',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_generate_template = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'generate_template',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_global_index = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'global_index',
+ check_str=base.RULE_DENY_EVERYBODY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_index = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'index',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_list_resource_types = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'list_resource_types',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_list_template_versions = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'list_template_versions',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_list_template_functions = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'list_template_functions',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_preview = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'preview',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_resource_schema = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'resource_schema',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_show = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'show',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_template = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'template',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_environment = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'environment',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_files = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'files',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_update = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'update',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_update_patch = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'update_patch',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_preview_update = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'preview_update',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_preview_update_patch = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'preview_update_patch',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_validate_template = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'validate_template',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_snapshot = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_show_snapshot = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'show_snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_delete_snapshot = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'delete_snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_list_snapshots = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'list_snapshots',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_restore_snapshot = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'restore_snapshot',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_list_outputs = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'list_outputs',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_show_output = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'show_output',
+ check_str=base.RULE_DENY_STACK_USER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+deprecated_lookup = policy.DeprecatedRule(
+ name=POLICY_ROOT % 'lookup',
+ check_str=base.RULE_ALLOW_EVERYBODY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.WALLABY
+)
+
+
stacks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'abandon',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Abandon stack.',
operations=[
{
@@ -27,44 +215,52 @@ stacks_policies = [
'abandon',
'method': 'DELETE'
}
- ]
+ ],
+ deprecated_rule=deprecated_abandon
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Create stack.',
operations=[
{
'path': '/v1/{tenant_id}/stacks',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_create
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Delete stack.',
operations=[
{
'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
'method': 'DELETE'
}
- ]
+ ],
+ deprecated_rule=deprecated_delete
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'detail',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List stacks in detail.',
operations=[
{
'path': '/v1/{tenant_id}/stacks',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_detail
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'export',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Export stack.',
operations=[
{
@@ -72,11 +268,13 @@ stacks_policies = [
'export',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_export
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'generate_template',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Generate stack template.',
operations=[
{
@@ -84,55 +282,65 @@ stacks_policies = [
'template',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_generate_template
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'global_index',
- check_str=base.RULE_DENY_EVERYBODY,
+ check_str=base.SYSTEM_READER,
+ scope_types=['system', 'project'],
description='List stacks globally.',
operations=[
{
'path': '/v1/{tenant_id}/stacks',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_global_index
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List stacks.',
operations=[
{
'path': '/v1/{tenant_id}/stacks',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_index
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list_resource_types',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List resource types.',
operations=[
{
'path': '/v1/{tenant_id}/resource_types',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_list_resource_types
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list_template_versions',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List template versions.',
operations=[
{
'path': '/v1/{tenant_id}/template_versions',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_list_template_versions
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list_template_functions',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List template functions.',
operations=[
{
@@ -140,55 +348,65 @@ stacks_policies = [
'{template_version}/functions',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_list_template_functions
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'lookup',
- check_str=base.RULE_ALLOW_EVERYBODY,
+ check_str=base.SYSTEM_OR_PROJECT_READER_OR_STACK_USER,
+ scope_types=['system', 'project'],
description='Find stack.',
operations=[
{
'path': '/v1/{tenant_id}/stacks/{stack_identity}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_lookup
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'preview',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Preview stack.',
operations=[
{
'path': '/v1/{tenant_id}/stacks/preview',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_preview
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'resource_schema',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show resource type schema.',
operations=[
{
'path': '/v1/{tenant_id}/resource_types/{type_name}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_resource_schema
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show stack.',
operations=[
{
'path': '/v1/{tenant_id}/stacks/{stack_identity}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_show
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'template',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Get stack template.',
operations=[
{
@@ -196,11 +414,13 @@ stacks_policies = [
'template',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_template
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'environment',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Get stack environment.',
operations=[
{
@@ -208,11 +428,13 @@ stacks_policies = [
'environment',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_environment
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'files',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Get stack files.',
operations=[
{
@@ -220,33 +442,39 @@ stacks_policies = [
'files',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_files
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Update stack.',
operations=[
{
'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
'method': 'PUT'
}
- ]
+ ],
+ deprecated_rule=deprecated_update
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update_patch',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Update stack (PATCH).',
operations=[
{
'path': '/v1/{tenant_id}/stacks/{stack_name}/{stack_id}',
'method': 'PATCH'
}
- ]
+ ],
+ deprecated_rule=deprecated_update_patch
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'preview_update',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Preview update stack.',
operations=[
{
@@ -254,11 +482,13 @@ stacks_policies = [
'preview',
'method': 'PUT'
}
- ]
+ ],
+ deprecated_rule=deprecated_preview_update
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'preview_update_patch',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Preview update stack (PATCH).',
operations=[
{
@@ -266,22 +496,26 @@ stacks_policies = [
'preview',
'method': 'PATCH'
}
- ]
+ ],
+ deprecated_rule=deprecated_preview_update_patch
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'validate_template',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Validate template.',
operations=[
{
'path': '/v1/{tenant_id}/validate',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_validate_template
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'snapshot',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Snapshot Stack.',
operations=[
{
@@ -289,11 +523,13 @@ stacks_policies = [
'snapshots',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_snapshot
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show_snapshot',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show snapshot.',
operations=[
{
@@ -301,11 +537,13 @@ stacks_policies = [
'snapshots/{snapshot_id}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_show_snapshot
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete_snapshot',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Delete snapshot.',
operations=[
{
@@ -313,11 +551,13 @@ stacks_policies = [
'snapshots/{snapshot_id}',
'method': 'DELETE'
}
- ]
+ ],
+ deprecated_rule=deprecated_delete_snapshot
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list_snapshots',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List snapshots.',
operations=[
{
@@ -325,11 +565,13 @@ stacks_policies = [
'snapshots',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_list_snapshots
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'restore_snapshot',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
+ scope_types=['system', 'project'],
description='Restore snapshot.',
operations=[
{
@@ -337,11 +579,13 @@ stacks_policies = [
'snapshots/{snapshot_id}/restore',
'method': 'POST'
}
- ]
+ ],
+ deprecated_rule=deprecated_restore_snapshot
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list_outputs',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='List outputs.',
operations=[
{
@@ -349,11 +593,13 @@ stacks_policies = [
'outputs',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_list_outputs
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show_output',
- check_str=base.RULE_DENY_STACK_USER,
+ check_str=base.SYSTEM_OR_PROJECT_READER,
+ scope_types=['system', 'project'],
description='Show outputs.',
operations=[
{
@@ -361,7 +607,8 @@ stacks_policies = [
'outputs/{output_key}',
'method': 'GET'
}
- ]
+ ],
+ deprecated_rule=deprecated_show_output
)
]
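A minimal sketch of the pattern applied throughout the policy changes above (illustrative only, assuming oslo.policy's public API; the check strings and the standalone rule below are made up for the example, not copied from heat.policies.base): a DocumentedRuleDefault that carries a DeprecatedRule defaults new deployments to the scoped admin/member roles while an operator's old override against the legacy check string keeps working during the deprecation window.

    from oslo_config import cfg
    from oslo_log import versionutils
    from oslo_policy import policy

    # Old default, kept alive via DeprecatedRule during the transition.
    deprecated_create = policy.DeprecatedRule(
        name='stacks:create',
        check_str='rule:deny_stack_user',
        deprecated_reason='The stack API now supports default roles.',
        deprecated_since=versionutils.deprecated.WALLABY)

    # New default: system admin or project member, with explicit scope types.
    rule = policy.DocumentedRuleDefault(
        name='stacks:create',
        check_str='(role:admin and system_scope:all) or '
                  '(role:member and project_id:%(project_id)s)',
        scope_types=['system', 'project'],
        description='Create stack.',
        operations=[{'path': '/v1/{tenant_id}/stacks', 'method': 'POST'}],
        deprecated_rule=deprecated_create)

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults([rule])

    # A project member creating a stack in their own project passes the check.
    creds = {'roles': ['member'], 'project_id': 'demo'}
    assert enforcer.authorize('stacks:create', {'project_id': 'demo'}, creds)

If a deployment's policy file still overrides the old deny_stack_user rule, oslo.policy evaluates that override as an alternative to the new default and logs a deprecation warning, which is what makes the switch above backward compatible.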
diff --git a/heat/scaling/cooldown.py b/heat/scaling/cooldown.py
index 452d64ada..df3dc634f 100644
--- a/heat/scaling/cooldown.py
+++ b/heat/scaling/cooldown.py
@@ -18,7 +18,6 @@ from heat.common.i18n import _
from heat.engine import resource
from oslo_log import log as logging
from oslo_utils import timeutils
-import six
LOG = logging.getLogger(__name__)
@@ -48,19 +47,19 @@ class CooldownMixin(object):
# Note: this is for supporting old version cooldown checking
metadata.pop('scaling_in_progress', None)
if metadata and cooldown != 0:
- last_adjust = next(six.iterkeys(metadata))
+ last_adjust = next(iter(metadata.keys()))
if not timeutils.is_older_than(last_adjust, cooldown):
self._log_and_raise_no_action(cooldown)
elif 'cooldown_end' in metadata:
- cooldown_end = next(six.iterkeys(metadata['cooldown_end']))
+ cooldown_end = next(iter(metadata['cooldown_end'].keys()))
now = timeutils.utcnow().isoformat()
if now < cooldown_end:
self._log_and_raise_no_action(cooldown)
elif cooldown != 0:
# Note: this is also for supporting old version cooldown checking
- last_adjust = next(six.iterkeys(metadata['cooldown']))
+ last_adjust = next(iter(metadata['cooldown'].keys()))
if not timeutils.is_older_than(last_adjust, cooldown):
self._log_and_raise_no_action(cooldown)
@@ -91,7 +90,7 @@ class CooldownMixin(object):
seconds=cooldown)).isoformat()
if 'cooldown_end' in metadata:
cooldown_end = max(
- next(six.iterkeys(metadata['cooldown_end'])),
+ next(iter(metadata['cooldown_end'].keys())),
cooldown_end)
metadata['cooldown_end'] = {cooldown_end: cooldown_reason}
metadata['scaling_in_progress'] = False
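A side note on the six removal just above (a toy example under assumed data, not code from the patch): next(iter(d.keys())) is the Python 3 replacement for six.iterkeys(d); both yield keys lazily, so the expression picks out the single timestamp key that the cooldown metadata stores.

    # Hypothetical metadata shape, for illustration only.
    metadata = {'2021-03-01T10:00:00.000000': 'cooldown reason'}
    last_adjust = next(iter(metadata.keys()))  # same value six.iterkeys() gave
    assert last_adjust == '2021-03-01T10:00:00.000000'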
diff --git a/heat/tests/__init__.py b/heat/tests/__init__.py
index bac6de060..efeae0bf6 100644
--- a/heat/tests/__init__.py
+++ b/heat/tests/__init__.py
@@ -18,6 +18,7 @@ import oslo_i18n as i18n
def fake_translate_msgid(msgid, domain, desired_locale=None):
return msgid
+
i18n.enable_lazy()
# To ensure messages don't really get translated while running tests.
diff --git a/heat/tests/api/aws/test_api_ec2token.py b/heat/tests/api/aws/test_api_ec2token.py
index 135b57a17..af1305f60 100644
--- a/heat/tests/api/aws/test_api_ec2token.py
+++ b/heat/tests/api/aws/test_api_ec2token.py
@@ -13,12 +13,11 @@
import json
-import mock
+from unittest import mock
from oslo_config import cfg
from oslo_utils import importutils
import requests
-import six
from heat.api.aws import ec2token
from heat.api.aws import exception
@@ -520,7 +519,7 @@ class Ec2TokenTest(common.HeatTestCase):
ex = self.assertRaises(exception.HeatInternalFailureError,
ec2.__call__, dummy_req)
- self.assertEqual('Service misconfigured', six.text_type(ex))
+ self.assertEqual('Service misconfigured', str(ex))
def test_call_ok_auth_uri_ec2authtoken(self):
dummy_url = 'http://123:5000/v2.0'
diff --git a/heat/tests/api/cfn/test_api_cfn_v1.py b/heat/tests/api/cfn/test_api_cfn_v1.py
index 7b444f4dd..88d3764c6 100644
--- a/heat/tests/api/cfn/test_api_cfn_v1.py
+++ b/heat/tests/api/cfn/test_api_cfn_v1.py
@@ -13,10 +13,9 @@
import json
import os
+from unittest import mock
-import mock
from oslo_config import fixture as config_fixture
-import six
from heat.api.aws import exception
import heat.api.cfn.v1.stacks as stacks
@@ -1210,7 +1209,7 @@ class CfnStackControllerTest(common.HeatTestCase):
expected = {'DescribeStackEventsResponse':
{'DescribeStackEventsResult':
{'StackEvents':
- [{'EventId': six.text_type(event_id),
+ [{'EventId': str(event_id),
'StackId': u'arn:openstack:heat::t:stacks/wordpress/6',
'ResourceStatus': u'TEST_IN_PROGRESS',
'ResourceType': u'AWS::EC2::Instance',
diff --git a/heat/tests/api/openstack_v1/test_actions.py b/heat/tests/api/openstack_v1/test_actions.py
index 98e8f93a1..4910bf46a 100644
--- a/heat/tests/api/openstack_v1/test_actions.py
+++ b/heat/tests/api/openstack_v1/test_actions.py
@@ -12,9 +12,8 @@
# under the License.
import json
+from unittest import mock
-import mock
-import six
import webob.exc
import heat.api.middleware.fault as fault
@@ -45,7 +44,7 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
self.controller = actions.ActionController(options=cfgopts)
def test_action_suspend(self, mock_enforce):
- self._mock_enforce_setup(mock_enforce, 'action', True)
+ self._mock_enforce_setup(mock_enforce, 'suspend', True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
body = {'suspend': None}
@@ -67,7 +66,7 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
)
def test_action_resume(self, mock_enforce):
- self._mock_enforce_setup(mock_enforce, 'action', True)
+ self._mock_enforce_setup(mock_enforce, 'resume', True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
body = {'resume': None}
@@ -88,14 +87,34 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
('stack_resume', {'stack_identity': stack_identity})
)
+ def test_action_check(self, mock_enforce):
+ self._mock_enforce_setup(mock_enforce, 'check', True)
+ stack_identity = identifier.HeatIdentifier(self.tenant,
+ 'wordpress', '1')
+ body = {'check': None}
+ req = self._post(stack_identity._tenant_path() + '/actions',
+ data=json.dumps(body))
+
+ mock_call = self.patchobject(rpc_client.EngineClient, 'call',
+ return_value=None)
+
+ result = self.controller.action(req, tenant_id=self.tenant,
+ stack_name=stack_identity.stack_name,
+ stack_id=stack_identity.stack_id,
+ body=body)
+ self.assertIsNone(result)
+
+ mock_call.assert_called_once_with(
+ req.context,
+ ('stack_check', {'stack_identity': stack_identity})
+ )
+
def _test_action_cancel_update(self, mock_enforce, with_rollback=True):
- self._mock_enforce_setup(mock_enforce, 'action', True)
+ act = 'cancel_update' if with_rollback else 'cancel_without_rollback'
+ self._mock_enforce_setup(mock_enforce, act, True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
- if with_rollback:
- body = {'cancel_update': None}
- else:
- body = {'cancel_without_rollback': None}
+ body = {act: None}
req = self._post(stack_identity._tenant_path() + '/actions',
data=json.dumps(body))
@@ -119,7 +138,6 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
self._test_action_cancel_update(mock_enforce, False)
def test_action_badaction(self, mock_enforce):
- self._mock_enforce_setup(mock_enforce, 'action', True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
body = {'notallowed': None}
@@ -133,7 +151,6 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
body=body)
def test_action_badaction_empty(self, mock_enforce):
- self._mock_enforce_setup(mock_enforce, 'action', True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
body = {}
@@ -147,7 +164,6 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
body=body)
def test_action_badaction_multiple(self, mock_enforce):
- self._mock_enforce_setup(mock_enforce, 'action', True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
body = {'one': None, 'two': None}
@@ -161,7 +177,7 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
body=body)
def test_action_rmt_aterr(self, mock_enforce):
- self._mock_enforce_setup(mock_enforce, 'action', True)
+ self._mock_enforce_setup(mock_enforce, 'suspend', True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
body = {'suspend': None}
@@ -189,7 +205,7 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
)
def test_action_err_denied_policy(self, mock_enforce):
- self._mock_enforce_setup(mock_enforce, 'action', False)
+ self._mock_enforce_setup(mock_enforce, 'suspend', False)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
body = {'suspend': None}
@@ -204,10 +220,9 @@ class ActionControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=stack_identity.stack_id,
body=body)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_action_badaction_ise(self, mock_enforce):
- self._mock_enforce_setup(mock_enforce, 'action', True)
stack_identity = identifier.HeatIdentifier(self.tenant,
'wordpress', '1')
body = {'oops': None}
diff --git a/heat/tests/api/openstack_v1/test_build_info.py b/heat/tests/api/openstack_v1/test_build_info.py
index c7fb45043..01cd9e9b8 100644
--- a/heat/tests/api/openstack_v1/test_build_info.py
+++ b/heat/tests/api/openstack_v1/test_build_info.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
import heat.api.middleware.fault as fault
import heat.api.openstack.v1.build_info as build_info
@@ -72,4 +71,4 @@ class BuildInfoControllerTest(tools.ControllerTest, common.HeatTestCase):
self.controller.build_info,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
diff --git a/heat/tests/api/openstack_v1/test_events.py b/heat/tests/api/openstack_v1/test_events.py
index c3a0b54ab..22e0eecd9 100644
--- a/heat/tests/api/openstack_v1/test_events.py
+++ b/heat/tests/api/openstack_v1/test_events.py
@@ -11,8 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
+
import webob.exc
import heat.api.middleware.fault as fault
@@ -341,7 +341,7 @@ class EventControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=stack_identity.stack_id)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_index_resource_nonexist(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
@@ -374,7 +374,7 @@ class EventControllerTest(tools.ControllerTest, common.HeatTestCase):
)
@mock.patch.object(rpc_client.EngineClient, 'call')
- def test_index_whitelists_pagination_params(self, mock_call, mock_enforce):
+ def test_index_bogus_pagination_param(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'limit': 10,
@@ -424,11 +424,11 @@ class EventControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_name=sid.stack_name,
stack_id=sid.stack_id)
self.assertEqual("Only integer is acceptable by 'limit'.",
- six.text_type(ex))
+ str(ex))
self.assertFalse(mock_call.called)
@mock.patch.object(rpc_client.EngineClient, 'call')
- def test_index_whitelist_filter_params(self, mock_call, mock_enforce):
+ def test_index_bogus_filter_param(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'resource_status': 'COMPLETE',
@@ -631,7 +631,7 @@ class EventControllerTest(tools.ControllerTest, common.HeatTestCase):
event_id=event_id)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_show_multiple_resource_names(self, mock_call, mock_enforce):
diff --git a/heat/tests/api/openstack_v1/test_resources.py b/heat/tests/api/openstack_v1/test_resources.py
index c8e394578..63426a3ab 100644
--- a/heat/tests/api/openstack_v1/test_resources.py
+++ b/heat/tests/api/openstack_v1/test_resources.py
@@ -11,8 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
+
import webob.exc
import heat.api.middleware.fault as fault
@@ -147,8 +147,8 @@ class ResourceControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=stack_identity.stack_id)
self.assertIn("Invalid filter parameters %s" %
- [six.text_type('invalid_key')],
- six.text_type(ex))
+ [str('invalid_key')],
+ str(ex))
self.assertFalse(mock_call.called)
def test_index_nested_depth(self, mock_enforce):
@@ -192,7 +192,7 @@ class ResourceControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=stack_identity.stack_id)
self.assertEqual("Only integer is acceptable by 'nested_depth'.",
- six.text_type(ex))
+ str(ex))
self.assertFalse(mock_call.called)
def test_index_denied_policy(self, mock_enforce):
@@ -213,7 +213,7 @@ class ResourceControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=stack_identity.stack_id)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_index_detail(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
@@ -562,7 +562,7 @@ class ResourceControllerTest(tools.ControllerTest, common.HeatTestCase):
resource_name=res_name)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_metadata_show(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'metadata', True)
@@ -693,7 +693,7 @@ class ResourceControllerTest(tools.ControllerTest, common.HeatTestCase):
resource_name=res_name)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_signal(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'signal', True)
@@ -808,9 +808,9 @@ class ResourceControllerTest(tools.ControllerTest, common.HeatTestCase):
resource_name=res_name,
body=body)
- self.assertIn(expected, six.text_type(actual))
- self.assertIn('invalid_key1', six.text_type(actual))
- self.assertIn('invalid_key2', six.text_type(actual))
+ self.assertIn(expected, str(actual))
+ self.assertIn('invalid_key1', str(actual))
+ self.assertIn('invalid_key2', str(actual))
mock_call.assert_not_called()
def test_mark_unhealthy_with_invalid_value(self, mock_enforce):
@@ -837,7 +837,7 @@ class ResourceControllerTest(tools.ControllerTest, common.HeatTestCase):
resource_name=res_name,
body=body)
- self.assertIn(expected, six.text_type(actual))
+ self.assertIn(expected, str(actual))
mock_call.assert_not_called()
def test_mark_unhealthy_without_mark_unhealthy_key(self, mock_enforce):
@@ -863,5 +863,5 @@ class ResourceControllerTest(tools.ControllerTest, common.HeatTestCase):
resource_name=res_name,
body=body)
- self.assertIn(expected, six.text_type(actual))
+ self.assertIn(expected, str(actual))
mock_call.assert_not_called()
diff --git a/heat/tests/api/openstack_v1/test_services.py b/heat/tests/api/openstack_v1/test_services.py
index 1b078d013..ef659ddcb 100644
--- a/heat/tests/api/openstack_v1/test_services.py
+++ b/heat/tests/api/openstack_v1/test_services.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_messaging import exceptions
import webob.exc
diff --git a/heat/tests/api/openstack_v1/test_software_configs.py b/heat/tests/api/openstack_v1/test_software_configs.py
index aa644cf23..c1516eda0 100644
--- a/heat/tests/api/openstack_v1/test_software_configs.py
+++ b/heat/tests/api/openstack_v1/test_software_configs.py
@@ -12,8 +12,8 @@
# under the License.
import json
+from unittest import mock
-import mock
import webob.exc
import heat.api.middleware.fault as fault
diff --git a/heat/tests/api/openstack_v1/test_software_deployments.py b/heat/tests/api/openstack_v1/test_software_deployments.py
index 9f4023a27..de91eab80 100644
--- a/heat/tests/api/openstack_v1/test_software_deployments.py
+++ b/heat/tests/api/openstack_v1/test_software_deployments.py
@@ -12,8 +12,8 @@
# under the License.
import json
+from unittest import mock
-import mock
import webob.exc
import heat.api.middleware.fault as fault
@@ -48,8 +48,8 @@ class SoftwareDeploymentControllerTest(tools.ControllerTest,
resp = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(
{'software_deployments': []}, resp)
- whitelist = mock_call.call_args[1]
- self.assertEqual({}, whitelist)
+ params = mock_call.call_args[1]
+ self.assertEqual({}, params)
server_id = 'fb322564-7927-473d-8aad-68ae7fbf2abf'
req = self._get('/software_deployments', {'server_id': server_id})
with mock.patch.object(
@@ -59,8 +59,8 @@ class SoftwareDeploymentControllerTest(tools.ControllerTest,
resp = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(
{'software_deployments': []}, resp)
- whitelist = mock_call.call_args[1]
- self.assertEqual({'server_id': server_id}, whitelist)
+ params = mock_call.call_args[1]
+ self.assertEqual({'server_id': server_id}, params)
@mock.patch.object(policy.Enforcer, 'enforce')
def test_show(self, mock_enforce):
diff --git a/heat/tests/api/openstack_v1/test_stacks.py b/heat/tests/api/openstack_v1/test_stacks.py
index b0c51799a..48d1ebd61 100644
--- a/heat/tests/api/openstack_v1/test_stacks.py
+++ b/heat/tests/api/openstack_v1/test_stacks.py
@@ -12,10 +12,9 @@
# under the License.
import json
+from unittest import mock
-import mock
from oslo_config import cfg
-import six
import webob.exc
import heat.api.middleware.fault as fault
@@ -61,7 +60,7 @@ parameters:
template_format.parse(bad_temp)
parse_ex = self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
- self.assertIn('foo', six.text_type(parse_ex))
+ self.assertIn('foo', str(parse_ex))
def test_stack_name(self):
body = {'stack_name': 'wibble'}
@@ -148,7 +147,7 @@ blarg: wibble
'bytes) exceeds maximum allowed size (%(limit)s bytes).') % {
'actual_len': len(str(template)),
'limit': cfg.CONF.max_template_size}
- self.assertEqual(msg, six.text_type(error))
+ self.assertEqual(msg, str(error))
def test_parameters(self):
params = {'foo': 'bar', 'blarg': 'wibble'}
@@ -314,7 +313,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
req.context, ('list_stacks', default_args), version='1.33')
@mock.patch.object(rpc_client.EngineClient, 'call')
- def test_index_whitelists_pagination_params(self, mock_call, mock_enforce):
+ def test_index_bogus_pagination_param(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'limit': 10,
@@ -348,11 +347,11 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
self.controller.index, req,
tenant_id=self.tenant)
self.assertEqual("Only integer is acceptable by 'limit'.",
- six.text_type(ex))
+ str(ex))
self.assertFalse(mock_call.called)
@mock.patch.object(rpc_client.EngineClient, 'call')
- def test_index_whitelist_filter_params(self, mock_call, mock_enforce):
+ def test_index_bogus_filter_param(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'id': 'fake id',
@@ -382,7 +381,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
'parent': 'fake parent',
'stack_user_project_id': 'fake project id',
'tags': 'fake tags',
- 'barlog': 'you shall not pass!'
+ 'balrog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
@@ -404,7 +403,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
for key in ('stack_identity', 'creation_time', 'updated_time',
'deletion_time', 'notification_topics', 'description',
'template_description', 'parameters', 'outputs',
- 'capabilities', 'tags', 'barlog'):
+ 'capabilities', 'tags', 'balrog'):
self.assertNotIn(key, filters)
def test_index_returns_stack_count_if_with_count_is_true(
@@ -444,7 +443,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
req, tenant_id=self.tenant)
excepted = ('Unrecognized value "invalid_value" for "with_count", '
'acceptable values are: true, false')
- self.assertIn(excepted, six.text_type(exc))
+ self.assertIn(excepted, str(exc))
@mock.patch.object(rpc_client.EngineClient, 'count_stacks')
def test_index_doesnt_break_with_old_engine(self, mock_count_stacks,
@@ -472,7 +471,9 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
mock_enforce.assert_called_with(action='global_index',
scope=self.controller.REQUEST_SCOPE,
is_registered_policy=True,
- context=self.context)
+ context=self.context,
+ target={"project_id": self.tenant}
+ )
def test_global_index_uses_admin_context(self, mock_enforce):
rpc_client = self.controller.rpc_client
@@ -701,7 +702,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_interr(self, mock_call, mock_enforce):
@@ -900,7 +901,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
- six.text_type(ex))
+ str(ex))
mock_call.assert_not_called()
def test_adopt_error(self, mock_enforce):
@@ -1106,7 +1107,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
- six.text_type(ex))
+ str(ex))
mock_call.assert_not_called()
def test_create_err_denied_policy(self, mock_enforce):
@@ -1127,7 +1128,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
body=body)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_create_err_engine(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
@@ -1365,7 +1366,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
self.assertEqual('ImmutableParameterModified',
resp.json['error']['type'])
self.assertIn("The following parameters are immutable",
- six.text_type(resp.json['error']['message']))
+ str(resp.json['error']['message']))
mock_call.assert_called_once_with(
req.context,
@@ -1452,7 +1453,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_name=stack_name)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_lookup_resource(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', True)
@@ -1514,7 +1515,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
path='resources')
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_show(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', True)
@@ -1676,7 +1677,9 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
version='1.20'
)
- def test_show_invalidtenant(self, mock_enforce):
+ # test_show_invalidtenant for stacks is now covered by the more generic
+ # secure-RBAC (sRBAC) approach
+ def test_deprecated_show_invalidtenant(self, mock_enforce):
identity = identifier.HeatIdentifier('wibble', 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
@@ -1688,7 +1691,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_show_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', False)
@@ -1703,7 +1706,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_get_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', True)
@@ -1780,7 +1783,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_get_template_err_notfound(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', True)
@@ -1954,7 +1957,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=identity.stack_id,
body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
- six.text_type(ex))
+ str(ex))
self.assertFalse(mock_call.called)
def test_update_err_denied_policy(self, mock_enforce):
@@ -1978,7 +1981,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
body=body)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_update_with_existing_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
@@ -2167,7 +2170,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=identity.stack_id,
body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
- six.text_type(ex))
+ str(ex))
self.assertFalse(mock_call.called)
def test_update_with_existing_and_default_parameters(
@@ -2295,7 +2298,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_export(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'export', True)
@@ -2354,7 +2357,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_delete_bad_name(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', True)
@@ -2468,7 +2471,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
req, tenant_id=self.tenant, body=body)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_list_resource_types(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
@@ -2534,7 +2537,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_list_outputs(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_outputs', True)
@@ -2732,7 +2735,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
def test_generate_template(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
@@ -2766,7 +2769,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
type_name='TEST_TYPE')
self.assertIn('Template type is not supported: Invalid template '
'type "invalid", valid types are: cfn, hot.',
- six.text_type(ex))
+ str(ex))
self.assertFalse(mock_call.called)
def test_generate_template_not_found(self, mock_enforce):
@@ -2800,7 +2803,7 @@ class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
req, tenant_id=self.tenant,
type_name='blah')
self.assertEqual(403, resp.status_int)
- self.assertIn('403 Forbidden', six.text_type(resp))
+ self.assertIn('403 Forbidden', str(resp))
class StackSerializerTest(common.HeatTestCase):
diff --git a/heat/tests/api/openstack_v1/test_util.py b/heat/tests/api/openstack_v1/test_util.py
index ff286a596..93930eccc 100644
--- a/heat/tests/api/openstack_v1/test_util.py
+++ b/heat/tests/api/openstack_v1/test_util.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import exc
from heat.api.openstack.v1 import util
@@ -27,60 +28,60 @@ class TestGetAllowedParams(common.HeatTestCase):
req = wsgi.Request({})
self.params = req.params.copy()
self.params.add('foo', 'foo value')
- self.whitelist = {'foo': util.PARAM_TYPE_SINGLE}
+ self.param_types = {'foo': util.PARAM_TYPE_SINGLE}
def test_returns_empty_dict(self):
- self.whitelist = {}
+ self.param_types = {}
- result = util.get_allowed_params(self.params, self.whitelist)
+ result = util.get_allowed_params(self.params, self.param_types)
self.assertEqual({}, result)
- def test_only_adds_whitelisted_params_if_param_exists(self):
- self.whitelist = {'foo': util.PARAM_TYPE_SINGLE}
+ def test_only_adds_allowed_param_if_param_exists(self):
+ self.param_types = {'foo': util.PARAM_TYPE_SINGLE}
self.params.clear()
- result = util.get_allowed_params(self.params, self.whitelist)
+ result = util.get_allowed_params(self.params, self.param_types)
self.assertNotIn('foo', result)
- def test_returns_only_whitelisted_params(self):
+ def test_returns_only_allowed_params(self):
self.params.add('bar', 'bar value')
- result = util.get_allowed_params(self.params, self.whitelist)
+ result = util.get_allowed_params(self.params, self.param_types)
self.assertIn('foo', result)
self.assertNotIn('bar', result)
def test_handles_single_value_params(self):
- result = util.get_allowed_params(self.params, self.whitelist)
+ result = util.get_allowed_params(self.params, self.param_types)
self.assertEqual('foo value', result['foo'])
def test_handles_multiple_value_params(self):
- self.whitelist = {'foo': util.PARAM_TYPE_MULTI}
+ self.param_types = {'foo': util.PARAM_TYPE_MULTI}
self.params.add('foo', 'foo value 2')
- result = util.get_allowed_params(self.params, self.whitelist)
+ result = util.get_allowed_params(self.params, self.param_types)
self.assertEqual(2, len(result['foo']))
self.assertIn('foo value', result['foo'])
self.assertIn('foo value 2', result['foo'])
def test_handles_mixed_value_param_with_multiple_entries(self):
- self.whitelist = {'foo': util.PARAM_TYPE_MIXED}
+ self.param_types = {'foo': util.PARAM_TYPE_MIXED}
self.params.add('foo', 'foo value 2')
- result = util.get_allowed_params(self.params, self.whitelist)
+ result = util.get_allowed_params(self.params, self.param_types)
self.assertEqual(2, len(result['foo']))
self.assertIn('foo value', result['foo'])
self.assertIn('foo value 2', result['foo'])
def test_handles_mixed_value_param_with_single_entry(self):
- self.whitelist = {'foo': util.PARAM_TYPE_MIXED}
+ self.param_types = {'foo': util.PARAM_TYPE_MIXED}
- result = util.get_allowed_params(self.params, self.whitelist)
+ result = util.get_allowed_params(self.params, self.param_types)
self.assertEqual('foo value', result['foo'])
- def test_bogus_whitelist_items(self):
- self.whitelist = {'foo': 'blah'}
+ def test_bogus_param_type(self):
+ self.param_types = {'foo': 'blah'}
self.assertRaises(AssertionError, util.get_allowed_params,
- self.params, self.whitelist)
+ self.params, self.param_types)
class TestPolicyEnforce(common.HeatTestCase):
@@ -93,7 +94,7 @@ class TestPolicyEnforce(common.HeatTestCase):
class DummyController(object):
REQUEST_SCOPE = 'test'
- @util.policy_enforce
+ @util.registered_policy_enforce
def an_action(self, req):
return 'woot'
diff --git a/heat/tests/api/openstack_v1/test_views_common.py b/heat/tests/api/openstack_v1/test_views_common.py
index 9b69efb8f..df46178fc 100644
--- a/heat/tests/api/openstack_v1/test_views_common.py
+++ b/heat/tests/api/openstack_v1/test_views_common.py
@@ -11,8 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-from six.moves.urllib import parse as urlparse
+from unittest import mock
+from urllib import parse as urlparse
from heat.api.openstack.v1.views import views_common
from heat.tests import common
diff --git a/heat/tests/api/openstack_v1/test_views_stacks_view.py b/heat/tests/api/openstack_v1/test_views_stacks_view.py
index 5f71c0fee..00c56a564 100644
--- a/heat/tests/api/openstack_v1/test_views_stacks_view.py
+++ b/heat/tests/api/openstack_v1/test_views_stacks_view.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.api.openstack.v1.views import stacks_view
from heat.common import identifier
diff --git a/heat/tests/api/openstack_v1/tools.py b/heat/tests/api/openstack_v1/tools.py
index 3a27b8697..0f5247a58 100644
--- a/heat/tests/api/openstack_v1/tools.py
+++ b/heat/tests/api/openstack_v1/tools.py
@@ -11,11 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_log import log
from oslo_messaging._drivers import common as rpc_common
-import six
import webob.exc
from heat.common import wsgi
@@ -94,7 +94,7 @@ class ControllerTest(object):
req = wsgi.Request(environ)
req.context = utils.dummy_context('api_test_user', self.tenant)
self.context = req.context
- req.body = six.b(data)
+ req.body = data.encode('latin-1')
return req
def _post(self, path, data, content_type='application/json'):
@@ -119,6 +119,7 @@ class ControllerTest(object):
action=self.action,
context=self.context,
scope=self.controller.REQUEST_SCOPE,
+ target={'project_id': self.tenant},
is_registered_policy=mock.ANY
)
self.assertEqual(self.expected_request_count,
diff --git a/heat/tests/api/test_wsgi.py b/heat/tests/api/test_wsgi.py
index 4780d8fbe..97ec3b3eb 100644
--- a/heat/tests/api/test_wsgi.py
+++ b/heat/tests/api/test_wsgi.py
@@ -15,15 +15,14 @@
# under the License.
+from unittest import mock
+
import fixtures
import json
-import mock
-import six
+from oslo_config import cfg
import socket
import webob
-from oslo_config import cfg
-
from heat.api.aws import exception as aws_exception
from heat.common import exception
from heat.common import wsgi
@@ -214,7 +213,7 @@ class ResourceTest(common.HeatTestCase):
None)
e = self.assertRaises(exception.HTTPExceptionDisguise,
resource, request)
- self.assertEqual(message_es, six.text_type(e.exc))
+ self.assertEqual(message_es, str(e.exc))
class ResourceExceptionHandlingTest(common.HeatTestCase):
@@ -235,11 +234,11 @@ class ResourceExceptionHandlingTest(common.HeatTestCase):
def test_resource_client_exceptions_dont_log_error(self):
class Controller(object):
- def __init__(self, excpetion_to_raise):
- self.excpetion_to_raise = excpetion_to_raise
+ def __init__(self, exception_to_raise):
+ self.exception_to_raise = exception_to_raise
def raise_exception(self, req, body):
- raise self.excpetion_to_raise()
+ raise self.exception_to_raise()
actions = {'action': 'raise_exception', 'body': 'data'}
env = {'wsgiorg.routing_args': [None, actions]}
@@ -250,7 +249,7 @@ class ResourceExceptionHandlingTest(common.HeatTestCase):
None)
e = self.assertRaises(self.exception_catch, resource, request)
e = e.exc if hasattr(e, 'exc') else e
- self.assertNotIn(six.text_type(e), self.LOG.output)
+ self.assertNotIn(str(e), self.LOG.output)
class JSONRequestDeserializerTest(common.HeatTestCase):
@@ -387,7 +386,7 @@ class JSONRequestDeserializerTest(common.HeatTestCase):
msg = ('Request limit exceeded: JSON body size '
'(%s bytes) exceeds maximum allowed size (%s bytes).' % (
len(body), cfg.CONF.max_json_body_size))
- self.assertEqual(msg, six.text_type(error))
+ self.assertEqual(msg, str(error))
class GetSocketTestCase(common.HeatTestCase):
@@ -442,10 +441,12 @@ class GetSocketTestCase(common.HeatTestCase):
wsgi.cfg.CONF.heat_api, 1234)
def test_get_socket_with_bind_problems(self):
+ err = wsgi.socket.error(
+ socket.errno.EADDRINUSE, 'Address already in use')
self.useFixture(fixtures.MonkeyPatch(
'heat.common.wsgi.eventlet.listen',
mock.Mock(side_effect=(
- [wsgi.socket.error(socket.errno.EADDRINUSE)] * 3 + [None]))))
+ [err] * 3 + [None]))))
self.useFixture(fixtures.MonkeyPatch(
'heat.common.wsgi.ssl.wrap_socket',
lambda *x, **y: None))
diff --git a/heat/tests/autoscaling/test_heat_scaling_group.py b/heat/tests/autoscaling/test_heat_scaling_group.py
index 8a107ec25..b91c33312 100644
--- a/heat/tests/autoscaling/test_heat_scaling_group.py
+++ b/heat/tests/autoscaling/test_heat_scaling_group.py
@@ -12,10 +12,9 @@
import datetime
import json
+from unittest import mock
-import mock
from oslo_utils import timeutils
-import six
from heat.common import exception
from heat.common import grouputils
@@ -499,7 +498,7 @@ class HeatScalingGroupAttrFallbackTest(common.HeatTestCase):
mock_members = self.patchobject(grouputils, 'get_members')
members = []
output = []
- for ip_ex in six.moves.range(1, 4):
+ for ip_ex in range(1, 4):
inst = mock.Mock()
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
output.append('2.1.3.%d' % ip_ex)
@@ -543,7 +542,7 @@ class HeatScalingGroupAttrFallbackTest(common.HeatTestCase):
mock_members = self.patchobject(grouputils, 'get_members')
members = []
output = {}
- for ip_ex in six.moves.range(1, 4):
+ for ip_ex in range(1, 4):
inst = mock.Mock()
inst.name = str(ip_ex)
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
@@ -559,7 +558,7 @@ class HeatScalingGroupAttrFallbackTest(common.HeatTestCase):
self.group.nested = mock.Mock()
members = []
output = []
- for ip_ex in six.moves.range(0, 2):
+ for ip_ex in range(0, 2):
inst = mock.Mock()
inst.name = 'ab'[ip_ex]
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
@@ -644,7 +643,7 @@ class RollingUpdatePolicyTest(common.HeatTestCase):
stack = utils.parse_stack(tmpl)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
- self.assertIn("foo", six.text_type(error))
+ self.assertIn("foo", str(error))
def test_parse_with_bad_pausetime_in_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_default_updt_policy())
@@ -654,7 +653,7 @@ class RollingUpdatePolicyTest(common.HeatTestCase):
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
self.assertIn("could not convert string to float",
- six.text_type(error))
+ str(error))
class RollingUpdatePolicyDiffTest(common.HeatTestCase):
@@ -736,7 +735,7 @@ class IncorrectUpdatePolicyTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('Unknown Property AutoScalingRollingUpdate',
- six.text_type(exc))
+ str(exc))
def test_with_update_policy_inst_group(self):
t = template_format.parse(inline_templates.as_heat_template)
@@ -750,7 +749,7 @@ class IncorrectUpdatePolicyTest(common.HeatTestCase):
stack = utils.parse_stack(tmpl)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
- self.assertIn('Unknown Property RollingUpdate', six.text_type(exc))
+ self.assertIn('Unknown Property RollingUpdate', str(exc))
class TestCooldownMixin(common.HeatTestCase):
diff --git a/heat/tests/autoscaling/test_heat_scaling_policy.py b/heat/tests/autoscaling/test_heat_scaling_policy.py
index 1ff733926..658426635 100644
--- a/heat/tests/autoscaling/test_heat_scaling_policy.py
+++ b/heat/tests/autoscaling/test_heat_scaling_policy.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -61,7 +60,7 @@ class TestAutoScalingPolicy(common.HeatTestCase):
stack.validate)
self.assertIn('min_adjustment_step property should only '
'be specified for adjustment_type with '
- 'value percent_change_in_capacity.', six.text_type(ex))
+ 'value percent_change_in_capacity.', str(ex))
def test_scaling_policy_bad_group(self):
t = template_format.parse(inline_templates.as_heat_template_bad_group)
@@ -70,7 +69,7 @@ class TestAutoScalingPolicy(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure, up_policy.signal)
self.assertIn('Alarm my-policy could '
- 'not find scaling group', six.text_type(ex))
+ 'not find scaling group', str(ex))
def test_scaling_policy_adjust_no_action(self):
t = template_format.parse(as_template)
@@ -183,7 +182,6 @@ class ScalingPolicyAttrTest(common.HeatTestCase):
self.assertEqual('Signature', args[1].split('=')[0])
self.assertEqual('SignatureMethod', args[2].split('=')[0])
self.assertEqual('SignatureVersion', args[3].split('=')[0])
- self.assertEqual('Timestamp', args[4].split('=')[0])
def test_signal_attribute(self):
heat_plugin = self.stack.clients.client_plugin('heat')
diff --git a/heat/tests/autoscaling/test_launch_config.py b/heat/tests/autoscaling/test_launch_config.py
index 7509564d1..4569982c8 100644
--- a/heat/tests/autoscaling/test_launch_config.py
+++ b/heat/tests/autoscaling/test_launch_config.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import short_id
@@ -111,7 +110,7 @@ class LaunchConfigurationTest(common.HeatTestCase):
rsrc.validate)
ex_msg = ('If without InstanceId, '
'ImageId and InstanceType are required.')
- self.assertIn(ex_msg, six.text_type(e))
+ self.assertIn(ex_msg, str(e))
def test_lc_validate_without_InstanceId_and_InstanceType(self):
t = template_format.parse(inline_templates.as_template)
@@ -126,7 +125,7 @@ class LaunchConfigurationTest(common.HeatTestCase):
rsrc.validate)
ex_msg = ('If without InstanceId, '
'ImageId and InstanceType are required.')
- self.assertIn(ex_msg, six.text_type(e))
+ self.assertIn(ex_msg, str(e))
def test_launch_config_create_with_instanceid_not_found(self):
t = template_format.parse(inline_templates.as_template)
@@ -148,7 +147,7 @@ class LaunchConfigurationTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertIn(msg, six.text_type(exc))
+ self.assertIn(msg, str(exc))
def test_validate_BlockDeviceMappings_without_Ebs_property(self):
t = template_format.parse(inline_templates.as_template)
@@ -164,7 +163,7 @@ class LaunchConfigurationTest(common.HeatTestCase):
self.validate_launch_config, stack)
self.assertIn("Ebs is missing, this is required",
- six.text_type(e))
+ str(e))
def test_validate_BlockDeviceMappings_without_SnapshotId_property(self):
t = template_format.parse(inline_templates.as_template)
@@ -181,7 +180,7 @@ class LaunchConfigurationTest(common.HeatTestCase):
self.validate_launch_config, stack)
self.assertIn("SnapshotId is missing, this is required",
- six.text_type(e))
+ str(e))
def test_validate_BlockDeviceMappings_without_DeviceName_property(self):
t = template_format.parse(inline_templates.as_template)
@@ -201,4 +200,4 @@ class LaunchConfigurationTest(common.HeatTestCase):
'Property error: '
'Resources.LaunchConfig.Properties.BlockDeviceMappings[0]: '
'Property DeviceName not assigned')
- self.assertIn(excepted_error, six.text_type(e))
+ self.assertIn(excepted_error, str(e))
diff --git a/heat/tests/autoscaling/test_lbutils.py b/heat/tests/autoscaling/test_lbutils.py
index aae618ba7..82ef9cabe 100644
--- a/heat/tests/autoscaling/test_lbutils.py
+++ b/heat/tests/autoscaling/test_lbutils.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -124,4 +123,4 @@ class LBUtilsTest(common.HeatTestCase):
lbutils.reconfigure_loadbalancers,
[non_lb], id_list)
self.assertIn("Unsupported resource 'non_lb' in LoadBalancerNames",
- six.text_type(error))
+ str(error))
diff --git a/heat/tests/autoscaling/test_scaling_group.py b/heat/tests/autoscaling/test_scaling_group.py
index db23355a6..db2177a9f 100644
--- a/heat/tests/autoscaling/test_scaling_group.py
+++ b/heat/tests/autoscaling/test_scaling_group.py
@@ -13,10 +13,9 @@
import datetime
import json
+from unittest import mock
-import mock
from oslo_utils import timeutils
-import six
from heat.common import exception
from heat.common import grouputils
@@ -77,7 +76,7 @@ class TestAutoScalingGroupValidation(common.HeatTestCase):
stack, 'WebServerGroup')
expected_msg = "The size of AutoScalingGroup can not be less than zero"
- self.assertEqual(expected_msg, six.text_type(e))
+ self.assertEqual(expected_msg, str(e))
def test_invalid_max_size(self):
t = template_format.parse(as_template)
@@ -96,7 +95,7 @@ class TestAutoScalingGroupValidation(common.HeatTestCase):
stack, 'WebServerGroup')
expected_msg = "MinSize can not be greater than MaxSize"
- self.assertEqual(expected_msg, six.text_type(e))
+ self.assertEqual(expected_msg, str(e))
def test_invalid_desiredcapacity(self):
t = template_format.parse(as_template)
@@ -114,7 +113,7 @@ class TestAutoScalingGroupValidation(common.HeatTestCase):
stack, 'WebServerGroup')
expected_msg = "DesiredCapacity must be between MinSize and MaxSize"
- self.assertEqual(expected_msg, six.text_type(e))
+ self.assertEqual(expected_msg, str(e))
def test_invalid_desiredcapacity_zero(self):
t = template_format.parse(as_template)
@@ -133,7 +132,7 @@ class TestAutoScalingGroupValidation(common.HeatTestCase):
stack, 'WebServerGroup')
expected_msg = "DesiredCapacity must be between MinSize and MaxSize"
- self.assertEqual(expected_msg, six.text_type(e))
+ self.assertEqual(expected_msg, str(e))
def test_validate_without_InstanceId_and_LaunchConfigurationName(self):
t = template_format.parse(as_template)
@@ -146,7 +145,7 @@ class TestAutoScalingGroupValidation(common.HeatTestCase):
"must be provided.")
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_validate_with_InstanceId_and_LaunchConfigurationName(self):
t = template_format.parse(as_template)
@@ -158,7 +157,7 @@ class TestAutoScalingGroupValidation(common.HeatTestCase):
"must be provided.")
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def _stub_nova_server_get(self, not_found=False):
mock_server = mock.MagicMock()
@@ -207,7 +206,7 @@ class TestAutoScalingGroupValidation(common.HeatTestCase):
"not be found.")
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertIn(msg, six.text_type(exc))
+ self.assertIn(msg, str(exc))
class TestScalingGroupTags(common.HeatTestCase):
@@ -640,7 +639,7 @@ class RollingUpdatePolicyTest(common.HeatTestCase):
stack = utils.parse_stack(tmpl, params=inline_templates.as_params)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
- self.assertIn("foo", six.text_type(error))
+ self.assertIn("foo", str(error))
def test_parse_with_bad_pausetime_in_update_policy(self):
tmpl = template_format.parse(asg_tmpl_with_default_updt_policy())
@@ -650,7 +649,7 @@ class RollingUpdatePolicyTest(common.HeatTestCase):
stack = utils.parse_stack(tmpl, params=inline_templates.as_params)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
- self.assertIn("Only ISO 8601 duration format", six.text_type(error))
+ self.assertIn("Only ISO 8601 duration format", str(error))
class RollingUpdatePolicyDiffTest(common.HeatTestCase):
diff --git a/heat/tests/autoscaling/test_scaling_policy.py b/heat/tests/autoscaling/test_scaling_policy.py
index c87b9efed..239a93ed7 100644
--- a/heat/tests/autoscaling/test_scaling_policy.py
+++ b/heat/tests/autoscaling/test_scaling_policy.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -63,7 +62,7 @@ class TestAutoScalingPolicy(common.HeatTestCase):
self.policy.validate)
self.assertIn('MinAdjustmentStep property should only '
'be specified for AdjustmentType with '
- 'value PercentChangeInCapacity.', six.text_type(ex))
+ 'value PercentChangeInCapacity.', str(ex))
def test_scaling_policy_bad_group(self):
t = template_format.parse(inline_templates.as_template_bad_group)
@@ -74,7 +73,7 @@ class TestAutoScalingPolicy(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure, up_policy.signal)
self.assertIn('Alarm WebServerScaleUpPolicy could '
- 'not find scaling group', six.text_type(ex))
+ 'not find scaling group', str(ex))
def test_scaling_policy_adjust_no_action(self):
t = template_format.parse(as_template)
diff --git a/heat/tests/aws/test_eip.py b/heat/tests/aws/test_eip.py
index 7bebed736..4b6047d6c 100644
--- a/heat/tests/aws/test_eip.py
+++ b/heat/tests/aws/test_eip.py
@@ -12,11 +12,10 @@
# under the License.
import copy
+from unittest import mock
-import mock
from neutronclient.common import exceptions as q_exceptions
from neutronclient.v2_0 import client as neutronclient
-import six
from heat.common import exception
from heat.common import short_id
@@ -454,7 +453,7 @@ class AllocTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertIn(expected, six.text_type(exc))
+ self.assertIn(expected, str(exc))
def mock_show_network(self):
vpc_name = utils.PhysName('test_stack', 'the_vpc')
@@ -672,7 +671,7 @@ class AllocTest(common.HeatTestCase):
rsrc.validate)
self.assertIn('At least one of the following properties '
'must be specified: InstanceId, NetworkInterfaceId',
- six.text_type(exc))
+ str(exc))
def test_delete_association_successful_if_create_failed(self):
server = self.fc.servers.list()[0]
diff --git a/heat/tests/aws/test_instance.py b/heat/tests/aws/test_instance.py
index 8ae162c35..500cb190d 100644
--- a/heat/tests/aws/test_instance.py
+++ b/heat/tests/aws/test_instance.py
@@ -12,11 +12,10 @@
# under the License.
import copy
+from unittest import mock
import uuid
-import mock
from neutronclient.v2_0 import client as neutronclient
-import six
from heat.common import exception
from heat.common import template_format
@@ -245,7 +244,7 @@ class InstancesTest(common.HeatTestCase):
self.assertIn("WebServer.Properties.Volumes[0].VolumeId: "
"Error validating value '1234': The Volume "
"(1234) could not be found.",
- six.text_type(exc))
+ str(exc))
mock_get_vol.assert_called_once_with('1234')
@@ -285,7 +284,7 @@ class InstancesTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
instance.validate)
self.assertIn("Ebs is missing, this is required",
- six.text_type(exc))
+ str(exc))
def test_validate_BlockDeviceMappings_without_SnapshotId_property(self):
stack_name = 'without_SnapshotId'
@@ -305,7 +304,7 @@ class InstancesTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
instance.validate)
self.assertIn("SnapshotId is missing, this is required",
- six.text_type(exc))
+ str(exc))
def test_validate_BlockDeviceMappings_without_DeviceName_property(self):
stack_name = 'without_DeviceName'
@@ -330,7 +329,7 @@ class InstancesTest(common.HeatTestCase):
'Property error: '
'Resources.WebServer.Properties.BlockDeviceMappings[0]: '
'Property DeviceName not assigned')
- self.assertIn(excepted_error, six.text_type(exc))
+ self.assertIn(excepted_error, str(exc))
def test_instance_create_with_image_id(self):
return_server = self.fc.servers.list()[1]
@@ -397,7 +396,7 @@ class InstancesTest(common.HeatTestCase):
"StackValidationFailed: resources.instance_create_image_err: "
"Property error: WebServer.Properties.ImageId: "
"Error validating value 'Slackware': No image matching Slackware.",
- six.text_type(error))
+ str(error))
def test_instance_create_duplicate_image_name_err(self):
stack_name = 'test_instance_create_image_name_err_stack'
@@ -427,7 +426,7 @@ class InstancesTest(common.HeatTestCase):
"Property error: WebServer.Properties.ImageId: "
"Error validating value 'CentOS 5.2': No image unique match "
"found for CentOS 5.2.",
- six.text_type(error))
+ str(error))
def test_instance_create_image_id_err(self):
stack_name = 'test_instance_create_image_id_err_stack'
@@ -454,7 +453,7 @@ class InstancesTest(common.HeatTestCase):
"StackValidationFailed: resources.instance_create_image_err: "
"Property error: WebServer.Properties.ImageId: "
"Error validating value '1': No image matching 1.",
- six.text_type(error))
+ str(error))
def test_handle_check(self):
(tmpl, stack) = self._setup_test_stack('test_instance_check_active')
@@ -480,7 +479,7 @@ class InstancesTest(common.HeatTestCase):
return_value=False)
exc = self.assertRaises(exception.Error, instance.handle_check)
- self.assertIn('foo', six.text_type(exc))
+ self.assertIn('foo', str(exc))
def test_instance_create_unexpected_status(self):
# checking via check_create_complete only so not to mock
@@ -497,7 +496,7 @@ class InstancesTest(common.HeatTestCase):
instance.check_create_complete,
(creator, None))
self.assertEqual('Instance is not active - Unknown status BOGUS '
- 'due to "Unknown"', six.text_type(e))
+ 'due to "Unknown"', str(e))
self.fc.servers.get.assert_called_once_with(instance.resource_id)
@@ -521,7 +520,7 @@ class InstancesTest(common.HeatTestCase):
(creator, None))
self.assertEqual(
'Went to status ERROR due to "Message: NoValidHost, Code: 500"',
- six.text_type(e))
+ str(e))
self.fc.servers.get.assert_called_once_with(instance.resource_id)
@@ -541,7 +540,7 @@ class InstancesTest(common.HeatTestCase):
(creator, None))
self.assertEqual(
'Went to status ERROR due to "Message: Unknown, Code: Unknown"',
- six.text_type(e))
+ str(e))
self.fc.servers.get.assert_called_once_with(instance.resource_id)
@@ -761,7 +760,7 @@ class InstancesTest(common.HeatTestCase):
self.assertEqual(
"Error: resources.ud_type_f: "
"Resizing to '2' failed, status 'ERROR'",
- six.text_type(error))
+ str(error))
self.assertEqual((instance.UPDATE, instance.FAILED), instance.state)
self.fc.servers.get.assert_called_with('1234')
@@ -1385,7 +1384,8 @@ class InstancesTest(common.HeatTestCase):
self.nclient.create_port.assert_called_with({'port': props})
if not all_uuids:
- self.nclient.list_security_groups.assert_called_once_with()
+ self.nclient.list_security_groups.assert_called_once_with(
+ project_id=mock.ANY)
def _get_fake_properties(self, sg='one'):
fake_groups_list = {
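The hunk above loosens the call assertion with mock.ANY so the test no longer depends on the exact project_id passed to list_security_groups. A short sketch of how mock.ANY behaves in call assertions (the client object here is illustrative, not Heat's):

    from unittest import mock

    client = mock.Mock()
    client.list_security_groups(project_id='abc123')

    # mock.ANY compares equal to anything, so the assertion passes
    # regardless of which project_id was actually supplied.
    client.list_security_groups.assert_called_once_with(project_id=mock.ANY)
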
diff --git a/heat/tests/aws/test_loadbalancer.py b/heat/tests/aws/test_loadbalancer.py
index 07417d130..7ad6106a9 100644
--- a/heat/tests/aws/test_loadbalancer.py
+++ b/heat/tests/aws/test_loadbalancer.py
@@ -12,8 +12,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_config import cfg
from heat.common import exception
diff --git a/heat/tests/aws/test_s3.py b/heat/tests/aws/test_s3.py
index 479ecb499..47882aa2c 100644
--- a/heat/tests/aws/test_s3.py
+++ b/heat/tests/aws/test_s3.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from oslo_config import cfg
import swiftclient.client as sc
@@ -255,7 +254,7 @@ class s3Test(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure, deleter)
self.assertIn("ResourceActionNotSupported: resources.test_resource: "
"The bucket you tried to delete is not empty",
- six.text_type(ex))
+ str(ex))
self.mock_con.put_container.assert_called_once_with(
container_name,
{'X-Container-Write': 'test_tenant:test_username',
@@ -278,7 +277,7 @@ class s3Test(common.HeatTestCase):
rsrc = self.create_resource(t, stack, 'S3Bucket')
deleter = scheduler.TaskRunner(rsrc.delete)
ex = self.assertRaises(exception.ResourceFailure, deleter)
- self.assertIn("Conflict", six.text_type(ex))
+ self.assertIn("Conflict", str(ex))
self.mock_con.put_container.assert_called_once_with(
container_name,
{'X-Container-Write': 'test_tenant:test_username',
diff --git a/heat/tests/aws/test_security_group.py b/heat/tests/aws/test_security_group.py
index 8c1b755e9..9f720caa9 100644
--- a/heat/tests/aws/test_security_group.py
+++ b/heat/tests/aws/test_security_group.py
@@ -13,8 +13,8 @@
import collections
import copy
+from unittest import mock
-import mock
from neutronclient.common import exceptions as neutron_exc
from neutronclient.v2_0 import client as neutronclient
diff --git a/heat/tests/aws/test_user.py b/heat/tests/aws/test_user.py
index 8d6a89fac..410d2cff5 100644
--- a/heat/tests/aws/test_user.py
+++ b/heat/tests/aws/test_user.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from heat.common import exception
diff --git a/heat/tests/aws/test_volume.py b/heat/tests/aws/test_volume.py
index 8657437f3..a2dfbf91c 100644
--- a/heat/tests/aws/test_volume.py
+++ b/heat/tests/aws/test_volume.py
@@ -12,11 +12,10 @@
# under the License.
import copy
+from unittest import mock
from cinderclient import exceptions as cinder_exp
-import mock
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common import template_format
@@ -105,7 +104,7 @@ class VolumeTest(vt_base.VolumeTestCase):
self._mock_delete_volume(fv)
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.destroy))
- self.assertIn("Volume in use", six.text_type(ex))
+ self.assertIn("Volume in use", str(ex))
self.cinder_fc.volumes.get.side_effect = [
vt_base.FakeVolume('available'), cinder_exp.NotFound('Not found')]
@@ -176,7 +175,7 @@ class VolumeTest(vt_base.VolumeTestCase):
ex = self.assertRaises(exception.ResourceFailure,
self.create_volume, self.t, stack, 'DataVolume')
self.assertIn('Went to status error due to "Unknown"',
- six.text_type(ex))
+ str(ex))
def test_volume_bad_tags(self):
stack_name = 'test_volume_bad_tags_stack'
@@ -188,7 +187,7 @@ class VolumeTest(vt_base.VolumeTestCase):
self.create_volume, self.t, stack, 'DataVolume')
self.assertEqual("Property error: "
"Resources.DataVolume.Properties.Tags[0]: "
- "Unknown Property Foo", six.text_type(ex))
+ "Unknown Property Foo", str(ex))
def test_volume_attachment_error(self):
stack_name = 'test_volume_attach_error_stack'
@@ -208,7 +207,7 @@ class VolumeTest(vt_base.VolumeTestCase):
self.create_attachment,
self.t, stack, 'MountPoint')
self.assertIn("Volume attachment failed - Unknown status error",
- six.text_type(ex))
+ str(ex))
self.validate_mock_create_server_volume_script()
def test_volume_attachment(self):
@@ -383,7 +382,7 @@ class VolumeTest(vt_base.VolumeTestCase):
detach_task = scheduler.TaskRunner(rsrc.delete)
ex = self.assertRaises(exception.ResourceFailure, detach_task)
self.assertIn('Volume detachment failed - Unknown status error',
- six.text_type(ex))
+ str(ex))
self.fc.volumes.delete_server_volume.assert_called_once_with(
u'WikiDatabase', 'vol-123')
@@ -476,7 +475,7 @@ class VolumeTest(vt_base.VolumeTestCase):
"Update to properties "
"AvailabilityZone, Size, Tags of DataVolume "
"(AWS::EC2::Volume) is not supported",
- six.text_type(ex))
+ str(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
def test_volume_check(self):
@@ -568,7 +567,7 @@ class VolumeTest(vt_base.VolumeTestCase):
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.destroy))
- self.assertIn('Unknown status error', six.text_type(ex))
+ self.assertIn('Unknown status error', str(ex))
self.m_backups.create.assert_called_once_with(fv.id)
self.m_backups.get.assert_called_once_with(fb.id)
@@ -597,7 +596,7 @@ class VolumeTest(vt_base.VolumeTestCase):
create = scheduler.TaskRunner(rsrc.create)
ex = self.assertRaises(exception.ResourceFailure, create)
self.assertIn('Went to status error due to "Unknown"',
- six.text_type(ex))
+ str(ex))
self.cinder_fc.volumes.get.side_effect = [
fva,
@@ -650,7 +649,7 @@ class VolumeTest(vt_base.VolumeTestCase):
ex = self.assertRaises(exception.ResourceFailure,
self.create_volume, self.t, stack, 'DataVolume')
self.assertIn('Went to status error due to "Unknown"',
- six.text_type(ex))
+ str(ex))
cinder.CinderClientPlugin._create.assert_called_once_with()
self.m_restore.assert_called_once_with('backup-123')
@@ -665,7 +664,7 @@ class VolumeTest(vt_base.VolumeTestCase):
self.t, stack, 'DataVolume')
self.assertEqual(
"Property error: Resources.DataVolume.Properties.Size: "
- "0 is out of range (min: 1, max: None)", six.text_type(error))
+ "0 is out of range (min: 1, max: None)", str(error))
def test_volume_attachment_updates_not_supported(self):
self.patchobject(nova.NovaClientPlugin, 'get_server')
@@ -694,7 +693,7 @@ class VolumeTest(vt_base.VolumeTestCase):
self.assertIn('NotSupported: resources.MountPoint: '
'Update to properties Device, InstanceId, '
'VolumeId of MountPoint (AWS::EC2::VolumeAttachment)',
- six.text_type(ex))
+ str(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.validate_mock_create_server_volume_script()
diff --git a/heat/tests/aws/test_waitcondition.py b/heat/tests/aws/test_waitcondition.py
index 2353469e7..0e31cf172 100644
--- a/heat/tests/aws/test_waitcondition.py
+++ b/heat/tests/aws/test_waitcondition.py
@@ -14,12 +14,11 @@
import copy
import datetime
import json
+from unittest import mock
import uuid
-import mock
from oslo_utils import timeutils
-import six
-from six.moves.urllib import parse
+from urllib import parse
from heat.common import exception
from heat.common import identifier
@@ -217,7 +216,7 @@ class WaitConditionTest(common.HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
wc_att = rsrc.FnGetAtt('Data')
- self.assertEqual(six.text_type({}), wc_att)
+ self.assertEqual(str({}), wc_att)
handle = self.stack['WaitHandle']
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), handle.state)
@@ -233,7 +232,7 @@ class WaitConditionTest(common.HeatTestCase):
'Status': 'SUCCESS', 'UniqueId': '456'}
ret = handle.handle_signal(test_metadata)
wc_att = rsrc.FnGetAtt('Data')
- self.assertIsInstance(wc_att, six.string_types)
+ self.assertIsInstance(wc_att, str)
self.assertEqual({"123": "foo", "456": "dog"}, json.loads(wc_att))
self.assertEqual('status:SUCCESS reason:cat', ret)
self.assertEqual(1, self.m_gs.call_count)
@@ -375,7 +374,9 @@ class WaitConditionHandleTest(common.HeatTestCase):
def test_handle(self):
stack_id = 'STACKABCD1234'
stack_name = 'test_stack2'
- created_time = datetime.datetime(2012, 11, 29, 13, 49, 37)
+ now = datetime.datetime(2012, 11, 29, 13, 49, 37)
+ timeutils.set_time_override(now)
+ self.addCleanup(timeutils.clear_time_override)
self.stack = self.create_stack(stack_id=stack_id,
stack_name=stack_name)
@@ -387,7 +388,6 @@ class WaitConditionHandleTest(common.HeatTestCase):
# clear the url
rsrc.data_set('ec2_signed_url', None, False)
- rsrc.created_time = created_time
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
connection_url = "".join([
'http://server.test:8000/v1/waitcondition/',
@@ -621,6 +621,6 @@ class WaitConditionUpdateTest(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure,
updater)
self.assertEqual("WaitConditionTimeout: resources.WaitForTheHandle: "
- "0 of 5 received", six.text_type(ex))
+ "0 of 5 received", str(ex))
self.assertEqual(5, rsrc.properties['Count'])
self.assertEqual(2, m_gs.call_count)
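The test_handle change above stops overwriting rsrc.created_time and instead freezes the clock with oslo.utils' timeutils, registering a cleanup so the override cannot leak into other tests. A self-contained sketch of that pattern as a plain unittest case, outside the Heat test base class:

    import datetime
    import unittest

    from oslo_utils import timeutils

    class FrozenTimeTest(unittest.TestCase):
        def test_frozen_now(self):
            now = datetime.datetime(2012, 11, 29, 13, 49, 37)
            timeutils.set_time_override(now)
            # addCleanup guarantees the override is removed even if the
            # test fails part-way through.
            self.addCleanup(timeutils.clear_time_override)
            self.assertEqual(now, timeutils.utcnow())
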
diff --git a/heat/tests/clients/test_barbican_client.py b/heat/tests/clients/test_barbican_client.py
index 977886710..6159ab70e 100644
--- a/heat/tests/clients/test_barbican_client.py
+++ b/heat/tests/clients/test_barbican_client.py
@@ -12,9 +12,9 @@
# under the License.
import collections
+from unittest import mock
from barbicanclient import exceptions
-import mock
from heat.common import exception
from heat.engine.clients.os import barbican
diff --git a/heat/tests/clients/test_blazar_client.py b/heat/tests/clients/test_blazar_client.py
index f9410f9fe..e671f862c 100644
--- a/heat/tests/clients/test_blazar_client.py
+++ b/heat/tests/clients/test_blazar_client.py
@@ -11,9 +11,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from heat.tests import common
from heat.tests import utils
-import mock
class BlazarClientPluginTest(common.HeatTestCase):
diff --git a/heat/tests/clients/test_cinder_client.py b/heat/tests/clients/test_cinder_client.py
index bebfc574d..69b13c612 100644
--- a/heat/tests/clients/test_cinder_client.py
+++ b/heat/tests/clients/test_cinder_client.py
@@ -12,11 +12,11 @@
# under the License.
"""Tests for :module:'heat.engine.clients.os.cinder'."""
+from unittest import mock
import uuid
from cinderclient import exceptions as cinder_exc
from keystoneauth1 import exceptions as ks_exceptions
-import mock
from heat.common import exception
from heat.engine.clients.os import cinder
diff --git a/heat/tests/clients/test_clients.py b/heat/tests/clients/test_clients.py
index 83fd36ac9..c392c2a30 100644
--- a/heat/tests/clients/test_clients.py
+++ b/heat/tests/clients/test_clients.py
@@ -11,6 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from aodhclient import exceptions as aodh_exc
from cinderclient import exceptions as cinder_exc
from glanceclient import exc as glance_exc
@@ -20,12 +22,10 @@ from keystoneauth1 import exceptions as keystone_exc
from keystoneauth1.identity import generic
from manilaclient import exceptions as manila_exc
from mistralclient.api import base as mistral_base
-import mock
from neutronclient.common import exceptions as neutron_exc
from openstack import exceptions
from oslo_config import cfg
from saharaclient.api import base as sahara_base
-import six
from swiftclient import exceptions as swift_exc
from testtools import testcase
from troveclient import client as troveclient
@@ -48,12 +48,12 @@ class ClientsTest(common.HeatTestCase):
cfg.CONF.set_override('cloud_backend', 'some.weird.object')
exc = self.assertRaises(exception.Invalid, clients.Clients, con)
self.assertIn('Invalid cloud_backend setting in heat.conf detected',
- six.text_type(exc))
+ str(exc))
cfg.CONF.set_override('cloud_backend', 'heat.engine.clients.Clients')
exc = self.assertRaises(exception.Invalid, clients.Clients, con)
self.assertIn('Invalid cloud_backend setting in heat.conf detected',
- six.text_type(exc))
+ str(exc))
def test_clients_get_heat_url(self):
con = mock.Mock()
diff --git a/heat/tests/clients/test_designate_client.py b/heat/tests/clients/test_designate_client.py
index 8bc2e02cc..5631565aa 100644
--- a/heat/tests/clients/test_designate_client.py
+++ b/heat/tests/clients/test_designate_client.py
@@ -11,36 +11,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-from designateclient import exceptions as designate_exceptions
-from designateclient import v1 as designate_client
-import mock
-import six
+from unittest import mock
+
+from designateclient import client as designate_client
from heat.common import exception as heat_exception
from heat.engine.clients.os import designate as client
from heat.tests import common
-class DesignateDomainConstraintTest(common.HeatTestCase):
-
- def test_expected_exceptions(self):
- self.assertEqual((heat_exception.EntityNotFound,),
- client.DesignateDomainConstraint.expected_exceptions,
- "DesignateDomainConstraint expected exceptions error")
-
- def test_constrain(self):
- constrain = client.DesignateDomainConstraint()
- client_mock = mock.MagicMock()
- client_plugin_mock = mock.MagicMock()
- client_plugin_mock.get_domain_id.return_value = None
- client_mock.client_plugin.return_value = client_plugin_mock
-
- self.assertIsNone(constrain.validate_with_client(client_mock,
- 'domain_1'))
-
- client_plugin_mock.get_domain_id.assert_called_once_with('domain_1')
-
-
class DesignateClientPluginTest(common.HeatTestCase):
@mock.patch.object(designate_client, 'Client')
@@ -56,267 +35,8 @@ class DesignateClientPluginTest(common.HeatTestCase):
# Make sure proper client is created with expected args
client_designate.assert_called_once_with(
endpoint_type='publicURL', service_type='dns',
- session=session, region_name='region1'
- )
-
-
-class DesignateClientPluginDomainTest(common.HeatTestCase):
-
- sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
- sample_name = 'test-domain.com'
-
- def _get_mock_domain(self):
- domain = mock.MagicMock()
- domain.id = self.sample_uuid
- domain.name = self.sample_name
- return domain
-
- def setUp(self):
- super(DesignateClientPluginDomainTest, self).setUp()
- self._client = mock.MagicMock()
- self.client_plugin = client.DesignateClientPlugin(
- context=mock.MagicMock()
- )
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- def test_get_domain_id(self, client_designate):
- self._client.domains.get.return_value = self._get_mock_domain()
- client_designate.return_value = self._client
-
- self.assertEqual(self.sample_uuid,
- self.client_plugin.get_domain_id(self.sample_uuid))
- self._client.domains.get.assert_called_once_with(
- self.sample_uuid)
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- def test_get_domain_id_not_found(self, client_designate):
- self._client.domains.get.side_effect = (designate_exceptions
- .NotFound)
- client_designate.return_value = self._client
-
- ex = self.assertRaises(heat_exception.EntityNotFound,
- self.client_plugin.get_domain_id,
- self.sample_uuid)
- msg = ("The Designate Domain (%(name)s) could not be found." %
- {'name': self.sample_uuid})
- self.assertEqual(msg, six.text_type(ex))
- self._client.domains.get.assert_called_once_with(
- self.sample_uuid)
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- def test_get_domain_id_by_name(self, client_designate):
- self._client.domains.get.side_effect = (designate_exceptions
- .NotFound)
- self._client.domains.list.return_value = [self._get_mock_domain()]
- client_designate.return_value = self._client
-
- self.assertEqual(self.sample_uuid,
- self.client_plugin.get_domain_id(self.sample_name))
-
- self._client.domains.get.assert_called_once_with(
- self.sample_name)
- self._client.domains.list.assert_called_once_with()
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- def test_get_domain_id_by_name_not_found(self, client_designate):
- self._client.domains.get.side_effect = (designate_exceptions
- .NotFound)
- self._client.domains.list.return_value = []
- client_designate.return_value = self._client
-
- ex = self.assertRaises(heat_exception.EntityNotFound,
- self.client_plugin.get_domain_id,
- self.sample_name)
- msg = ("The Designate Domain (%(name)s) could not be found." %
- {'name': self.sample_name})
- self.assertEqual(msg, six.text_type(ex))
-
- self._client.domains.get.assert_called_once_with(
- self.sample_name)
- self._client.domains.list.assert_called_once_with()
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- @mock.patch('designateclient.v1.domains.Domain')
- def test_domain_create(self, mock_domain, client_designate):
- self._client.domains.create.return_value = None
- client_designate.return_value = self._client
-
- domain = dict(
- name='test-domain.com',
- description='updated description',
- ttl=4200,
- email='xyz@test-domain.com'
- )
-
- mock_sample_domain = mock.Mock()
- mock_domain.return_value = mock_sample_domain
-
- self.client_plugin.domain_create(**domain)
-
- # Make sure domain entity is created with right arguments
- mock_domain.assert_called_once_with(**domain)
- self._client.domains.create.assert_called_once_with(
- mock_sample_domain)
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- def test_domain_update(self, client_designate):
- self._client.domains.update.return_value = None
- mock_domain = self._get_mock_domain()
- self._client.domains.get.return_value = mock_domain
-
- client_designate.return_value = self._client
-
- domain = dict(
- id='sample-id',
- description='updated description',
- ttl=4200,
- email='xyz@test-domain.com'
- )
-
- self.client_plugin.domain_update(**domain)
-
- self._client.domains.get.assert_called_once_with(
- mock_domain.id)
-
- for key in domain.keys():
- setattr(mock_domain, key, domain[key])
-
- self._client.domains.update.assert_called_once_with(
- mock_domain)
-
-
-class DesignateClientPluginRecordTest(common.HeatTestCase):
-
- sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
- sample_domain_id = '477e8273-60a7-4c41-b683-fdb0bc7cd153'
-
- def _get_mock_record(self):
- record = mock.MagicMock()
- record.id = self.sample_uuid
- record.domain_id = self.sample_domain_id
- return record
-
- def setUp(self):
- super(DesignateClientPluginRecordTest, self).setUp()
- self._client = mock.MagicMock()
- self.client_plugin = client.DesignateClientPlugin(
- context=mock.MagicMock()
- )
- self.client_plugin.get_domain_id = mock.Mock(
- return_value=self.sample_domain_id)
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- @mock.patch('designateclient.v1.records.Record')
- def test_record_create(self, mock_record, client_designate):
- self._client.records.create.return_value = None
- client_designate.return_value = self._client
-
- record = dict(
- name='test-record.com',
- description='updated description',
- ttl=4200,
- type='',
- priority=1,
- data='1.1.1.1',
- domain=self.sample_domain_id
- )
-
- mock_sample_record = mock.Mock()
- mock_record.return_value = mock_sample_record
-
- self.client_plugin.record_create(**record)
-
- # Make sure record entity is created with right arguments
- domain_id = record.pop('domain')
- mock_record.assert_called_once_with(**record)
- self._client.records.create.assert_called_once_with(
- domain_id,
- mock_sample_record)
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- @mock.patch('designateclient.v1.records.Record')
- def test_record_update(self, mock_record, client_designate):
- self._client.records.update.return_value = None
- mock_record = self._get_mock_record()
- self._client.records.get.return_value = mock_record
-
- client_designate.return_value = self._client
-
- record = dict(
- id=self.sample_uuid,
- name='test-record.com',
- description='updated description',
- ttl=4200,
- type='',
- priority=1,
- data='1.1.1.1',
- domain=self.sample_domain_id
- )
-
- self.client_plugin.record_update(**record)
-
- self._client.records.get.assert_called_once_with(
- self.sample_domain_id,
- self.sample_uuid)
-
- for key in record.keys():
- setattr(mock_record, key, record[key])
-
- self._client.records.update.assert_called_once_with(
- self.sample_domain_id,
- mock_record)
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- @mock.patch('designateclient.v1.records.Record')
- def test_record_delete(self, mock_record, client_designate):
- self._client.records.delete.return_value = None
- client_designate.return_value = self._client
-
- record = dict(
- id=self.sample_uuid,
- domain=self.sample_domain_id
- )
-
- self.client_plugin.record_delete(**record)
-
- self._client.records.delete.assert_called_once_with(
- self.sample_domain_id,
- self.sample_uuid)
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- @mock.patch('designateclient.v1.records.Record')
- def test_record_delete_domain_not_found(self, mock_record,
- client_designate):
- self._client.records.delete.return_value = None
- self.client_plugin.get_domain_id.side_effect = (
- heat_exception.EntityNotFound)
- client_designate.return_value = self._client
-
- record = dict(
- id=self.sample_uuid,
- domain=self.sample_domain_id
- )
-
- self.client_plugin.record_delete(**record)
-
- self.assertFalse(self._client.records.delete.called)
-
- @mock.patch.object(client.DesignateClientPlugin, 'client')
- @mock.patch('designateclient.v1.records.Record')
- def test_record_show(self, mock_record, client_designate):
- self._client.records.get.return_value = None
- client_designate.return_value = self._client
-
- record = dict(
- id=self.sample_uuid,
- domain=self.sample_domain_id
- )
-
- self.client_plugin.record_show(**record)
-
- self._client.records.get.assert_called_once_with(
- self.sample_domain_id,
- self.sample_uuid)
+ session=session, region_name='region1',
+ version='2')
class DesignateZoneConstraintTest(common.HeatTestCase):
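With the v1 domain/record plugin tests removed, the remaining assertion above expects the zone-based v2 client. Roughly, the plugin is now expected to build the client along these lines; this is a sketch assuming a keystoneauth session, with the argument names taken from the assertion above rather than from the plugin source:

    from designateclient import client as designate_client
    from keystoneauth1 import session as ks_session

    def make_dns_client(session: ks_session.Session, region_name: str):
        # Mirrors the arguments asserted in the test above; the v2 API
        # works with zones/recordsets instead of v1 domains/records.
        return designate_client.Client(
            version='2',
            session=session,
            endpoint_type='publicURL',
            service_type='dns',
            region_name=region_name,
        )
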
diff --git a/heat/tests/clients/test_glance_client.py b/heat/tests/clients/test_glance_client.py
index 738f471d8..4db8b1ead 100644
--- a/heat/tests/clients/test_glance_client.py
+++ b/heat/tests/clients/test_glance_client.py
@@ -11,10 +11,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import uuid
from glanceclient import exc
-import mock
from heat.engine.clients import client_exception as exception
from heat.engine.clients.os import glance
diff --git a/heat/tests/clients/test_heat_client.py b/heat/tests/clients/test_heat_client.py
index 6c6719cb4..dabd00436 100644
--- a/heat/tests/clients/test_heat_client.py
+++ b/heat/tests/clients/test_heat_client.py
@@ -12,7 +12,7 @@
# under the License.
import json
-import mock
+from unittest import mock
import uuid
from keystoneauth1 import access as ks_access
@@ -25,7 +25,6 @@ from keystoneauth1 import token_endpoint as ks_token_endpoint
from keystoneclient.v3 import client as kc_v3
from keystoneclient.v3 import domains as kc_v3_domains
from oslo_config import cfg
-import six
from heat.common import config
from heat.common import exception
@@ -86,6 +85,7 @@ class KeystoneClientTest(common.HeatTestCase):
self.m_client.assert_called_once_with(
session=utils.AnyInstance(ks_session.Session),
auth=self.mock_ks_auth,
+ connect_retries=2,
region_name=None)
def _stubs_auth(self, method='token', trust_scoped=True,
@@ -167,6 +167,7 @@ class KeystoneClientTest(common.HeatTestCase):
if self.client:
self.m_client.assert_any_call(
session=utils.AnyInstance(ks_session.Session),
+ connect_retries=2,
region_name=None)
if self.stub_admin_auth:
self.mock_admin_ks_auth.get_user_id.assert_called_once_with(
@@ -232,7 +233,7 @@ class KeystoneClientTest(common.HeatTestCase):
err = self.assertRaises(exception.Error,
heat_ks_client.create_stack_user,
'auser', password='password')
- self.assertIn("Can't find role heat_stack_user", six.text_type(err))
+ self.assertIn("Can't find role heat_stack_user", str(err))
self.mock_ks_v3_client.roles.list.assert_called_once_with(
name='heat_stack_user')
self._validate_stub_auth()
@@ -325,7 +326,7 @@ class KeystoneClientTest(common.HeatTestCase):
err = self.assertRaises(exception.Error,
heat_ks_client.create_stack_domain_user,
username='duser', project_id='aproject')
- self.assertIn("Can't find role heat_stack_user", six.text_type(err))
+ self.assertIn("Can't find role heat_stack_user", str(err))
self._validate_stub_domain_admin_client()
self.mock_ks_v3_client.roles.list.assert_called_once_with(
name='heat_stack_user')
@@ -520,6 +521,65 @@ class KeystoneClientTest(common.HeatTestCase):
self.assertRaises(exception.AuthorizationFailure,
heat_keystoneclient.KeystoneClient, ctx)
+ def test_regenerate_trust_context_with_no_exist_trust_id(self):
+
+ """Test regenerate_trust_context."""
+
+ class MockTrust(object):
+ id = 'dtrust123'
+
+ mock_ks_auth, mock_auth_ref = self._stubs_auth(user_id='5678',
+ project_id='42',
+ stub_trust_context=True,
+ stub_admin_auth=True)
+
+ cfg.CONF.set_override('deferred_auth_method', 'trusts')
+
+ trustor_roles = ['heat_stack_owner', 'admin', '__member__']
+ trustee_roles = trustor_roles
+ mock_auth_ref.user_id = '5678'
+ mock_auth_ref.project_id = '42'
+
+ self.mock_ks_v3_client.trusts.create.return_value = MockTrust()
+
+ ctx = utils.dummy_context(roles=trustor_roles)
+ ctx.trust_id = None
+ heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)
+ trust_context = heat_ks_client.regenerate_trust_context()
+ self.assertEqual('dtrust123', trust_context.trust_id)
+ self.assertEqual('5678', trust_context.trustor_user_id)
+ ks_loading.load_auth_from_conf_options.assert_called_once_with(
+ cfg.CONF, 'trustee', trust_id=None)
+ self.mock_ks_v3_client.trusts.create.assert_called_once_with(
+ trustor_user='5678',
+ trustee_user='1234',
+ project='42',
+ impersonation=True,
+ allow_redelegation=False,
+ role_names=trustee_roles)
+ self.assertEqual(0, self.mock_ks_v3_client.trusts.delete.call_count)
+
+ def test_regenerate_trust_context_with_exist_trust_id(self):
+
+ """Test regenerate_trust_context."""
+
+ self._stubs_auth(method='trust')
+ cfg.CONF.set_override('deferred_auth_method', 'trusts')
+
+ ctx = utils.dummy_context()
+ ctx.trust_id = 'atrust123'
+ ctx.trustor_user_id = 'trustor_user_id'
+
+ class MockTrust(object):
+ id = 'dtrust123'
+
+ self.mock_ks_v3_client.trusts.create.return_value = MockTrust()
+ heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)
+ trust_context = heat_ks_client.regenerate_trust_context()
+ self.assertEqual('dtrust123', trust_context.trust_id)
+ self.mock_ks_v3_client.trusts.delete.assert_called_once_with(
+ ctx.trust_id)
+
def test_create_trust_context_trust_id(self):
"""Test create_trust_context with existing trust_id."""
@@ -641,7 +701,7 @@ class KeystoneClientTest(common.HeatTestCase):
heat_ks_client.create_trust_context)
expected = "Missing required credential: roles "
"{'role_names': ['heat_stack_owner']}"
- self.assertIn(expected, six.text_type(exc))
+ self.assertIn(expected, str(exc))
self.m_load_auth.assert_called_with(
cfg.CONF, 'trustee', trust_id=None)
self.mock_ks_v3_client.trusts.create.assert_called_once_with(
@@ -679,7 +739,7 @@ class KeystoneClientTest(common.HeatTestCase):
'"stack_user_domain_id" or "stack_user_domain_name" '
'without "stack_domain_admin" and '
'"stack_domain_admin_password"')
- self.assertIn(exp_msg, six.text_type(err))
+ self.assertIn(exp_msg, str(err))
def test_trust_init(self):
@@ -1449,6 +1509,52 @@ class KeystoneClientTest(common.HeatTestCase):
self.assertIsNone(heat_ks_client.delete_stack_domain_project(
project_id='aprojectid'))
+ def test_server_keystone_endpoint_url_config(self):
+ """Return non fallback url path."""
+ cfg.CONF.set_override('server_keystone_endpoint_type', 'public')
+ ctx = utils.dummy_context()
+ ctx.trust_id = None
+ heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)
+ fallback_url = 'http://server.fallback.test:5000/v3'
+ auth_ref = heat_ks_client.context.auth_plugin.get_access(
+ heat_ks_client.session)
+ auth_ref.service_catalog.get_urls = mock.MagicMock()
+ auth_ref.service_catalog.get_urls.return_value = [
+ 'http://server.public.test:5000']
+ self.assertEqual(
+ heat_ks_client.server_keystone_endpoint_url(fallback_url),
+ 'http://server.public.test:5000/v3')
+ cfg.CONF.clear_override('server_keystone_endpoint_type')
+
+ def test_server_keystone_endpoint_url_no_config(self):
+ """Return fallback as no config option specified."""
+ ctx = utils.dummy_context()
+ ctx.trust_id = None
+ heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)
+ cfg.CONF.clear_override('server_keystone_endpoint_type')
+ fallback_url = 'http://server.fallback.test:5000/v3'
+ self.assertEqual(heat_ks_client.server_keystone_endpoint_url(
+ fallback_url), fallback_url)
+
+ def test_server_keystone_endpoint_url_auth_exception(self):
+ """Authorization call fails, return fallback."""
+ cfg.CONF.set_override('server_keystone_endpoint_type', 'public')
+ ctx = utils.dummy_context()
+ ctx.trust_id = None
+ heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)
+ auth_ref = heat_ks_client.context.auth_plugin.get_access(
+ heat_ks_client.session)
+ auth_ref.service_catalog.get_urls = mock.MagicMock()
+ auth_ref.service_catalog.get_urls.return_value = [
+ 'http://server.public.test:5000']
+ heat_ks_client.context.auth_plugin.get_access = mock.MagicMock()
+ heat_ks_client.context.auth_plugin.get_access.side_effect = (
+ kc_exception.Unauthorized)
+ fallback_url = 'http://server.fallback.test:5000/v3'
+ self.assertEqual(heat_ks_client.server_keystone_endpoint_url(
+ fallback_url), fallback_url)
+ cfg.CONF.clear_override('server_keystone_endpoint_type')
+
class KeystoneClientTestDomainName(KeystoneClientTest):
def setUp(self):
@@ -1472,6 +1578,7 @@ class KeystoneClientTestDomainName(KeystoneClientTest):
self.m_client.assert_called_once_with(
session=utils.AnyInstance(ks_session.Session),
auth=self.mock_ks_auth,
+ connect_retries=2,
region_name=None)
def _stub_domain_admin_client(self, domain_id='adomain123'):
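Several of the new server_keystone_endpoint_url tests above drive behaviour through cfg.CONF.set_override and undo it with clear_override. A self-contained sketch of that oslo.config pattern; the option is registered locally here purely for illustration, whereas Heat registers it in its own config module:

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opt(cfg.StrOpt('server_keystone_endpoint_type', default=''))

    CONF.set_override('server_keystone_endpoint_type', 'public')
    assert CONF.server_keystone_endpoint_type == 'public'

    # Reverting the override restores the registered default.
    CONF.clear_override('server_keystone_endpoint_type')
    assert CONF.server_keystone_endpoint_type == ''
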
diff --git a/heat/tests/clients/test_ironic_client.py b/heat/tests/clients/test_ironic_client.py
new file mode 100644
index 000000000..5b0568d57
--- /dev/null
+++ b/heat/tests/clients/test_ironic_client.py
@@ -0,0 +1,78 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from ironicclient import exceptions as ic_exc
+
+from heat.engine.clients.os import ironic as ic
+from heat.tests import common
+from heat.tests import utils
+
+
+class IronicClientPluginTest(common.HeatTestCase):
+
+ def test_create(self):
+ context = utils.dummy_context()
+ plugin = context.clients.client_plugin('ironic')
+ client = plugin.client()
+ self.assertEqual('http://server.test:5000/v3',
+ client.port.api.session.auth.endpoint)
+
+
+class fake_resource(object):
+ def __init__(self, id=None, name=None):
+ self.uuid = id
+ self.name = name
+
+
+class PortGroupConstraintTest(common.HeatTestCase):
+ def setUp(self):
+ super(PortGroupConstraintTest, self).setUp()
+ self.ctx = utils.dummy_context()
+ self.mock_port_group_get = mock.Mock()
+ self.ctx.clients.client_plugin(
+ 'ironic').client().portgroup.get = self.mock_port_group_get
+ self.constraint = ic.PortGroupConstraint()
+
+ def test_validate(self):
+ self.mock_port_group_get.return_value = fake_resource(
+ id='my_port_group')
+ self.assertTrue(self.constraint.validate(
+ 'my_port_group', self.ctx))
+
+ def test_validate_fail(self):
+ self.mock_port_group_get.side_effect = ic_exc.NotFound()
+ self.assertFalse(self.constraint.validate(
+ "bad_port_group", self.ctx))
+
+
+class NodeConstraintTest(common.HeatTestCase):
+ def setUp(self):
+ super(NodeConstraintTest, self).setUp()
+ self.ctx = utils.dummy_context()
+ self.mock_node_get = mock.Mock()
+ self.ctx.clients.client_plugin(
+ 'ironic').client().node.get = self.mock_node_get
+ self.constraint = ic.NodeConstraint()
+
+ def test_validate(self):
+ self.mock_node_get.return_value = fake_resource(
+ id='my_node')
+ self.assertTrue(self.constraint.validate(
+ 'my_node', self.ctx))
+
+ def test_validate_fail(self):
+ self.mock_node_get.side_effect = ic_exc.NotFound()
+ self.assertFalse(self.constraint.validate(
+ "bad_node", self.ctx))
diff --git a/heat/tests/clients/test_keystone_client.py b/heat/tests/clients/test_keystone_client.py
index 21b96205c..5affb7b66 100644
--- a/heat/tests/clients/test_keystone_client.py
+++ b/heat/tests/clients/test_keystone_client.py
@@ -11,9 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from keystoneauth1 import exceptions as keystone_exceptions
-import mock
-import six
from heat.common import exception
from heat.engine.clients.os import keystone
@@ -21,6 +21,39 @@ from heat.engine.clients.os.keystone import keystone_constraints as ks_constr
from heat.tests import common
+class KeystoneClientParseEntityTest(common.HeatTestCase):
+
+ sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_parse_entity_with_domain(self, client_keystone):
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+ client_plugin.get_domain_id = mock.MagicMock()
+ client_plugin.get_domain_id.return_value = self.sample_uuid
+ self.assertEqual(client_plugin.parse_entity_with_domain(
+ 'entity{domain}', 'entity_type'), ('entity', self.sample_uuid)
+ )
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_parse_entity_without_domain(self, client_keystone):
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+ client_plugin.get_domain_id = mock.MagicMock()
+ client_plugin.get_domain_id.return_value = self.sample_uuid
+ self.assertEqual(client_plugin.parse_entity_with_domain(
+ 'entity', 'entity_type'), ('entity', None)
+ )
+
+ def setUp(self):
+ super(KeystoneClientParseEntityTest, self).setUp()
+ self._client = mock.MagicMock()
+
+
class KeystoneRoleConstraintTest(common.HeatTestCase):
def test_expected_exceptions(self):
@@ -266,7 +299,7 @@ class KeystoneClientPluginServiceTest(common.HeatTestCase):
msg = ("Keystone has more than one service with same name "
"%s. Please use service id instead of name" %
self.sample_name)
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.services.get,
self.sample_name)
@@ -290,7 +323,7 @@ class KeystoneClientPluginServiceTest(common.HeatTestCase):
self.sample_name)
msg = ("The KeystoneService (%(name)s) could not be found." %
{'name': self.sample_name})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.services.get,
self.sample_name)
@@ -302,11 +335,16 @@ class KeystoneClientPluginRoleTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_role'
+ sample_name_and_domain = 'sample_role{sample_domain}'
+ sample_domain_uuid = '577e8273-60a7-4c41-b683-fdb0bc7cd152'
+ sample_domain_name = 'sample_domain'
+ sample_name_and_domain_invalid_input = 'sample_role@@'
def _get_mock_role(self):
role = mock.MagicMock()
role.id = self.sample_uuid
role.name = self.sample_name
+ role.name_and_domain = self.sample_name_and_domain
return role
def setUp(self):
@@ -347,6 +385,29 @@ class KeystoneClientPluginRoleTest(common.HeatTestCase):
self._client.client.roles.get,
self.sample_name)
self._client.client.roles.list.assert_called_once_with(
+ domain=None, name=self.sample_name)
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_role_id_with_name_and_domain(self, client_keystone):
+ self._client.client.roles.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.roles.list.return_value = [
+ self._get_mock_role()
+ ]
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+
+ self.assertEqual(self.sample_uuid, client_plugin.get_role_id(
+ self.sample_name_and_domain))
+
+ self.assertRaises(keystone_exceptions.NotFound,
+ self._client.client.roles.get,
+ self.sample_name)
+ self._client.client.roles.list.assert_called_once_with(
+ domain=client_plugin.get_domain_id(self.sample_domain_uuid),
name=self.sample_name)
@mock.patch.object(keystone.KeystoneClientPlugin, 'client')
@@ -366,23 +427,67 @@ class KeystoneClientPluginRoleTest(common.HeatTestCase):
self.sample_name)
msg = ("The KeystoneRole (%(name)s) could not be found." %
{'name': self.sample_name})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
+ self.assertRaises(keystone_exceptions.NotFound,
+ self._client.client.roles.get,
+ self.sample_name)
+ self._client.client.roles.list.assert_called_once_with(
+ domain=None, name=self.sample_name)
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_role_id_with_domain_not_found(self, client_keystone):
+ self._client.client.roles.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.roles.list.return_value = [
+ ]
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+
+ ex = self.assertRaises(exception.EntityNotFound,
+ client_plugin.get_role_id,
+ self.sample_name_and_domain)
+ msg = ("The KeystoneRole (%(name)s) could not be found." %
+ {'name': self.sample_name})
+ self.assertEqual(msg, str(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.roles.get,
self.sample_name)
self._client.client.roles.list.assert_called_once_with(
+ domain=client_plugin.get_domain_id(self.sample_domain_uuid),
name=self.sample_name)
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_role_id_with_name_and_domain_invalid_input(self,
+ client_keystone):
+ self._client.client.roles.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.roles.list.return_value = []
-class KeystoneClientPluginProjectTest(common.HeatTestCase):
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+ self.assertRaises(exception.EntityNotFound,
+ client_plugin.get_role_id,
+ self.sample_name_and_domain_invalid_input)
+
+class KeystoneClientPluginProjectTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_project'
+ sample_name_and_domain = 'sample_project{sample_domain}'
+ sample_domain_uuid = '577e8273-60a7-4c41-b683-fdb0bc7cd152'
+ sample_domain_name = 'sample_domain'
+ sample_name_and_domain_invalid_input = 'sample_project@@'
def _get_mock_project(self):
project = mock.MagicMock()
project.id = self.sample_uuid
project.name = self.sample_name
+ project.name_and_domain = self.sample_name_and_domain
return project
def setUp(self):
@@ -423,6 +528,29 @@ class KeystoneClientPluginProjectTest(common.HeatTestCase):
self._client.client.projects.get,
self.sample_name)
self._client.client.projects.list.assert_called_once_with(
+ domain=None, name=self.sample_name)
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_project_id_with_name_and_domain(self, client_keystone):
+ self._client.client.projects.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.projects.list.return_value = [
+ self._get_mock_project()
+ ]
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+
+ self.assertEqual(self.sample_uuid, client_plugin.get_project_id(
+ self.sample_name_and_domain))
+
+ self.assertRaises(keystone_exceptions.NotFound,
+ self._client.client.projects.get,
+ self.sample_name)
+ self._client.client.projects.list.assert_called_once_with(
+ domain=client_plugin.get_domain_id(self.sample_domain_uuid),
name=self.sample_name)
@mock.patch.object(keystone.KeystoneClientPlugin, 'client')
@@ -442,13 +570,52 @@ class KeystoneClientPluginProjectTest(common.HeatTestCase):
self.sample_name)
msg = ("The KeystoneProject (%(name)s) could not be found." %
{'name': self.sample_name})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.projects.get,
self.sample_name)
self._client.client.projects.list.assert_called_once_with(
+ domain=None, name=self.sample_name)
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_project_id_with_domain_not_found(self, client_keystone):
+ self._client.client.projects.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.projects.list.return_value = []
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+
+ ex = self.assertRaises(exception.EntityNotFound,
+ client_plugin.get_project_id,
+ self.sample_name_and_domain)
+ msg = ("The KeystoneProject (%(name)s) could not be found." %
+ {'name': self.sample_name})
+ self.assertEqual(msg, str(ex))
+ self.assertRaises(keystone_exceptions.NotFound,
+ self._client.client.projects.get,
+ self.sample_name)
+ self._client.client.projects.list.assert_called_once_with(
+ domain=client_plugin.get_domain_id(self.sample_domain_uuid),
name=self.sample_name)
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_project_id_with_name_and_domain_invalid_input(
+ self, client_keystone):
+ self._client.client.projects.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.projects.list.return_value = []
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+ self.assertRaises(exception.EntityNotFound,
+ client_plugin.get_project_id,
+ self.sample_name_and_domain_invalid_input)
+
class KeystoneClientPluginDomainTest(common.HeatTestCase):
@@ -518,7 +685,7 @@ class KeystoneClientPluginDomainTest(common.HeatTestCase):
self.sample_name)
msg = ("The KeystoneDomain (%(name)s) could not be found." %
{'name': self.sample_name})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.domains.get,
self.sample_name)
@@ -530,11 +697,16 @@ class KeystoneClientPluginGroupTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_group'
+ sample_name_and_domain = 'sample_group{sample_domain}'
+ sample_domain_uuid = '577e8273-60a7-4c41-b683-fdb0bc7cd152'
+ sample_domain_name = 'sample_domain'
+ sample_name_and_domain_invalid_input = 'sample_group@@'
def _get_mock_group(self):
group = mock.MagicMock()
group.id = self.sample_uuid
group.name = self.sample_name
+ group.name_and_domain = self.sample_name_and_domain
return group
def setUp(self):
@@ -575,6 +747,29 @@ class KeystoneClientPluginGroupTest(common.HeatTestCase):
self._client.client.groups.get,
self.sample_name)
self._client.client.groups.list.assert_called_once_with(
+ domain=None, name=self.sample_name)
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_group_id_with_name_and_domain(self, client_keystone):
+ self._client.client.groups.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.groups.list.return_value = [
+ self._get_mock_group()
+ ]
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+
+ self.assertEqual(self.sample_uuid, client_plugin.get_group_id(
+ self.sample_name_and_domain))
+
+ self.assertRaises(keystone_exceptions.NotFound,
+ self._client.client.groups.get,
+ self.sample_name)
+ self._client.client.groups.list.assert_called_once_with(
+ domain=client_plugin.get_domain_id(self.sample_domain_uuid),
name=self.sample_name)
@mock.patch.object(keystone.KeystoneClientPlugin, 'client')
@@ -594,23 +789,68 @@ class KeystoneClientPluginGroupTest(common.HeatTestCase):
self.sample_name)
msg = ("The KeystoneGroup (%(name)s) could not be found." %
{'name': self.sample_name})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.groups.get,
self.sample_name)
self._client.client.groups.list.assert_called_once_with(
+ domain=None, name=self.sample_name)
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_group_id_with_domain_not_found(self, client_keystone):
+ self._client.client.groups.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.groups.list.return_value = [
+ ]
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+
+ ex = self.assertRaises(exception.EntityNotFound,
+ client_plugin.get_group_id,
+ self.sample_name_and_domain)
+ msg = ("The KeystoneGroup (%(name)s) could not be found." %
+ {'name': self.sample_name})
+ self.assertEqual(msg, str(ex))
+ self.assertRaises(keystone_exceptions.NotFound,
+ self._client.client.groups.get,
+ self.sample_name)
+ self._client.client.groups.list.assert_called_once_with(
+ domain=client_plugin.get_domain_id(self.sample_domain_uuid),
name=self.sample_name)
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_group_id_with_name_and_domain_invalid_input(
+ self, client_keystone):
+ self._client.client.groups.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.groups.list.return_value = []
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+ self.assertRaises(exception.EntityNotFound,
+ client_plugin.get_group_id,
+ self.sample_name_and_domain_invalid_input)
+
class KeystoneClientPluginUserTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_user'
+ sample_name_and_domain = 'sample_user{sample_domain}'
+ sample_domain_uuid = '577e8273-60a7-4c41-b683-fdb0bc7cd152'
+ sample_domain_name = 'sample_domain'
+ sample_name_and_domain_invalid_input = 'sample_user@@'
def _get_mock_user(self):
user = mock.MagicMock()
user.id = self.sample_uuid
user.name = self.sample_name
+ user.name_and_domain = self.sample_name_and_domain
return user
def setUp(self):
@@ -620,7 +860,6 @@ class KeystoneClientPluginUserTest(common.HeatTestCase):
@mock.patch.object(keystone.KeystoneClientPlugin, 'client')
def test_get_user_id(self, client_keystone):
self._client.client.users.get.return_value = self._get_mock_user()
-
client_keystone.return_value = self._client
client_plugin = keystone.KeystoneClientPlugin(
context=mock.MagicMock()
@@ -635,9 +874,7 @@ class KeystoneClientPluginUserTest(common.HeatTestCase):
def test_get_user_id_with_name(self, client_keystone):
self._client.client.users.get.side_effect = (keystone_exceptions
.NotFound)
- self._client.client.users.list.return_value = [
- self._get_mock_user()
- ]
+ self._client.client.users.find.return_value = self._get_mock_user()
client_keystone.return_value = self._client
client_plugin = keystone.KeystoneClientPlugin(
@@ -649,15 +886,34 @@ class KeystoneClientPluginUserTest(common.HeatTestCase):
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.users.get,
self.sample_name)
- self._client.client.users.list.assert_called_once_with(
+ self._client.client.users.find.assert_called_once_with(
+ domain_id=None, name=self.sample_name)
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_user_id_with_name_and_domain(self, client_keystone):
+ self._client.client.users.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.users.find.return_value = self._get_mock_user()
+
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock())
+ self.assertEqual(self.sample_uuid, client_plugin.get_user_id(
+ self.sample_name_and_domain))
+
+ self.assertRaises(keystone_exceptions.NotFound,
+ self._client.client.users.get,
+ self.sample_name)
+ self._client.client.users.find.assert_called_once_with(
+ domain_id=client_plugin.get_domain_id(self.sample_domain_uuid),
name=self.sample_name)
@mock.patch.object(keystone.KeystoneClientPlugin, 'client')
def test_get_user_id_not_found(self, client_keystone):
self._client.client.users.get.side_effect = (keystone_exceptions
.NotFound)
- self._client.client.users.list.return_value = []
-
+ self._client.client.users.find.side_effect = (keystone_exceptions
+ .NotFound)
client_keystone.return_value = self._client
client_plugin = keystone.KeystoneClientPlugin(
context=mock.MagicMock()
@@ -668,12 +924,27 @@ class KeystoneClientPluginUserTest(common.HeatTestCase):
self.sample_name)
msg = ('The KeystoneUser (%(name)s) could not be found.' %
{'name': self.sample_name})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.users.get,
self.sample_name)
- self._client.client.users.list.assert_called_once_with(
- name=self.sample_name)
+ self._client.client.users.find.assert_called_once_with(
+ domain_id=None, name=self.sample_name)
+
+ @mock.patch.object(keystone.KeystoneClientPlugin, 'client')
+ def test_get_user_id_with_name_and_domain_invalid_input(self,
+ client_keystone):
+ self._client.client.users.get.side_effect = (keystone_exceptions
+ .NotFound)
+ self._client.client.users.find.side_effect = (keystone_exceptions
+ .NotFound)
+ client_keystone.return_value = self._client
+ client_plugin = keystone.KeystoneClientPlugin(
+ context=mock.MagicMock()
+ )
+ self.assertRaises(exception.EntityNotFound,
+ client_plugin.get_user_id,
+ self.sample_name_and_domain_invalid_input)
class KeystoneClientPluginRegionTest(common.HeatTestCase):
@@ -719,4 +990,4 @@ class KeystoneClientPluginRegionTest(common.HeatTestCase):
self.sample_name)
msg = ('The KeystoneRegion (%(name)s) could not be found.' %
{'name': self.sample_name})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
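The keystone plugin hunks above all follow one mocking pattern: projects/groups/users.get() is forced to raise NotFound so the plugin falls back to a name lookup filtered by the domain parsed out of a 'name{domain}' string. Below is a standalone sketch of that fallback, assuming a hypothetical parse_entity() helper and passing the parsed domain straight through (Heat itself resolves it to a domain ID first), not the plugin's actual code:

from unittest import mock


class NotFound(Exception):
    pass


def parse_entity(value):
    # Hypothetical parser for the 'name{domain}' convention used in the tests.
    if value.endswith('}') and '{' in value:
        name, domain = value[:-1].split('{', 1)
        return name, domain
    return value, None


def get_project_id(client, value):
    # Try a direct get() first; on NotFound, fall back to a filtered list().
    name, domain = parse_entity(value)
    try:
        return client.projects.get(value).id
    except NotFound:
        matches = client.projects.list(domain=domain, name=name)
        if not matches:
            raise LookupError(value)
        return matches[0].id


client = mock.MagicMock()
client.projects.get.side_effect = NotFound     # force the fallback path
project = mock.MagicMock(id='477e8273-60a7-4c41-b683-fdb0bc7cd152')
client.projects.list.return_value = [project]

assert get_project_id(client, 'sample_project{sample_domain}') == project.id
client.projects.list.assert_called_once_with(domain='sample_domain',
                                              name='sample_project')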
diff --git a/heat/tests/clients/test_magnum_client.py b/heat/tests/clients/test_magnum_client.py
index 97bf939ce..e0468308c 100644
--- a/heat/tests/clients/test_magnum_client.py
+++ b/heat/tests/clients/test_magnum_client.py
@@ -11,8 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from magnumclient import exceptions as mc_exc
-import mock
from heat.engine.clients.os import magnum as mc
from heat.tests import common
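The same two-line import change repeats across most of the test modules in this patch: the third-party mock package is replaced by the standard library's unittest.mock (part of Python since 3.3), which provides the same API used here (MagicMock, patch.object, call, and friends). A minimal sketch with an illustrative class, not Heat code:

from unittest import mock


class Widget:
    def ping(self):
        return 'real'


# patch.object swaps the attribute for a MagicMock inside the context only.
with mock.patch.object(Widget, 'ping', return_value='fake') as patched:
    assert Widget().ping() == 'fake'
    patched.assert_called_once_with()

assert Widget().ping() == 'real'   # original method restored on exit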
diff --git a/heat/tests/clients/test_manila_client.py b/heat/tests/clients/test_manila_client.py
index a1a986091..5ed2f7a2f 100644
--- a/heat/tests/clients/test_manila_client.py
+++ b/heat/tests/clients/test_manila_client.py
@@ -11,9 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
+from unittest import mock
from manilaclient import exceptions
-import mock
from heat.common import exception as heat_exception
from heat.tests import common
diff --git a/heat/tests/clients/test_mistral_client.py b/heat/tests/clients/test_mistral_client.py
index da3707ba1..b05475423 100644
--- a/heat/tests/clients/test_mistral_client.py
+++ b/heat/tests/clients/test_mistral_client.py
@@ -11,8 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from mistralclient.auth import keystone
-import mock
from heat.common import exception
from heat.engine.clients.os import mistral
diff --git a/heat/tests/clients/test_monasca_client.py b/heat/tests/clients/test_monasca_client.py
index adc4cec06..b5d566d6f 100644
--- a/heat/tests/clients/test_monasca_client.py
+++ b/heat/tests/clients/test_monasca_client.py
@@ -11,10 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
-
-import monascaclient
+from unittest import mock
from heat.common import exception as heat_exception
from heat.engine.clients.os import monasca as client_plugin
@@ -49,8 +46,7 @@ class MonascaClientPluginTest(common.HeatTestCase):
client = plugin.client()
self.assertIsNotNone(client.metrics)
- @mock.patch.object(monascaclient.client, '_session')
- def test_client_uses_session(self, mock_session):
+ def test_client_uses_session(self):
context = mock.MagicMock()
monasca_client = client_plugin.MonascaClientPlugin(context=context)
self.assertIsNotNone(monasca_client._create())
@@ -97,6 +93,6 @@ class MonascaClientPluginNotificationTest(common.HeatTestCase):
self.sample_uuid)
msg = ("The Monasca Notification (%(name)s) could not be found." %
{'name': self.sample_uuid})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
self._client.notifications.get.assert_called_once_with(
notification_id=self.sample_uuid)
diff --git a/heat/tests/clients/test_neutron_client.py b/heat/tests/clients/test_neutron_client.py
index 1cbd61526..bc470b79b 100644
--- a/heat/tests/clients/test_neutron_client.py
+++ b/heat/tests/clients/test_neutron_client.py
@@ -11,9 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
+from unittest import mock
-import mock
from neutronclient.common import exceptions as qe
from heat.common import exception
@@ -66,28 +65,6 @@ class NeutronClientPluginTest(NeutronClientPluginTestCase):
self.neutron_client.list_security_groups.return_value = fake_list
self.assertEqual(expected_groups,
self.neutron_plugin.get_secgroup_uuids(sgs_non_uuid))
- # test only one belong to the tenant
- fake_list = {
- 'security_groups': [
- {
- 'tenant_id': 'test_tenant_id',
- 'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
- 'name': 'security_group_1',
- 'security_group_rules': [],
- 'description': 'no protocol'
- },
- {
- 'tenant_id': 'not_test_tenant_id',
- 'id': '384ccd91-447c-4d83-832c-06974a7d3d05',
- 'name': 'security_group_1',
- 'security_group_rules': [],
- 'description': 'no protocol'
- }
- ]
- }
- self.neutron_client.list_security_groups.return_value = fake_list
- self.assertEqual(expected_groups,
- self.neutron_plugin.get_secgroup_uuids(sgs_non_uuid))
# test there are two securityGroups with same name, and the two
# all belong to the tenant
fake_list = {
@@ -191,7 +168,7 @@ class NeutronConstraintsValidate(common.HeatTestCase):
)
expected = ("The neutron extension (%s) could not be found." %
constraint.extension)
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
self.assertTrue(constraint.validate("foo", ctx))
self.assertFalse(constraint.validate("bar", ctx))
mock_find.assert_has_calls(
diff --git a/heat/tests/clients/test_nova_client.py b/heat/tests/clients/test_nova_client.py
index 2c28016fe..e78f82667 100644
--- a/heat/tests/clients/test_nova_client.py
+++ b/heat/tests/clients/test_nova_client.py
@@ -13,15 +13,14 @@
"""Tests for :module:'heat.engine.clients.os.nova'."""
import collections
+from unittest import mock
import uuid
-import mock
from novaclient import client as nc
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_serialization import jsonutils as json
import requests
-import six
from heat.common import exception
from heat.engine.clients.os import nova
@@ -45,27 +44,21 @@ class NovaClientPluginTest(NovaClientPluginTestCase):
def test_create(self):
context = utils.dummy_context()
- ext_mock = self.patchobject(nc, 'discover_extensions')
plugin = context.clients.client_plugin('nova')
plugin.max_microversion = '2.53'
client = plugin.client()
- ext_mock.assert_called_once_with('2.53')
self.assertIsNotNone(client.servers)
def test_v2_26_create(self):
ctxt = utils.dummy_context()
- ext_mock = self.patchobject(nc, 'discover_extensions')
self.patchobject(nc, 'Client', return_value=mock.Mock())
plugin = ctxt.clients.client_plugin('nova')
plugin.max_microversion = '2.53'
plugin.client(version='2.26')
- ext_mock.assert_called_once_with('2.26')
-
def test_v2_26_create_failed(self):
ctxt = utils.dummy_context()
- self.patchobject(nc, 'discover_extensions')
plugin = ctxt.clients.client_plugin('nova')
plugin.max_microversion = '2.23'
client_stub = mock.Mock()
@@ -125,28 +118,21 @@ class NovaClientPluginTest(NovaClientPluginTestCase):
def test_get_host(self):
"""Tests the get_host function."""
- my_host_name = 'myhost'
+ my_hypervisor_hostname = 'myhost'
my_host = mock.MagicMock()
- my_host.host_name = my_host_name
- my_host.service = 'compute'
-
- wrong_host = mock.MagicMock()
- wrong_host.host_name = 'wrong_host'
- wrong_host.service = 'compute'
- self.nova_client.hosts.list.side_effect = [
- [my_host],
- [wrong_host],
- exception.EntityNotFound(entity='Host', name='nohost')
+ my_host.hypervisor_hostname = my_hypervisor_hostname
+
+ self.nova_client.hypervisors.search.side_effect = [
+ my_host, nova_exceptions.NotFound(404)
]
- self.assertEqual(my_host, self.nova_plugin.get_host(my_host_name))
- self.assertRaises(exception.EntityNotFound,
- self.nova_plugin.get_host, my_host_name)
- self.assertRaises(exception.EntityNotFound,
+ self.assertEqual(my_host,
+ self.nova_plugin.get_host(my_hypervisor_hostname))
+ self.assertRaises(nova_exceptions.NotFound,
self.nova_plugin.get_host, 'nohost')
- self.assertEqual(3, self.nova_client.hosts.list.call_count)
- calls = [mock.call(), mock.call(), mock.call()]
+ self.assertEqual(2, self.nova_client.hypervisors.search.call_count)
+ calls = [mock.call('myhost'), mock.call('nohost')]
self.assertEqual(calls,
- self.nova_client.hosts.list.call_args_list)
+ self.nova_client.hypervisors.search.call_args_list)
def test_get_keypair(self):
"""Tests the get_keypair function."""
@@ -187,6 +173,26 @@ class NovaClientPluginTest(NovaClientPluginTestCase):
observed = self.nova_plugin.get_status(server)
self.assertEqual('ACTIVE', observed)
+ def test_check_verify_resize_task_state(self):
+ """Tests the check_verify_resize function with resize task_state."""
+ my_server = mock.MagicMock(status='Foo')
+ setattr(my_server, 'OS-EXT-STS:task_state', 'resize_finish')
+ self.nova_client.servers.get.side_effect = [my_server]
+
+ self.assertEqual(
+ False, self.nova_plugin.check_verify_resize('my_server'))
+
+ def test_check_verify_resize_error(self):
+ """Tests the check_verify_resize function with unknown status."""
+ my_server = mock.MagicMock(status='Foo')
+ setattr(my_server, 'OS-EXT-STS:task_state', 'active')
+ self.nova_client.servers.get.side_effect = [my_server]
+
+ self.assertRaises(
+ exception.ResourceUnknownStatus,
+ self.nova_plugin.check_verify_resize,
+ 'my_server')
+
def _absolute_limits(self):
max_personality = mock.Mock()
max_personality.name = 'maxPersonality'
@@ -394,6 +400,34 @@ class NovaClientPluginUserdataTest(NovaClientPluginTestCase):
self.assertIn('useradd', data)
self.assertIn('ec2-user', data)
+ def test_build_userdata_with_ignition(self):
+ metadata = {"os-collect-config": {"heat": {"password": "***"}}}
+ userdata = '{"ignition": {"version": "3.0"}, "storage": {"files": []}}'
+ ud_format = 'SOFTWARE_CONFIG'
+ data = self.nova_plugin.build_userdata(metadata,
+ userdata=userdata,
+ user_data_format=ud_format)
+ ig = json.loads(data)
+ self.assertEqual("/var/lib/heat-cfntools/cfn-init-data",
+ ig["storage"]["files"][0]["path"])
+ self.assertEqual("/var/lib/cloud/data/cfn-init-data",
+ ig["storage"]["files"][1]["path"])
+ self.assertEqual("data:,%7B%22os-collect-config%22%3A%20%7B%22heat"
+ "%22%3A%20%7B%22password%22%3A%20%22%2A%2A%2A%22"
+ "%7D%7D%7D",
+ ig["storage"]["files"][0]["contents"]["source"])
+
+ def test_build_userdata_with_invalid_ignition(self):
+ metadata = {"os-collect-config": {"heat": {"password": "***"}}}
+ userdata = '{"ignition": {"version": "3.0"}, "storage": []}'
+ ud_format = 'SOFTWARE_CONFIG'
+
+ self.assertRaises(ValueError,
+ self.nova_plugin.build_userdata,
+ metadata,
+ userdata=userdata,
+ user_data_format=ud_format)
+
class NovaClientPluginMetadataTest(NovaClientPluginTestCase):
@@ -432,7 +466,7 @@ class NovaClientPluginMetadataTest(NovaClientPluginTestCase):
"""Prove that the user can only pass in a dict to nova metadata."""
excp = self.assertRaises(exception.StackValidationFailed,
self.nova_plugin.meta_serialize, "foo")
- self.assertIn('metadata needs to be a Map', six.text_type(excp))
+ self.assertIn('metadata needs to be a Map', str(excp))
def test_serialize_combined(self):
original = {
@@ -517,7 +551,9 @@ class HostConstraintTest(common.HeatTestCase):
def test_validation_error(self):
self.mock_get_host.side_effect = exception.EntityNotFound(
entity='Host', name='bar')
- self.assertFalse(self.constraint.validate("bar", self.ctx))
+ self.assertRaises(
+ exception.EntityNotFound,
+ self.constraint.validate, "bar", self.ctx)
class KeypairConstraintTest(common.HeatTestCase):
@@ -626,30 +662,3 @@ class ConsoleUrlsTest(common.HeatTestCase):
self.console_method.side_effect = exc("spam")
self._test_get_console_url_tolerate_exception('spam')
-
-
-class NovaClientPluginExtensionsTest(NovaClientPluginTestCase):
- """Tests for extensions in novaclient."""
-
- def test_has_no_extensions(self):
- self.nova_client.list_extensions.show_all.return_value = []
- self.assertFalse(self.nova_plugin.has_extension(
- "os-virtual-interfaces"))
-
- def test_has_no_interface_extensions(self):
- mock_extension = mock.Mock()
- p = mock.PropertyMock(return_value='os-xxxx')
- type(mock_extension).alias = p
- self.nova_client.list_extensions.show_all.return_value = [
- mock_extension]
- self.assertFalse(self.nova_plugin.has_extension(
- "os-virtual-interfaces"))
-
- def test_has_os_interface_extension(self):
- mock_extension = mock.Mock()
- p = mock.PropertyMock(return_value='os-virtual-interfaces')
- type(mock_extension).alias = p
- self.nova_client.list_extensions.show_all.return_value = [
- mock_extension]
- self.assertTrue(self.nova_plugin.has_extension(
- "os-virtual-interfaces"))
diff --git a/heat/tests/clients/test_sahara_client.py b/heat/tests/clients/test_sahara_client.py
index a69450b7c..d0d0d3438 100644
--- a/heat/tests/clients/test_sahara_client.py
+++ b/heat/tests/clients/test_sahara_client.py
@@ -11,11 +11,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import uuid
-import mock
from saharaclient.api import base as sahara_base
-import six
from heat.common import exception
from heat.engine.clients.os import sahara
@@ -86,7 +85,7 @@ class SaharaUtilsTest(common.HeatTestCase):
e = self.assertRaises(exception.Error,
self.sahara_plugin.find_resource_by_name,
'images', img_name)
- self.assertEqual(expected_error, six.text_type(e))
+ self.assertEqual(expected_error, str(e))
self.sahara_client.images.find.assert_called_once_with(name=img_name)
@@ -152,7 +151,7 @@ class SaharaUtilsTest(common.HeatTestCase):
plugin_name, '1.2.3')
self.assertEqual("Requested plugin 'vanilla' doesn't support version "
"'1.2.3'. Allowed versions are 1.2.1, 2.6.0, 2.7.1",
- six.text_type(ex))
+ str(ex))
calls = [mock.call(plugin_name), mock.call(plugin_name)]
self.sahara_client.plugins.get.assert_has_calls(calls)
diff --git a/heat/tests/clients/test_sdk_client.py b/heat/tests/clients/test_sdk_client.py
index 98f40af07..d2fd238f0 100644
--- a/heat/tests/clients/test_sdk_client.py
+++ b/heat/tests/clients/test_sdk_client.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from openstack import exceptions
diff --git a/heat/tests/clients/test_senlin_client.py b/heat/tests/clients/test_senlin_client.py
index 9510050d1..0e3bdf3d8 100644
--- a/heat/tests/clients/test_senlin_client.py
+++ b/heat/tests/clients/test_senlin_client.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from openstack import exceptions
from heat.engine.clients.os import senlin as senlin_plugin
diff --git a/heat/tests/clients/test_swift_client.py b/heat/tests/clients/test_swift_client.py
index 4b6d1060f..90272b6df 100644
--- a/heat/tests/clients/test_swift_client.py
+++ b/heat/tests/clients/test_swift_client.py
@@ -12,8 +12,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
import pytz
from testtools import matchers
diff --git a/heat/tests/clients/test_vitrage_client.py b/heat/tests/clients/test_vitrage_client.py
new file mode 100644
index 000000000..8e9c46b07
--- /dev/null
+++ b/heat/tests/clients/test_vitrage_client.py
@@ -0,0 +1,24 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat.tests import common
+from heat.tests import utils
+
+
+class VitrageClientPluginTest(common.HeatTestCase):
+
+ def test_create(self):
+ context = utils.dummy_context()
+ plugin = context.clients.client_plugin('vitrage')
+ client = plugin.client()
+ self.assertIsNotNone(client.template.list)
diff --git a/heat/tests/clients/test_zaqar_client.py b/heat/tests/clients/test_zaqar_client.py
index ffd7b6aaa..bd588c7c9 100644
--- a/heat/tests/clients/test_zaqar_client.py
+++ b/heat/tests/clients/test_zaqar_client.py
@@ -11,10 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os import zaqar
-
from heat.tests import common
from heat.tests import utils
diff --git a/heat/tests/clients/test_zun_client.py b/heat/tests/clients/test_zun_client.py
index ae022a223..e20326597 100644
--- a/heat/tests/clients/test_zun_client.py
+++ b/heat/tests/clients/test_zun_client.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.tests import common
from heat.tests import utils
diff --git a/heat/tests/constraints/test_common_constraints.py b/heat/tests/constraints/test_common_constraints.py
index 3270d8400..7f993fada 100644
--- a/heat/tests/constraints/test_common_constraints.py
+++ b/heat/tests/constraints/test_common_constraints.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.engine.constraint import common_constraints as cc
from heat.tests import common
@@ -257,7 +256,7 @@ class CRONExpressionConstraint(common.HeatTestCase):
"is not acceptable, out of range") % cron_expression
self.assertFalse(self.constraint.validate(cron_expression, self.ctx))
self.assertEqual(expect,
- six.text_type(self.constraint._error_message))
+ str(self.constraint._error_message))
def test_validation_columns_length_error(self):
cron_expression = "* *"
@@ -266,7 +265,7 @@ class CRONExpressionConstraint(common.HeatTestCase):
"iteratorexpression.")
self.assertFalse(self.constraint.validate(cron_expression, self.ctx))
self.assertEqual(expect,
- six.text_type(self.constraint._error_message))
+ str(self.constraint._error_message))
class TimezoneConstraintTest(common.HeatTestCase):
@@ -286,7 +285,7 @@ class TimezoneConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(timezone, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_none(self):
@@ -312,7 +311,7 @@ class DNSNameConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(dns_name, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_error_empty_component(self):
@@ -323,7 +322,7 @@ class DNSNameConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(dns_name, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_error_special_char(self):
@@ -336,7 +335,7 @@ class DNSNameConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(dns_name, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_error_tld_allnumeric(self):
@@ -348,7 +347,7 @@ class DNSNameConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(dns_name, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_none(self):
@@ -372,7 +371,7 @@ class DNSDomainConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(dns_domain, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_none(self):
@@ -396,7 +395,7 @@ class FIPDNSNameConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(dns_name, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_none(self):
@@ -422,7 +421,7 @@ class ExpirationConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(expiration, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_before_current_time(self):
@@ -433,7 +432,7 @@ class ExpirationConstraintTest(common.HeatTestCase):
self.assertFalse(self.constraint.validate(expiration, self.ctx))
self.assertEqual(
expected,
- six.text_type(self.constraint._error_message)
+ str(self.constraint._error_message)
)
def test_validation_none(self):
diff --git a/heat/tests/convergence/framework/engine_wrapper.py b/heat/tests/convergence/framework/engine_wrapper.py
index 0200dec53..b1dc5e6b4 100644
--- a/heat/tests/convergence/framework/engine_wrapper.py
+++ b/heat/tests/convergence/framework/engine_wrapper.py
@@ -11,8 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
from heat.db.sqlalchemy import api as db_api
from heat.engine import service
from heat.engine import stack
@@ -49,7 +47,7 @@ class Engine(message_processor.MessageProcessor):
"""Converts the scenario template into hot template."""
hot_tmpl = {"heat_template_version": "2013-05-23"}
resources = {}
- for res_name, res_def in six.iteritems(scenario_tmpl.resources):
+ for res_name, res_def in scenario_tmpl.resources.items():
props = getattr(res_def, 'properties')
depends = getattr(res_def, 'depends_on')
res_defn = {"type": "OS::Heat::TestResource"}
diff --git a/heat/tests/convergence/framework/message_processor.py b/heat/tests/convergence/framework/message_processor.py
index 99d0bf281..485e6db3e 100644
--- a/heat/tests/convergence/framework/message_processor.py
+++ b/heat/tests/convergence/framework/message_processor.py
@@ -14,7 +14,6 @@
import collections
import functools
import inspect
-import six
from oslo_log import log as logging
from oslo_messaging import rpc
@@ -30,12 +29,9 @@ def asynchronous(function):
run on a future iteration of the event loop.
"""
- if six.PY2:
- arg_names = inspect.getargspec(function).args
- else:
- sig = inspect.signature(function)
- arg_names = [name for name, param in sig.parameters.items()
- if param.kind == param.POSITIONAL_OR_KEYWORD]
+ sig = inspect.signature(function)
+ arg_names = [name for name, param in sig.parameters.items()
+ if param.kind == param.POSITIONAL_OR_KEYWORD]
MessageData = collections.namedtuple(function.__name__, arg_names[1:])
@functools.wraps(function)
@@ -112,4 +108,5 @@ class MessageProcessor(object):
"""Delete all the messages from the queue."""
self.queue.clear()
+
__all__ = ['MessageProcessor', 'asynchronous']
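The hunk above drops the six.PY2 getargspec branch; on Python 3, inspect.signature() alone is enough to collect the positional-or-keyword parameter names that feed the MessageData namedtuple. A quick stdlib-only illustration with a made-up function:

import collections
import inspect


def create_stack(self, stack_name, template, params=None):
    pass


sig = inspect.signature(create_stack)
arg_names = [name for name, param in sig.parameters.items()
             if param.kind == param.POSITIONAL_OR_KEYWORD]
# Mirrors MessageData = namedtuple(function.__name__, arg_names[1:]) -- the
# leading 'self' is dropped, the remaining names become the message fields.
MessageData = collections.namedtuple(create_stack.__name__, arg_names[1:])

print(MessageData(stack_name='s1', template={}, params={}))
# -> create_stack(stack_name='s1', template={}, params={})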
diff --git a/heat/tests/convergence/framework/processes.py b/heat/tests/convergence/framework/processes.py
index 5104a62b3..992b2b4c0 100644
--- a/heat/tests/convergence/framework/processes.py
+++ b/heat/tests/convergence/framework/processes.py
@@ -41,4 +41,5 @@ class Processes(object):
self.engine.clear()
self.worker.clear()
+
Processes()
diff --git a/heat/tests/convergence/framework/reality.py b/heat/tests/convergence/framework/reality.py
index 4951541dc..dcd7a79c7 100644
--- a/heat/tests/convergence/framework/reality.py
+++ b/heat/tests/convergence/framework/reality.py
@@ -48,4 +48,5 @@ class RealityStore(object):
prop_name)
return res_data.value
+
reality = RealityStore()
diff --git a/heat/tests/convergence/framework/worker_wrapper.py b/heat/tests/convergence/framework/worker_wrapper.py
index 7ca39ada1..841cdb361 100644
--- a/heat/tests/convergence/framework/worker_wrapper.py
+++ b/heat/tests/convergence/framework/worker_wrapper.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine import worker
from heat.tests.convergence.framework import message_processor
diff --git a/heat/tests/convergence/test_converge.py b/heat/tests/convergence/test_converge.py
index 081a8d43c..8e1b10480 100644
--- a/heat/tests/convergence/test_converge.py
+++ b/heat/tests/convergence/test_converge.py
@@ -11,13 +11,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_config import cfg
+
+from heat.common import context
from heat.engine import resource
from heat.tests import common
from heat.tests.convergence.framework import fake_resource
from heat.tests.convergence.framework import processes
from heat.tests.convergence.framework import scenario
from heat.tests.convergence.framework import testutils
-from oslo_config import cfg
class ScenarioTest(common.HeatTestCase):
@@ -27,6 +29,7 @@ class ScenarioTest(common.HeatTestCase):
def setUp(self):
super(ScenarioTest, self).setUp()
+ self.patchobject(context, 'StoredContext')
resource._register_class('OS::Heat::TestResource',
fake_resource.TestResource)
self.procs = processes.Processes()
diff --git a/heat/tests/db/test_migrations.py b/heat/tests/db/test_migrations.py
index 69a6b1ff3..0dbf85bf3 100644
--- a/heat/tests/db/test_migrations.py
+++ b/heat/tests/db/test_migrations.py
@@ -20,20 +20,15 @@ properly both upgrading and downgrading, and that no data loss occurs
if possible.
"""
-import datetime
import fixtures
import os
-import uuid
from migrate.versioning import repository
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
from oslotest import base as test_base
-import six
import sqlalchemy
import testtools
@@ -183,523 +178,6 @@ class HeatMigrationsCheckers(test_migrations.WalkVersionsMixin,
self.assertEqual(sorted(members), sorted(index_columns))
- def _pre_upgrade_031(self, engine):
- raw_template = utils.get_table(engine, 'raw_template')
- templ = []
- for i in range(300, 303, 1):
- t = dict(id=i, template='{}', files='{}')
- engine.execute(raw_template.insert(), [t])
- templ.append(t)
-
- user_creds = utils.get_table(engine, 'user_creds')
- user = [dict(id=4, username='angus', password='notthis',
- tenant='mine', auth_url='bla',
- tenant_id=str(uuid.uuid4()),
- trust_id='',
- trustor_user_id='')]
- engine.execute(user_creds.insert(), user)
-
- stack = utils.get_table(engine, 'stack')
- stack_ids = [('967aaefb-152e-405d-b13a-35d4c816390c', 0),
- ('9e9deba9-a303-4f29-84d3-c8165647c47e', 1),
- ('9a4bd1ec-8b21-46cd-964a-f66cb1cfa2f9', 2)]
- data = [dict(id=ll_id, name='fruity',
- raw_template_id=templ[templ_id]['id'],
- user_creds_id=user[0]['id'],
- username='angus', disable_rollback=True)
- for ll_id, templ_id in stack_ids]
-
- engine.execute(stack.insert(), data)
- return data
-
- def _check_031(self, engine, data):
- self.assertColumnExists(engine, 'stack_lock', 'stack_id')
- self.assertColumnExists(engine, 'stack_lock', 'engine_id')
- self.assertColumnExists(engine, 'stack_lock', 'created_at')
- self.assertColumnExists(engine, 'stack_lock', 'updated_at')
-
- def _check_034(self, engine, data):
- self.assertColumnExists(engine, 'raw_template', 'files')
-
- def _pre_upgrade_035(self, engine):
- # The stacks id are for the 33 version migration
- event_table = utils.get_table(engine, 'event')
- data = [{
- 'id': '22222222-152e-405d-b13a-35d4c816390c',
- 'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
- 'resource_action': 'Test',
- 'resource_status': 'TEST IN PROGRESS',
- 'resource_name': 'Testing Resource',
- 'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
- 'resource_status_reason': '',
- 'resource_type': '',
- 'resource_properties': None,
- 'created_at': timeutils.utcnow()},
- {'id': '11111111-152e-405d-b13a-35d4c816390c',
- 'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
- 'resource_action': 'Test',
- 'resource_status': 'TEST COMPLETE',
- 'resource_name': 'Testing Resource',
- 'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
- 'resource_status_reason': '',
- 'resource_type': '',
- 'resource_properties': None,
- 'created_at': timeutils.utcnow() +
- datetime.timedelta(days=5)}]
- engine.execute(event_table.insert(), data)
- return data
-
- def _check_035(self, engine, data):
- self.assertColumnExists(engine, 'event', 'id')
- self.assertColumnExists(engine, 'event', 'uuid')
-
- event_table = utils.get_table(engine, 'event')
- events_in_db = list(event_table.select().execute())
- last_id = 0
- for index, event in enumerate(data):
- last_id = index + 1
- self.assertEqual(last_id, events_in_db[index].id)
- self.assertEqual(event['id'], events_in_db[index].uuid)
-
- # Check that the autoincremental id is ok
- data = [{
- 'uuid': '33333333-152e-405d-b13a-35d4c816390c',
- 'stack_id': '967aaefb-152e-405d-b13a-35d4c816390c',
- 'resource_action': 'Test',
- 'resource_status': 'TEST COMPLEATE AGAIN',
- 'resource_name': 'Testing Resource',
- 'physical_resource_id': '3465d1ec-8b21-46cd-9dgf-f66cttrh53f9',
- 'resource_status_reason': '',
- 'resource_type': '',
- 'resource_properties': None,
- 'created_at': timeutils.utcnow()}]
- result = engine.execute(event_table.insert(), data)
- self.assertEqual(last_id + 1, result.inserted_primary_key[0])
-
- def _check_036(self, engine, data):
- self.assertColumnExists(engine, 'stack', 'stack_user_project_id')
-
- def _pre_upgrade_037(self, engine):
- raw_template = utils.get_table(engine, 'raw_template')
- templ = '''{"heat_template_version": "2013-05-23",
- "parameters": {
- "key_name": {
- "Type": "string"
- }
- }
- }'''
- data = [dict(id=4, template=templ, files='{}')]
- engine.execute(raw_template.insert(), data)
- return data[0]
-
- def _check_037(self, engine, data):
- raw_template = utils.get_table(engine, 'raw_template')
- templs = list(raw_template.select().
- where(raw_template.c.id == str(data['id'])).
- execute())
- template = jsonutils.loads(templs[0].template)
- data_template = jsonutils.loads(data['template'])
- self.assertNotIn('Type', template['parameters']['key_name'])
- self.assertIn('type', template['parameters']['key_name'])
- self.assertEqual(template['parameters']['key_name']['type'],
- data_template['parameters']['key_name']['Type'])
-
- def _check_038(self, engine, data):
- self.assertColumnNotExists(engine, 'software_config', 'io')
-
- def _check_039(self, engine, data):
- self.assertColumnIsNullable(engine, 'stack', 'user_creds_id')
-
- def _check_040(self, engine, data):
- self.assertColumnNotExists(engine, 'software_deployment', 'signal_id')
-
- def _pre_upgrade_041(self, engine):
- raw_template = utils.get_table(engine, 'raw_template')
- templ = '''{"heat_template_version": "2013-05-23",
- "resources": {
- "my_instance": {
- "Type": "OS::Nova::Server"
- }
- },
- "outputs": {
- "instance_ip": {
- "Value": { "get_attr": "[my_instance, networks]" }
- }
- }
- }'''
- data = [dict(id=7, template=templ, files='{}')]
- engine.execute(raw_template.insert(), data)
- return data[0]
-
- def _check_041(self, engine, data):
- raw_template = utils.get_table(engine, 'raw_template')
- templs = list(raw_template.select().
- where(raw_template.c.id == str(data['id'])).
- execute())
- template = jsonutils.loads(templs[0].template)
- self.assertIn('type', template['resources']['my_instance'])
- self.assertNotIn('Type', template['resources']['my_instance'])
- self.assertIn('value', template['outputs']['instance_ip'])
- self.assertNotIn('Value', template['outputs']['instance_ip'])
-
- def _pre_upgrade_043(self, engine):
- raw_template = utils.get_table(engine, 'raw_template')
- templ = '''{"HeatTemplateFormatVersion" : "2012-12-11",
- "Parameters" : {
- "foo" : { "Type" : "String", "NoEcho": "True" },
- "bar" : { "Type" : "String", "NoEcho": "True", "Default": "abc" },
- "blarg" : { "Type" : "String", "Default": "quux" }
- }
- }'''
- data = [dict(id=8, template=templ, files='{}')]
- engine.execute(raw_template.insert(), data)
- return data[0]
-
- def _check_043(self, engine, data):
- raw_template = utils.get_table(engine, 'raw_template')
- templ = list(raw_template.select().
- where(raw_template.c.id == data['id']).execute())
- template = jsonutils.loads(templ[0].template)
- self.assertEqual(template['HeatTemplateFormatVersion'], '2012-12-12')
-
- def _pre_upgrade_045(self, engine):
- raw_template = utils.get_table(engine, 'raw_template')
- templ = []
- for i in range(200, 203, 1):
- t = dict(id=i, template='{}', files='{}')
- engine.execute(raw_template.insert(), [t])
- templ.append(t)
-
- user_creds = utils.get_table(engine, 'user_creds')
- user = [dict(id=6, username='steve', password='notthis',
- tenant='mine', auth_url='bla',
- tenant_id=str(uuid.uuid4()),
- trust_id='',
- trustor_user_id='')]
- engine.execute(user_creds.insert(), user)
-
- stack = utils.get_table(engine, 'stack')
- stack_ids = [('s1', '967aaefb-152e-505d-b13a-35d4c816390c', 0),
- ('s2', '9e9deba9-a303-5f29-84d3-c8165647c47e', 1),
- ('s1*', '9a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9', 2)]
- data = [dict(id=ll_id, name=name,
- raw_template_id=templ[templ_id]['id'],
- user_creds_id=user[0]['id'],
- username='steve', disable_rollback=True)
- for name, ll_id, templ_id in stack_ids]
- data[2]['owner_id'] = '967aaefb-152e-505d-b13a-35d4c816390c'
-
- engine.execute(stack.insert(), data)
- return data
-
- def _check_045(self, engine, data):
- self.assertColumnExists(engine, 'stack', 'backup')
- stack_table = utils.get_table(engine, 'stack')
- stacks_in_db = list(stack_table.select().execute())
- stack_names_in_db = [s.name for s in stacks_in_db]
- # Assert the expected stacks are still there
- for stack in data:
- self.assertIn(stack['name'], stack_names_in_db)
- # And that the backup flag is set as expected
- for stack in stacks_in_db:
- if stack.name.endswith('*'):
- self.assertTrue(stack.backup)
- else:
- self.assertFalse(stack.backup)
-
- def _check_046(self, engine, data):
- self.assertColumnExists(engine, 'resource', 'properties_data')
-
- def _pre_upgrade_047(self, engine):
- raw_template = utils.get_table(engine, 'raw_template')
- templ = []
- for i in range(100, 105, 1):
- t = dict(id=i, template='{}', files='{}')
- engine.execute(raw_template.insert(), [t])
- templ.append(t)
-
- user_creds = utils.get_table(engine, 'user_creds')
- user = [dict(id=7, username='steve', password='notthis',
- tenant='mine', auth_url='bla',
- tenant_id=str(uuid.uuid4()),
- trust_id='',
- trustor_user_id='')]
- engine.execute(user_creds.insert(), user)
-
- stack = utils.get_table(engine, 'stack')
- stack_ids = [
- ('s9', '167aaefb-152e-505d-b13a-35d4c816390c', 0),
- ('n1', '1e9deba9-a303-5f29-84d3-c8165647c47e', 1),
- ('n2', '1e9deba9-a304-5f29-84d3-c8165647c47e', 2),
- ('n3', '1e9deba9-a305-5f29-84d3-c8165647c47e', 3),
- ('s9*', '1a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9', 4)]
- data = [dict(id=ll_id, name=name,
- raw_template_id=templ[tmpl_id]['id'],
- user_creds_id=user[0]['id'],
- owner_id=None,
- backup=False,
- username='steve', disable_rollback=True)
- for name, ll_id, tmpl_id in stack_ids]
- # Make a nested tree s1->s2->s3->s4 with a s1 backup
- data[1]['owner_id'] = '167aaefb-152e-505d-b13a-35d4c816390c'
- data[2]['owner_id'] = '1e9deba9-a303-5f29-84d3-c8165647c47e'
- data[3]['owner_id'] = '1e9deba9-a304-5f29-84d3-c8165647c47e'
- data[4]['owner_id'] = '167aaefb-152e-505d-b13a-35d4c816390c'
- data[4]['backup'] = True
- engine.execute(stack.insert(), data)
- return data
-
- def _check_047(self, engine, data):
- self.assertColumnExists(engine, 'stack', 'nested_depth')
- stack_table = utils.get_table(engine, 'stack')
- stacks_in_db = list(stack_table.select().execute())
- stack_ids_in_db = [s.id for s in stacks_in_db]
-
- # Assert the expected stacks are still there
- for stack in data:
- self.assertIn(stack['id'], stack_ids_in_db)
-
- # And that the depth is set as expected
- def n_depth(sid):
- s = [s for s in stacks_in_db if s.id == sid][0]
- return s.nested_depth
-
- self.assertEqual(0, n_depth('167aaefb-152e-505d-b13a-35d4c816390c'))
- self.assertEqual(1, n_depth('1e9deba9-a303-5f29-84d3-c8165647c47e'))
- self.assertEqual(2, n_depth('1e9deba9-a304-5f29-84d3-c8165647c47e'))
- self.assertEqual(3, n_depth('1e9deba9-a305-5f29-84d3-c8165647c47e'))
- self.assertEqual(0, n_depth('1a4bd1ec-8b21-56cd-964a-f66cb1cfa2f9'))
-
- def _check_049(self, engine, data):
- self.assertColumnExists(engine, 'user_creds', 'region_name')
-
- def _check_051(self, engine, data):
- column_list = [('id', False),
- ('host', False),
- ('topic', False),
- ('binary', False),
- ('hostname', False),
- ('engine_id', False),
- ('report_interval', False),
- ('updated_at', True),
- ('created_at', True),
- ('deleted_at', True)]
- for column in column_list:
- self.assertColumnExists(engine, 'service', column[0])
- if not column[1]:
- self.assertColumnIsNotNullable(engine, 'service', column[0])
- else:
- self.assertColumnIsNullable(engine, 'service', column[0])
-
- def _check_052(self, engine, data):
- self.assertColumnExists(engine, 'stack', 'convergence')
-
- def _check_055(self, engine, data):
- self.assertColumnExists(engine, 'stack', 'prev_raw_template_id')
- self.assertColumnExists(engine, 'stack', 'current_traversal')
- self.assertColumnExists(engine, 'stack', 'current_deps')
-
- def _pre_upgrade_056(self, engine):
- raw_template = utils.get_table(engine, 'raw_template')
- templ = []
- for i in range(900, 903, 1):
- t = dict(id=i, template='{}', files='{}')
- engine.execute(raw_template.insert(), [t])
- templ.append(t)
-
- user_creds = utils.get_table(engine, 'user_creds')
- user = [dict(id=uid, username='test_user', password='password',
- tenant='test_project', auth_url='bla',
- tenant_id=str(uuid.uuid4()),
- trust_id='',
- trustor_user_id='') for uid in range(900, 903)]
- engine.execute(user_creds.insert(), user)
-
- stack = utils.get_table(engine, 'stack')
- stack_ids = [('967aaefa-152e-405d-b13a-35d4c816390c', 0),
- ('9e9debab-a303-4f29-84d3-c8165647c47e', 1),
- ('9a4bd1e9-8b21-46cd-964a-f66cb1cfa2f9', 2)]
- data = [dict(id=ll_id, name=ll_id,
- raw_template_id=templ[templ_id]['id'],
- user_creds_id=user[templ_id]['id'],
- username='test_user',
- disable_rollback=True,
- parameters='test_params',
- created_at=timeutils.utcnow(),
- deleted_at=None)
- for ll_id, templ_id in stack_ids]
- data[-1]['deleted_at'] = timeutils.utcnow()
-
- engine.execute(stack.insert(), data)
- return data
-
- def _check_056(self, engine, data):
- self.assertColumnNotExists(engine, 'stack', 'parameters')
-
- self.assertColumnExists(engine, 'raw_template', 'environment')
- self.assertColumnExists(engine, 'raw_template', 'predecessor')
-
- # Get the parameters in stack table
- stack_parameters = {}
- for stack in data:
- templ_id = stack['raw_template_id']
- stack_parameters[templ_id] = (stack['parameters'],
- stack.get('deleted_at'))
-
- # validate whether its moved to raw_template
- raw_template_table = utils.get_table(engine, 'raw_template')
- raw_templates = raw_template_table.select().execute()
-
- for raw_template in raw_templates:
- if raw_template.id in stack_parameters:
- stack_param, deleted_at = stack_parameters[raw_template.id]
- tmpl_env = raw_template.environment
- if engine.name == 'sqlite' and deleted_at is None:
- stack_param = '"%s"' % stack_param
- if deleted_at is None:
- self.assertEqual(stack_param,
- tmpl_env,
- 'parameters migration from stack to '
- 'raw_template failed')
- else:
- self.assertIsNone(tmpl_env,
- 'parameters migration did not skip '
- 'deleted stack')
-
- def _pre_upgrade_057(self, engine):
- # template
- raw_template = utils.get_table(engine, 'raw_template')
- templ = [dict(id=11, template='{}', files='{}')]
- engine.execute(raw_template.insert(), templ)
-
- # credentials
- user_creds = utils.get_table(engine, 'user_creds')
- user = [dict(id=11, username='steve', password='notthis',
- tenant='mine', auth_url='bla',
- tenant_id=str(uuid.uuid4()),
- trust_id='',
- trustor_user_id='')]
- engine.execute(user_creds.insert(), user)
-
- # stack
- stack = utils.get_table(engine, 'stack')
- stack_data = [dict(id='867aaefb-152e-505d-b13a-35d4c816390c',
- name='s1',
- raw_template_id=templ[0]['id'],
- user_creds_id=user[0]['id'],
- username='steve', disable_rollback=True)]
- engine.execute(stack.insert(), stack_data)
-
- # resource
- resource = utils.get_table(engine, 'resource')
- res_data = [dict(id='167aaefb-152e-505d-b13a-35d4c816390c',
- name='res-4',
- stack_id=stack_data[0]['id'],
- user_creds_id=user[0]['id']),
- dict(id='177aaefb-152e-505d-b13a-35d4c816390c',
- name='res-5',
- stack_id=stack_data[0]['id'],
- user_creds_id=user[0]['id'])]
- engine.execute(resource.insert(), res_data)
-
- # resource_data
- resource_data = utils.get_table(engine, 'resource_data')
- rd_data = [dict(key='fruit',
- value='blueberries',
- reduct=False,
- resource_id=res_data[0]['id']),
- dict(key='fruit',
- value='apples',
- reduct=False,
- resource_id=res_data[1]['id'])]
- engine.execute(resource_data.insert(), rd_data)
-
- return {'resource': res_data, 'resource_data': rd_data}
-
- def _check_057(self, engine, data):
- def uuid_in_res_data(res_uuid):
- for rd in data['resource']:
- if rd['id'] == res_uuid:
- return True
- return False
-
- def rd_matches_old_data(key, value, res_uuid):
- for rd in data['resource_data']:
- if (rd['resource_id'] == res_uuid and rd['key'] == key
- and rd['value'] == value):
- return True
- return False
-
- self.assertColumnIsNotNullable(engine, 'resource', 'id')
- res_table = utils.get_table(engine, 'resource')
- res_in_db = list(res_table.select().execute())
- # confirm the resource.id is an int and the uuid field has been
- # copied from the old id.
- for r in res_in_db:
- self.assertIsInstance(r.id, six.integer_types)
- self.assertTrue(uuid_in_res_data(r.uuid))
-
- # confirm that the new resource_id points to the correct resource.
- rd_table = utils.get_table(engine, 'resource_data')
- rd_in_db = list(rd_table.select().execute())
- for rd in rd_in_db:
- for r in res_in_db:
- if rd.resource_id == r.id:
- self.assertTrue(rd_matches_old_data(rd.key, rd.value,
- r.uuid))
-
- def _check_058(self, engine, data):
- self.assertColumnExists(engine, 'resource', 'engine_id')
- self.assertColumnExists(engine, 'resource', 'atomic_key')
-
- def _check_059(self, engine, data):
- column_list = [('entity_id', False),
- ('traversal_id', False),
- ('is_update', False),
- ('atomic_key', False),
- ('stack_id', False),
- ('input_data', True),
- ('updated_at', True),
- ('created_at', True)]
- for column in column_list:
- self.assertColumnExists(engine, 'sync_point', column[0])
- if not column[1]:
- self.assertColumnIsNotNullable(engine, 'sync_point',
- column[0])
- else:
- self.assertColumnIsNullable(engine, 'sync_point', column[0])
-
- def _check_060(self, engine, data):
- column_list = ['needed_by', 'requires', 'replaces', 'replaced_by',
- 'current_template_id']
- for column in column_list:
- self.assertColumnExists(engine, 'resource', column)
-
- def _check_061(self, engine, data):
- for tab_name in ['stack', 'resource', 'software_deployment']:
- self.assertColumnType(engine, tab_name, 'status_reason',
- sqlalchemy.Text)
-
- def _check_062(self, engine, data):
- self.assertColumnExists(engine, 'stack', 'parent_resource_name')
-
- def _check_063(self, engine, data):
- self.assertColumnExists(engine, 'resource',
- 'properties_data_encrypted')
-
- def _check_064(self, engine, data):
- self.assertColumnNotExists(engine, 'raw_template',
- 'predecessor')
-
- def _check_065(self, engine, data):
- self.assertColumnExists(engine, 'resource', 'root_stack_id')
- self.assertIndexExists(engine, 'resource', 'ix_resource_root_stack_id')
-
- def _check_071(self, engine, data):
- self.assertIndexExists(engine, 'stack', 'ix_stack_owner_id')
- self.assertIndexMembers(engine, 'stack', 'ix_stack_owner_id',
- ['owner_id'])
-
def _check_073(self, engine, data):
# check if column still exists and is not nullable.
self.assertColumnIsNotNullable(engine, 'resource_data', 'resource_id')
diff --git a/heat/tests/db/test_sqlalchemy_api.py b/heat/tests/db/test_sqlalchemy_api.py
index 096d3b88f..21fb8b55c 100644
--- a/heat/tests/db/test_sqlalchemy_api.py
+++ b/heat/tests/db/test_sqlalchemy_api.py
@@ -13,23 +13,22 @@
import copy
import datetime
-import fixtures
import json
import logging
import time
+from unittest import mock
import uuid
-import mock
+import fixtures
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_utils import timeutils
-import six
from sqlalchemy.orm import exc
from sqlalchemy.orm import session
-
from heat.common import context
from heat.common import exception
+from heat.common import short_id
from heat.common import template_format
from heat.db.sqlalchemy import api as db_api
from heat.db.sqlalchemy import models
@@ -95,6 +94,7 @@ class SqlAlchemyTest(common.HeatTestCase):
stack_user_project_id=stack_user_project_id)
with utils.UUIDStub(stack_id):
stack.store(backup=backup)
+
return (template, stack)
def _mock_create(self):
@@ -173,8 +173,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertTrue(mock_db_filter.called)
@mock.patch.object(db_api, '_paginate_query')
- def test_filter_and_page_query_whitelists_sort_keys(self,
- mock_paginate_query):
+ def test_filter_and_page_query_allowed_sort_keys(self,
+ mock_paginate_query):
query = mock.Mock()
sort_keys = ['stack_name', 'foo']
db_api._filter_and_page_query(self.ctx, query, sort_keys=sort_keys)
@@ -183,7 +183,7 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertIn(['name'], args)
@mock.patch.object(db_api, '_events_paginate_query')
- def test_events_filter_and_page_query_whitelists_sort_keys(
+ def test_events_filter_and_page_query_allowed_sort_keys(
self, mock_paginate_query):
query = mock.Mock()
sort_keys = ['event_time', 'foo']
@@ -259,14 +259,14 @@ class SqlAlchemyTest(common.HeatTestCase):
filtered_keys = db_api._get_sort_keys(sort_keys, mapping)
self.assertEqual([], filtered_keys)
- def test_get_sort_keys_whitelists_single_key(self):
+ def test_get_sort_keys_allow_single_key(self):
sort_key = 'foo'
mapping = {'foo': 'Foo'}
filtered_keys = db_api._get_sort_keys(sort_key, mapping)
self.assertEqual(['Foo'], filtered_keys)
- def test_get_sort_keys_whitelists_multiple_keys(self):
+ def test_get_sort_keys_allow_multiple_keys(self):
sort_keys = ['foo', 'bar', 'nope']
mapping = {'foo': 'Foo', 'bar': 'Bar'}
@@ -310,7 +310,7 @@ class SqlAlchemyTest(common.HeatTestCase):
)
def test_resource_data_delete(self):
- stack = self._setup_test_stack('stack', UUID1)[1]
+ stack = self._setup_test_stack('res_data_delete', UUID1)[1]
self._mock_create()
stack.create()
@@ -335,73 +335,89 @@ class SqlAlchemyTest(common.HeatTestCase):
)
def test_stack_get_by_name(self):
- stack = self._setup_test_stack('stack', UUID1,
+ name = 'stack_get_by_name'
+ stack = self._setup_test_stack(name, UUID1,
stack_user_project_id=UUID2)[1]
- st = db_api.stack_get_by_name(self.ctx, 'stack')
+ st = db_api.stack_get_by_name(self.ctx, name)
self.assertEqual(UUID1, st.id)
self.ctx.tenant = UUID3
- st = db_api.stack_get_by_name(self.ctx, 'stack')
+ st = db_api.stack_get_by_name(self.ctx, name)
self.assertIsNone(st)
self.ctx.tenant = UUID2
- st = db_api.stack_get_by_name(self.ctx, 'stack')
+ st = db_api.stack_get_by_name(self.ctx, name)
self.assertEqual(UUID1, st.id)
stack.delete()
- st = db_api.stack_get_by_name(self.ctx, 'stack')
+ st = db_api.stack_get_by_name(self.ctx, name)
self.assertIsNone(st)
+ def test_stack_create_multiple(self):
+ name = 'stack_race'
+ stack = self._setup_test_stack(name, UUID1,
+ stack_user_project_id=UUID2)[1]
+ self.assertRaises(exception.StackExists,
+ self._setup_test_stack,
+ name, UUID2, stack_user_project_id=UUID2)
+
+ st = db_api.stack_get_by_name(self.ctx, name)
+ self.assertEqual(UUID1, st.id)
+
+ stack.delete()
+
+ self.assertIsNone(db_api.stack_get_by_name(self.ctx, name))
+
def test_nested_stack_get_by_name(self):
- stack1 = self._setup_test_stack('stack1', UUID1)[1]
- stack2 = self._setup_test_stack('stack2', UUID2,
+ stack1 = self._setup_test_stack('neststack1', UUID1)[1]
+ stack2 = self._setup_test_stack('neststack2', UUID2,
owner_id=stack1.id)[1]
- result = db_api.stack_get_by_name(self.ctx, 'stack2')
+ result = db_api.stack_get_by_name(self.ctx, 'neststack2')
self.assertEqual(UUID2, result.id)
stack2.delete()
- result = db_api.stack_get_by_name(self.ctx, 'stack2')
+ result = db_api.stack_get_by_name(self.ctx, 'neststack2')
self.assertIsNone(result)
def test_stack_get_by_name_and_owner_id(self):
- stack1 = self._setup_test_stack('stack1', UUID1,
+ stack1 = self._setup_test_stack('ownstack1', UUID1,
stack_user_project_id=UUID3)[1]
- stack2 = self._setup_test_stack('stack2', UUID2,
+ stack2 = self._setup_test_stack('ownstack2', UUID2,
owner_id=stack1.id,
stack_user_project_id=UUID3)[1]
- result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'stack2',
+ result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'ownstack2',
None)
self.assertIsNone(result)
- result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'stack2',
+ result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'ownstack2',
stack1.id)
self.assertEqual(UUID2, result.id)
self.ctx.tenant = str(uuid.uuid4())
- result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'stack2',
+ result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'ownstack2',
None)
self.assertIsNone(result)
self.ctx.tenant = UUID3
- result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'stack2',
+ result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'ownstack2',
stack1.id)
self.assertEqual(UUID2, result.id)
stack2.delete()
- result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'stack2',
+ result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'ownstack2',
stack1.id)
self.assertIsNone(result)
def test_stack_get(self):
- stack = self._setup_test_stack('stack', UUID1)[1]
+ stack = self._setup_test_stack('stack_get', UUID1)[1]
st = db_api.stack_get(self.ctx, UUID1, show_deleted=False)
self.assertEqual(UUID1, st.id)
@@ -414,7 +430,7 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(UUID1, st.id)
def test_stack_get_status(self):
- stack = self._setup_test_stack('stack', UUID1)[1]
+ stack = self._setup_test_stack('stack_get_status', UUID1)[1]
st = db_api.stack_get_status(self.ctx, UUID1)
self.assertEqual(('CREATE', 'IN_PROGRESS', '', None), st)
@@ -430,7 +446,7 @@ class SqlAlchemyTest(common.HeatTestCase):
db_api.stack_get_status, self.ctx, UUID2)
def test_stack_get_show_deleted_context(self):
- stack = self._setup_test_stack('stack', UUID1)[1]
+ stack = self._setup_test_stack('stack_get_deleted', UUID1)[1]
self.assertFalse(self.ctx.show_deleted)
st = db_api.stack_get(self.ctx, UUID1)
@@ -445,7 +461,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(UUID1, st.id)
def test_stack_get_all(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stack_get_all_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
st_db = db_api.stack_get_all(self.ctx)
self.assertEqual(3, len(st_db))
@@ -459,7 +476,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(1, len(st_db))
def test_stack_get_all_show_deleted(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stack_get_all_deleted_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
st_db = db_api.stack_get_all(self.ctx)
self.assertEqual(3, len(st_db))
@@ -472,11 +490,11 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(3, len(st_db))
def test_stack_get_all_show_nested(self):
- stack1 = self._setup_test_stack('stack1', UUID1)[1]
- stack2 = self._setup_test_stack('stack2', UUID2,
+ stack1 = self._setup_test_stack('neststack_get_all_1', UUID1)[1]
+ stack2 = self._setup_test_stack('neststack_get_all_2', UUID2,
owner_id=stack1.id)[1]
# Backup stack should not be returned
- stack3 = self._setup_test_stack('stack1*', UUID3,
+ stack3 = self._setup_test_stack('neststack_get_all_1*', UUID3,
owner_id=stack1.id,
backup=True)[1]
@@ -493,7 +511,7 @@ class SqlAlchemyTest(common.HeatTestCase):
def test_stack_get_all_with_filters(self):
self._setup_test_stack('foo', UUID1)
- self._setup_test_stack('bar', UUID2)
+ self._setup_test_stack('baz', UUID2)
filters = {'name': 'foo'}
results = db_api.stack_get_all(self.ctx,
@@ -503,7 +521,7 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual('foo', results[0]['name'])
def test_stack_get_all_filter_matches_in_list(self):
- self._setup_test_stack('foo', UUID1)
+ self._setup_test_stack('wibble', UUID1)
self._setup_test_stack('bar', UUID2)
filters = {'name': ['bar', 'quux']}
@@ -514,8 +532,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual('bar', results[0]['name'])
def test_stack_get_all_returns_all_if_no_filters(self):
- self._setup_test_stack('foo', UUID1)
- self._setup_test_stack('bar', UUID2)
+ self._setup_test_stack('stack_get_all_no_filter1', UUID1)
+ self._setup_test_stack('stack_get_all_no_filter2', UUID2)
filters = None
results = db_api.stack_get_all(self.ctx,
@@ -524,7 +542,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(2, len(results))
def test_stack_get_all_default_sort_keys_and_dir(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_def_sort_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
st_db = db_api.stack_get_all(self.ctx)
self.assertEqual(3, len(st_db))
@@ -533,7 +552,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(stacks[0].id, st_db[2].id)
def test_stack_get_all_default_sort_dir(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_def_sort_dir_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
st_db = db_api.stack_get_all(self.ctx, sort_dir='asc')
self.assertEqual(3, len(st_db))
@@ -542,7 +562,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(stacks[2].id, st_db[2].id)
def test_stack_get_all_str_sort_keys(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_str_sort_keys_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
st_db = db_api.stack_get_all(self.ctx,
sort_keys='creation_time')
@@ -564,21 +585,24 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(expected_keys, used_sort_keys)
def test_stack_get_all_marker(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_marker_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
st_db = db_api.stack_get_all(self.ctx, marker=stacks[1].id)
self.assertEqual(1, len(st_db))
self.assertEqual(stacks[0].id, st_db[0].id)
def test_stack_get_all_non_existing_marker(self):
- [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ [self._setup_test_stack('stacks_nonex_marker_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
uuid = 'this stack doesn\'t exist'
st_db = db_api.stack_get_all(self.ctx, marker=uuid)
self.assertEqual(3, len(st_db))
def test_stack_get_all_doesnt_mutate_sort_keys(self):
- [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ [self._setup_test_stack('stacks_sort_nomutate_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
sort_keys = ['id']
db_api.stack_get_all(self.ctx, sort_keys=sort_keys)
@@ -587,7 +611,8 @@ class SqlAlchemyTest(common.HeatTestCase):
def test_stack_get_all_hidden_tags(self):
cfg.CONF.set_override('hidden_stack_tags', ['hidden'])
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_hidden_tags_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['hidden']
stacks[0].store()
stacks[1].tags = ['random']
@@ -605,7 +630,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertNotEqual(stacks[0].id, stack.id)
def test_stack_get_all_by_tags(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_tags_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['tag1']
stacks[0].store()
stacks[1].tags = ['tag1', 'tag2']
@@ -623,7 +649,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(1, len(st_db))
def test_stack_get_all_by_tags_any(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_tags_any_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['tag2']
stacks[0].store()
stacks[1].tags = ['tag1', 'tag2']
@@ -639,7 +666,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(3, len(st_db))
def test_stack_get_all_by_not_tags(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_not_tags_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['tag1']
stacks[0].store()
stacks[1].tags = ['tag1', 'tag2']
@@ -658,7 +686,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(2, len(st_db))
def test_stack_get_all_by_not_tags_any(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_not_tags_any_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['tag2']
stacks[0].store()
stacks[1].tags = ['tag1', 'tag2']
@@ -674,7 +703,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(0, len(st_db))
def test_stack_get_all_by_tag_with_pagination(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_tag_page_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['tag1']
stacks[0].store()
stacks[1].tags = ['tag2']
@@ -697,7 +727,8 @@ class SqlAlchemyTest(common.HeatTestCase):
def test_stack_get_all_by_tag_with_show_hidden(self):
cfg.CONF.set_override('hidden_stack_tags', ['hidden'])
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_tag_hidden_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['tag1']
stacks[0].store()
stacks[1].tags = ['hidden', 'tag1']
@@ -712,7 +743,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(1, len(st_db))
def test_stack_count_all(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_count_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
st_db = db_api.stack_count_all(self.ctx)
self.assertEqual(3, st_db)
@@ -734,7 +766,8 @@ class SqlAlchemyTest(common.HeatTestCase):
def test_count_all_hidden_tags(self):
cfg.CONF.set_override('hidden_stack_tags', ['hidden'])
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_count_hid_tag_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['hidden']
stacks[0].store()
stacks[1].tags = ['random']
@@ -747,7 +780,8 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(2, st_db_visible)
def test_count_all_by_tags(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_count_all_tag_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['tag1']
stacks[0].store()
stacks[1].tags = ['tag2']
@@ -764,7 +798,8 @@ class SqlAlchemyTest(common.HeatTestCase):
def test_count_all_by_tag_with_show_hidden(self):
cfg.CONF.set_override('hidden_stack_tags', ['hidden'])
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stacks_count_all_tagsh_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
stacks[0].tags = ['tag1']
stacks[0].store()
stacks[1].tags = ['hidden', 'tag1']
@@ -779,13 +814,12 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(1, st_db)
def test_stack_count_all_with_filters(self):
- self._setup_test_stack('foo', UUID1)
- self._setup_test_stack('bar', UUID2)
- self._setup_test_stack('bar', UUID3)
- filters = {'name': 'bar'}
+ self._setup_test_stack('sca_foo', UUID1)
+ self._setup_test_stack('sca_bar', UUID2)
+ filters = {'name': 'sca_bar'}
st_db = db_api.stack_count_all(self.ctx, filters=filters)
- self.assertEqual(2, st_db)
+ self.assertEqual(1, st_db)
def test_stack_count_all_show_nested(self):
stack1 = self._setup_test_stack('stack1', UUID1)[1]
@@ -803,7 +837,7 @@ class SqlAlchemyTest(common.HeatTestCase):
self.assertEqual(2, st_db)
def test_event_get_all_by_stack(self):
- stack = self._setup_test_stack('stack', UUID1)[1]
+ stack = self._setup_test_stack('stack_events', UUID1)[1]
self._mock_create()
stack.create()
@@ -900,7 +934,7 @@ class SqlAlchemyTest(common.HeatTestCase):
)
def test_event_count_all_by_stack(self):
- stack = self._setup_test_stack('stack', UUID1)[1]
+ stack = self._setup_test_stack('stack_event_count', UUID1)[1]
self._mock_create()
stack.create()
@@ -926,7 +960,8 @@ class SqlAlchemyTest(common.HeatTestCase):
)
def test_event_get_all_by_tenant(self):
- stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
+ stacks = [self._setup_test_stack('stack_ev_ten_%d' % i, x)[1]
+ for i, x in enumerate(UUIDs)]
self._mock_create()
[s.create() for s in stacks]
@@ -978,7 +1013,7 @@ class SqlAlchemyTest(common.HeatTestCase):
db_api.user_creds_create,
self.ctx)
self.assertIn('Length of OS_PASSWORD after encryption exceeds '
- 'Heat limit (255 chars)', six.text_type(error))
+ 'Heat limit (255 chars)', str(error))
def test_user_creds_trust(self):
self.ctx.username = None
@@ -1101,12 +1136,12 @@ class SqlAlchemyTest(common.HeatTestCase):
db_api.software_config_get,
self.ctx,
scf_id)
- self.assertIn(scf_id, six.text_type(err))
+ self.assertIn(scf_id, str(err))
err = self.assertRaises(
exception.NotFound, db_api.software_config_delete,
self.ctx, scf_id)
- self.assertIn(scf_id, six.text_type(err))
+ self.assertIn(scf_id, str(err))
def test_software_config_delete_by_admin(self):
scf_id = self._create_software_config_record()
@@ -1134,7 +1169,7 @@ class SqlAlchemyTest(common.HeatTestCase):
self.ctx, config_id)
msg = ("Software config with id %s can not be deleted as it is "
"referenced" % config_id)
- self.assertIn(msg, six.text_type(err))
+ self.assertIn(msg, str(err))
def _deployment_values(self):
tenant_id = self.ctx.tenant_id
@@ -1223,7 +1258,7 @@ class SqlAlchemyTest(common.HeatTestCase):
err = self.assertRaises(exception.NotFound,
db_api.software_deployment_update,
self.ctx, deployment_id, values={})
- self.assertIn(deployment_id, six.text_type(err))
+ self.assertIn(deployment_id, str(err))
values = self._deployment_values()
deployment = db_api.software_deployment_create(self.ctx, values)
deployment_id = deployment.id
@@ -1245,7 +1280,7 @@ class SqlAlchemyTest(common.HeatTestCase):
err = self.assertRaises(exception.NotFound,
db_api.software_deployment_delete,
self.ctx, deployment_id)
- self.assertIn(deployment_id, six.text_type(err))
+ self.assertIn(deployment_id, str(err))
values = self._deployment_values()
deployment = db_api.software_deployment_create(self.ctx, values)
deployment_id = deployment.id
@@ -1260,7 +1295,7 @@ class SqlAlchemyTest(common.HeatTestCase):
test_ctx,
deployment_id)
- self.assertIn(deployment_id, six.text_type(err))
+ self.assertIn(deployment_id, str(err))
def test_software_deployment_delete(self):
self._test_software_deployment_delete()
@@ -1347,7 +1382,7 @@ class SqlAlchemyTest(common.HeatTestCase):
err = self.assertRaises(exception.NotFound,
db_api.snapshot_update,
self.ctx, snapshot_id, values={})
- self.assertIn(snapshot_id, six.text_type(err))
+ self.assertIn(snapshot_id, str(err))
def test_snapshot_update(self):
template = create_raw_template(self.ctx)
@@ -1367,7 +1402,7 @@ class SqlAlchemyTest(common.HeatTestCase):
err = self.assertRaises(exception.NotFound,
db_api.snapshot_delete,
self.ctx, snapshot_id)
- self.assertIn(snapshot_id, six.text_type(err))
+ self.assertIn(snapshot_id, str(err))
def test_snapshot_delete(self):
template = create_raw_template(self.ctx)
@@ -1387,7 +1422,7 @@ class SqlAlchemyTest(common.HeatTestCase):
self.ctx,
snapshot_id)
- self.assertIn(snapshot_id, six.text_type(err))
+ self.assertIn(snapshot_id, str(err))
def test_snapshot_get_all(self):
template = create_raw_template(self.ctx)
@@ -1410,7 +1445,7 @@ def create_raw_template(context, **kwargs):
'template': t,
}
if 'files' not in kwargs and 'files_id' not in kwargs:
- # modern raw_templates have associated raw_template_files db obj
+ # modern raw_templates have associated raw_template_files DB obj
tf = template_files.TemplateFiles({'foo': 'bar'})
tf.store(context)
kwargs['files_id'] = tf.files_id
@@ -1427,7 +1462,7 @@ def create_user_creds(ctx, **kwargs):
def create_stack(ctx, template, user_creds, **kwargs):
values = {
- 'name': 'db_test_stack_name',
+ 'name': short_id.generate_id(),
'raw_template_id': template.id,
'username': ctx.username,
'tenant': ctx.tenant_id,
@@ -1444,6 +1479,8 @@ def create_stack(ctx, template, user_creds, **kwargs):
'prev_raw_template': None
}
values.update(kwargs)
+ if 'tenant' in kwargs:
+ ctx.tenant_id = kwargs['tenant']
return db_api.stack_create(ctx, values)
@@ -1647,7 +1684,7 @@ class DBAPIUserCredsTest(common.HeatTestCase):
self.ctx, user_creds['id'])
exp_msg = ('Attempt to delete user creds with id '
'%s that does not exist' % user_creds['id'])
- self.assertIn(exp_msg, six.text_type(err))
+ self.assertIn(exp_msg, str(err))
self.assertEqual(0, mock_delete.call_count)
def test_user_creds_delete_retries(self):
@@ -1711,7 +1748,7 @@ class DBAPIStackTest(common.HeatTestCase):
def test_stack_create(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
self.assertIsNotNone(stack.id)
- self.assertEqual('db_test_stack_name', stack.name)
+ self.assertEqual(12, len(stack.name))
self.assertEqual(self.template.id, stack.raw_template_id)
self.assertEqual(self.ctx.username, stack.username)
self.assertEqual(self.ctx.tenant_id, stack.tenant)
@@ -1741,7 +1778,7 @@ class DBAPIStackTest(common.HeatTestCase):
ret_stack = db_api.stack_get(self.ctx, stack_id, show_deleted=True)
self.assertIsNotNone(ret_stack)
self.assertEqual(stack_id, ret_stack.id)
- self.assertEqual('db_test_stack_name', ret_stack.name)
+ self.assertEqual(12, len(ret_stack.name))
# Testing child resources deletion
self.assertRaises(exception.NotFound, db_api.resource_get,
@@ -1798,7 +1835,7 @@ class DBAPIStackTest(common.HeatTestCase):
side_effect=db_exception.DBDeadlock) as mock_update:
self.assertRaises(db_exception.DBDeadlock,
db_api.stack_update, self.ctx, stack.id, {})
- self.assertEqual(4, mock_update.call_count)
+ self.assertEqual(21, mock_update.call_count)
def test_stack_set_status_release_lock(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
@@ -1857,7 +1894,7 @@ class DBAPIStackTest(common.HeatTestCase):
ret_stack = db_api.stack_get(self.ctx, stack.id, show_deleted=False)
self.assertIsNotNone(ret_stack)
self.assertEqual(stack.id, ret_stack.id)
- self.assertEqual('db_test_stack_name', ret_stack.name)
+ self.assertEqual(12, len(ret_stack.name))
def test_stack_get_returns_none_if_stack_does_not_exist(self):
stack = db_api.stack_get(self.ctx, UUID1, show_deleted=False)
@@ -1876,7 +1913,7 @@ class DBAPIStackTest(common.HeatTestCase):
ret_stack = db_api.stack_get(self.ctx, stack.id, show_deleted=False)
self.assertIsNotNone(ret_stack)
self.assertEqual(stack.id, ret_stack.id)
- self.assertEqual('db_test_stack_name', ret_stack.name)
+ self.assertEqual(12, len(ret_stack.name))
def test_stack_get_can_return_a_stack_from_different_tenant(self):
# create a stack with the common tenant
@@ -1888,14 +1925,14 @@ class DBAPIStackTest(common.HeatTestCase):
ret_stack = db_api.stack_get(admin_ctx, stack.id,
show_deleted=False)
self.assertEqual(stack.id, ret_stack.id)
- self.assertEqual('db_test_stack_name', ret_stack.name)
+ self.assertEqual(12, len(ret_stack.name))
def test_stack_get_by_name(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
ret_stack = db_api.stack_get_by_name(self.ctx, stack.name)
self.assertIsNotNone(ret_stack)
self.assertEqual(stack.id, ret_stack.id)
- self.assertEqual('db_test_stack_name', ret_stack.name)
+ self.assertEqual(12, len(ret_stack.name))
self.assertIsNone(db_api.stack_get_by_name(self.ctx, 'abc'))
@@ -2582,7 +2619,7 @@ class DBAPIResourceTest(common.HeatTestCase):
self.assertRaises(db_exception.DBDeadlock,
db_api.resource_purge_deleted,
self.ctx, self.stack.id)
- self.assertEqual(4, mock_delete.call_count)
+ self.assertEqual(21, mock_delete.call_count)
def test_engine_get_all_locked_by_stack(self):
values = [
@@ -2685,8 +2722,8 @@ class DBAPIResourceReplacementTest(common.HeatTestCase):
db_api.resource_update_and_save(other_ctx, orig.id,
{'atomic_key': 2})
- self.patchobject(db_api, 'resource_update',
- new=mock.Mock(wraps=db_api.resource_update,
+ self.patchobject(db_api, '_try_resource_update',
+ new=mock.Mock(wraps=db_api._try_resource_update,
side_effect=update_atomic_key))
self.assertRaises(exception.UpdateInProgress,
@@ -2728,8 +2765,8 @@ class DBAPIResourceReplacementTest(common.HeatTestCase):
{'engine_id': 'a',
'atomic_key': 2})
- self.patchobject(db_api, 'resource_update',
- new=mock.Mock(wraps=db_api.resource_update,
+ self.patchobject(db_api, '_try_resource_update',
+ new=mock.Mock(wraps=db_api._try_resource_update,
side_effect=lock_resource))
self.assertRaises(exception.UpdateInProgress,
@@ -3073,7 +3110,7 @@ class DBAPIResourceUpdateTest(common.HeatTestCase):
ret = db_api.resource_update(self.ctx, self.resource.id,
values, db_res.atomic_key, None)
self.assertTrue(ret)
- db_res = db_api.resource_get(self.ctx, self.resource.id)
+ db_res = db_api.resource_get(self.ctx, self.resource.id, refresh=True)
self.assertEqual('engine-1', db_res.engine_id)
self.assertEqual('CREATE', db_res.action)
self.assertEqual('IN_PROGRESS', db_res.status)
@@ -3087,7 +3124,7 @@ class DBAPIResourceUpdateTest(common.HeatTestCase):
ret = db_api.resource_update(self.ctx, self.resource.id,
values, db_res.atomic_key, None)
self.assertTrue(ret)
- db_res = db_api.resource_get(self.ctx, self.resource.id)
+ db_res = db_api.resource_get(self.ctx, self.resource.id, refresh=True)
self.assertEqual('engine-1', db_res.engine_id)
self.assertEqual(1, db_res.atomic_key)
values = {'engine_id': 'engine-1',
@@ -3096,7 +3133,7 @@ class DBAPIResourceUpdateTest(common.HeatTestCase):
ret = db_api.resource_update(self.ctx, self.resource.id,
values, db_res.atomic_key, 'engine-1')
self.assertTrue(ret)
- db_res = db_api.resource_get(self.ctx, self.resource.id)
+ db_res = db_api.resource_get(self.ctx, self.resource.id, refresh=True)
self.assertEqual('engine-1', db_res.engine_id)
self.assertEqual('CREATE', db_res.action)
self.assertEqual('FAILED', db_res.status)
@@ -3110,7 +3147,7 @@ class DBAPIResourceUpdateTest(common.HeatTestCase):
ret = db_api.resource_update(self.ctx, self.resource.id,
values, db_res.atomic_key, None)
self.assertTrue(ret)
- db_res = db_api.resource_get(self.ctx, self.resource.id)
+ db_res = db_api.resource_get(self.ctx, self.resource.id, refresh=True)
self.assertEqual('engine-1', db_res.engine_id)
self.assertEqual(1, db_res.atomic_key)
values = {'engine_id': 'engine-2',
@@ -3128,7 +3165,7 @@ class DBAPIResourceUpdateTest(common.HeatTestCase):
ret = db_api.resource_update(self.ctx, self.resource.id,
values, db_res.atomic_key, None)
self.assertTrue(ret)
- db_res = db_api.resource_get(self.ctx, self.resource.id)
+ db_res = db_api.resource_get(self.ctx, self.resource.id, refresh=True)
self.assertEqual('engine-1', db_res.engine_id)
self.assertEqual(1, db_res.atomic_key)
# Set engine id as None to release the lock
@@ -3138,7 +3175,7 @@ class DBAPIResourceUpdateTest(common.HeatTestCase):
ret = db_api.resource_update(self.ctx, self.resource.id,
values, db_res.atomic_key, 'engine-1')
self.assertTrue(ret)
- db_res = db_api.resource_get(self.ctx, self.resource.id)
+ db_res = db_api.resource_get(self.ctx, self.resource.id, refresh=True)
self.assertIsNone(db_res.engine_id)
self.assertEqual('CREATE', db_res.action)
self.assertEqual('COMPLETE', db_res.status)
@@ -3152,18 +3189,19 @@ class DBAPIResourceUpdateTest(common.HeatTestCase):
ret = db_api.resource_update(self.ctx, self.resource.id,
values, db_res.atomic_key, None)
self.assertTrue(ret)
- db_res = db_api.resource_get(self.ctx, self.resource.id)
+
+ db_res = db_api.resource_get(self.ctx, self.resource.id, refresh=True)
self.assertEqual('engine-1', db_res.engine_id)
self.assertEqual(1, db_res.atomic_key)
# Set engine id as engine-2 and pass expected engine id as old engine
- # i.e engine-1 in db api steal the lock
+ # i.e. engine-1 in DB API steals the lock
values = {'engine_id': 'engine-2',
'action': 'DELETE',
'status': 'IN_PROGRESS'}
ret = db_api.resource_update(self.ctx, self.resource.id,
values, db_res.atomic_key, 'engine-1')
self.assertTrue(ret)
- db_res = db_api.resource_get(self.ctx, self.resource.id)
+ db_res = db_api.resource_get(self.ctx, self.resource.id, refresh=True)
self.assertEqual('engine-2', db_res.engine_id)
self.assertEqual('DELETE', db_res.action)
self.assertEqual(2, db_res.atomic_key)
@@ -3342,7 +3380,7 @@ class DBAPISyncPointTest(common.HeatTestCase):
self.ctx, entity_id=str(res.id),
stack_id=self.stack.id,
traversal_id=self.stack.current_traversal)
- self.assertEqual(len(self.resources) * 4, add.call_count)
+ self.assertEqual(len(self.resources) * 21, add.call_count)
class DBAPIMigratePropertiesDataTest(common.HeatTestCase):
diff --git a/heat/tests/db/test_sqlalchemy_filters.py b/heat/tests/db/test_sqlalchemy_filters.py
index 93a9cb4a5..e3424aee7 100644
--- a/heat/tests/db/test_sqlalchemy_filters.py
+++ b/heat/tests/db/test_sqlalchemy_filters.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.db.sqlalchemy import filters as db_filters
from heat.tests import common
diff --git a/heat/tests/db/test_utils.py b/heat/tests/db/test_utils.py
index e44988f16..c6bc30922 100644
--- a/heat/tests/db/test_utils.py
+++ b/heat/tests/db/test_utils.py
@@ -26,7 +26,7 @@ def _has_constraint(cset, ctype, cname):
and c.name == cname):
return True
else:
- return False
+ return False
class DBMigrationUtilsTest(common.HeatTestCase):
diff --git a/heat/tests/engine/service/test_service_engine.py b/heat/tests/engine/service/test_service_engine.py
index 421c2e359..cdda53097 100644
--- a/heat/tests/engine/service/test_service_engine.py
+++ b/heat/tests/engine/service/test_service_engine.py
@@ -12,8 +12,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
diff --git a/heat/tests/engine/service/test_software_config.py b/heat/tests/engine/service/test_software_config.py
index feaaf7885..ecd7bef62 100644
--- a/heat/tests/engine/service/test_software_config.py
+++ b/heat/tests/engine/service/test_software_config.py
@@ -12,13 +12,12 @@
# under the License.
import datetime
+from unittest import mock
import uuid
-import mock
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils as json
from oslo_utils import timeutils
-import six
from heat.common import crypt
from heat.common import exception
@@ -170,6 +169,20 @@ class SoftwareConfigServiceTest(common.HeatTestCase):
config['outputs'])
self.assertEqual(kwargs['options'], config['options'])
+ def test_create_software_config_structured(self):
+ kwargs = {
+ 'group': 'json-file',
+ 'name': 'config_heat',
+ 'config': {'foo': 'bar'},
+ 'inputs': [{'name': 'mode'}],
+ 'outputs': [{'name': 'endpoint'}],
+ 'options': {}
+ }
+ config = self._create_software_config(**kwargs)
+ config_id = config['id']
+ config = self.engine.show_software_config(self.ctx, config_id)
+ self.assertEqual(kwargs['config'], config['config'])
+
def test_delete_software_config(self):
config = self._create_software_config()
self.assertIsNotNone(config)
@@ -188,7 +201,7 @@ class SoftwareConfigServiceTest(common.HeatTestCase):
try:
stack.validate()
except exception.StackValidationFailed as exc:
- self.fail("Validation should have passed: %s" % six.text_type(exc))
+ self.fail("Validation should have passed: %s" % str(exc))
def _create_software_deployment(self, config_id=None, input_values=None,
action='INIT',
@@ -586,7 +599,7 @@ class SoftwareConfigServiceTest(common.HeatTestCase):
values.update(kwargs)
updated = self.engine.update_software_deployment(
self.ctx, deployment_id, updated_at=None, **values)
- for key, value in six.iteritems(kwargs):
+ for key, value in kwargs.items():
self.assertEqual(value, updated[key])
check_software_deployment_updated(config_id=config_id)
@@ -808,7 +821,7 @@ class SoftwareConfigServiceTest(common.HeatTestCase):
deployment = self._create_software_deployment(
status='IN_PROGRESS', config_id=config['id'])
- deployment_id = six.text_type(deployment['id'])
+ deployment_id = str(deployment['id'])
sd = software_deployment_object.SoftwareDeployment.get_by_id(
self.ctx, deployment_id)
diff --git a/heat/tests/engine/service/test_stack_action.py b/heat/tests/engine/service/test_stack_action.py
index ccec6bcac..74a1d96e5 100644
--- a/heat/tests/engine/service/test_stack_action.py
+++ b/heat/tests/engine/service/test_stack_action.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_messaging.rpc import dispatcher
from heat.common import exception
@@ -158,6 +159,7 @@ class StackServiceUpdateActionsNotSupportedTest(common.HeatTestCase):
self.ctx, old_stack.identifier(), template,
params, None, {})
self.assertEqual(exception.NotSupported, ex.exc_info[0])
- mock_load.assert_called_once_with(self.ctx, stack=s)
+ mock_load.assert_called_once_with(self.ctx, stack=s,
+ check_refresh_cred=True)
old_stack.delete()
diff --git a/heat/tests/engine/service/test_stack_adopt.py b/heat/tests/engine/service/test_stack_adopt.py
index 735d02023..acdb6c253 100644
--- a/heat/tests/engine/service/test_stack_adopt.py
+++ b/heat/tests/engine/service/test_stack_adopt.py
@@ -11,10 +11,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
-import six
from heat.common import exception
from heat.engine import service
@@ -168,4 +168,4 @@ class StackServiceAdoptTest(common.HeatTestCase):
template, {}, None,
{'adopt_stack_data': str(adopt_data)})
self.assertEqual(exception.NotSupported, ex.exc_info[0])
- self.assertIn('Stack Adopt', six.text_type(ex.exc_info[1]))
+ self.assertIn('Stack Adopt', str(ex.exc_info[1]))
diff --git a/heat/tests/engine/service/test_stack_create.py b/heat/tests/engine/service/test_stack_create.py
index 6975651d9..ad55a270f 100644
--- a/heat/tests/engine/service/test_stack_create.py
+++ b/heat/tests/engine/service/test_stack_create.py
@@ -11,11 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_service import threadgroup
-import six
from swiftclient import exceptions
from heat.common import environment_util as env_util
@@ -94,7 +94,7 @@ class StackCreateTest(common.HeatTestCase):
self.assertEqual(exception.NotFound, ex.exc_info[0])
self.assertIn('Could not fetch files from container '
'test_container, reason: error.',
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
def test_stack_create(self):
stack_name = 'service_create_test_stack'
@@ -153,7 +153,7 @@ class StackCreateTest(common.HeatTestCase):
self._test_stack_create, stack_name)
self.assertEqual(exception.RequestLimitExceeded, ex.exc_info[0])
self.assertIn("You have reached the maximum stacks per tenant",
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
@mock.patch.object(stack.Stack, 'validate')
def test_stack_create_verify_err(self, mock_validate):
@@ -238,7 +238,7 @@ class StackCreateTest(common.HeatTestCase):
template, params, None, {}, None)
self.assertEqual(exception.MissingCredentialError, ex.exc_info[0])
self.assertEqual('Missing required credential: X-Auth-Key',
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
mock_tmpl.assert_called_once_with(template, files=None)
mock_env.assert_called_once_with(params)
@@ -259,7 +259,7 @@ class StackCreateTest(common.HeatTestCase):
template, params, None, {})
self.assertEqual(exception.MissingCredentialError, ex.exc_info[0])
self.assertEqual('Missing required credential: X-Auth-User',
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
mock_tmpl.assert_called_once_with(template, files=None)
mock_env.assert_called_once_with(params)
@@ -326,7 +326,7 @@ class StackCreateTest(common.HeatTestCase):
tpl, params, None, {})
self.assertEqual(exception.RequestLimitExceeded, ex.exc_info[0])
self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
@mock.patch.object(threadgroup, 'ThreadGroup')
@mock.patch.object(stack.Stack, 'validate')
@@ -421,7 +421,7 @@ class StackCreateTest(common.HeatTestCase):
self.man._validate_deferred_auth_context,
ctx, stk)
self.assertEqual('Missing required credential: X-Auth-User',
- six.text_type(ex))
+ str(ex))
# missing password
ctx = utils.dummy_context(password=None)
@@ -429,7 +429,7 @@ class StackCreateTest(common.HeatTestCase):
self.man._validate_deferred_auth_context,
ctx, stk)
self.assertEqual('Missing required credential: X-Auth-Key',
- six.text_type(ex))
+ str(ex))
@mock.patch.object(instances.Instance, 'validate')
@mock.patch.object(stack.Stack, 'total_resources')
diff --git a/heat/tests/engine/service/test_stack_delete.py b/heat/tests/engine/service/test_stack_delete.py
index 8eb40f763..1b34ee61c 100644
--- a/heat/tests/engine/service/test_stack_delete.py
+++ b/heat/tests/engine/service/test_stack_delete.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_utils import timeutils
diff --git a/heat/tests/engine/service/test_stack_events.py b/heat/tests/engine/service/test_stack_events.py
index fa0e8e1d1..ef1907c47 100644
--- a/heat/tests/engine/service/test_stack_events.py
+++ b/heat/tests/engine/service/test_stack_events.py
@@ -11,10 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_messaging import conffixture
+from heat.common import context
from heat.engine import resource as res
from heat.engine.resources.aws.ec2 import instance as instances
from heat.engine import service
@@ -31,6 +33,7 @@ class StackEventTest(common.HeatTestCase):
def setUp(self):
super(StackEventTest, self).setUp()
+ self.patchobject(context, 'StoredContext')
self.ctx = utils.dummy_context(tenant_id='stack_event_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
diff --git a/heat/tests/engine/service/test_stack_resources.py b/heat/tests/engine/service/test_stack_resources.py
index cea0923e3..b09e0b66c 100644
--- a/heat/tests/engine/service/test_stack_resources.py
+++ b/heat/tests/engine/service/test_stack_resources.py
@@ -10,13 +10,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
-import six
from heat.common import exception
from heat.common import identifier
+from heat.engine.clients.os import heat_plugin
from heat.engine.clients.os import keystone
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import dependencies
@@ -244,7 +245,7 @@ class StackResourcesServiceTest(common.HeatTestCase):
@tools.stack_context('service_resources_list_test_stack_with_depth')
def test_stack_resources_list_with_depth(self, mock_load):
mock_load.return_value = self.stack
- resources = six.itervalues(self.stack)
+ resources = self.stack.values()
self.stack.iter_resources = mock.Mock(return_value=resources)
self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
@@ -256,7 +257,7 @@ class StackResourcesServiceTest(common.HeatTestCase):
@tools.stack_context('service_resources_list_test_stack_with_max_depth')
def test_stack_resources_list_with_max_depth(self, mock_load):
mock_load.return_value = self.stack
- resources = six.itervalues(self.stack)
+ resources = self.stack.values()
self.stack.iter_resources = mock.Mock(return_value=resources)
self.eng.list_stack_resources(self.ctx,
self.stack.identifier(),
@@ -269,7 +270,7 @@ class StackResourcesServiceTest(common.HeatTestCase):
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_filter_type(self, mock_load):
mock_load.return_value = self.stack
- resources = six.itervalues(self.stack)
+ resources = self.stack.values()
self.stack.iter_resources = mock.Mock(return_value=resources)
filters = {'type': 'AWS::EC2::Instance'}
resources = self.eng.list_stack_resources(self.ctx,
@@ -283,7 +284,7 @@ class StackResourcesServiceTest(common.HeatTestCase):
@tools.stack_context('service_resources_list_test_stack')
def test_stack_resources_filter_type_not_found(self, mock_load):
mock_load.return_value = self.stack
- resources = six.itervalues(self.stack)
+ resources = self.stack.values()
self.stack.iter_resources = mock.Mock(return_value=resources)
filters = {'type': 'NonExisted'}
resources = self.eng.list_stack_resources(self.ctx,
@@ -432,7 +433,7 @@ class StackResourcesServiceTest(common.HeatTestCase):
'WebServerScaleDownPolicy',
details)
msg = 'Invalid hook type "invalid_hook"'
- self.assertIn(msg, six.text_type(ex.exc_info[1]))
+ self.assertIn(msg, str(ex.exc_info[1]))
self.assertEqual(exception.InvalidBreakPointHook,
ex.exc_info[0])
@@ -447,15 +448,17 @@ class StackResourcesServiceTest(common.HeatTestCase):
details)
msg = ('The "pre-update" hook is not defined on '
'AWSScalingPolicy "WebServerScaleDownPolicy"')
- self.assertIn(msg, six.text_type(ex.exc_info[1]))
+ self.assertIn(msg, str(ex.exc_info[1]))
self.assertEqual(exception.InvalidBreakPointHook,
ex.exc_info[0])
+ @mock.patch.object(heat_plugin.HeatClientPlugin, 'get_heat_cfn_url')
@mock.patch.object(res.Resource, 'metadata_update')
@mock.patch.object(res.Resource, 'signal')
@mock.patch.object(service.EngineService, '_get_stack')
def test_signal_calls_metadata_update(self, mock_get, mock_signal,
- mock_update):
+ mock_update, mock_get_cfn):
+ mock_get_cfn.return_value = 'http://server.test:8000/v1'
# fake keystone client
self.patchobject(keystone.KeystoneClientPlugin, '_create',
return_value=fake_ks.FakeKeystoneClient())
diff --git a/heat/tests/engine/service/test_stack_snapshot.py b/heat/tests/engine/service/test_stack_snapshot.py
index 86bae3ed7..79e8b05b9 100644
--- a/heat/tests/engine/service/test_stack_snapshot.py
+++ b/heat/tests/engine/service/test_stack_snapshot.py
@@ -10,11 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import uuid
-import mock
from oslo_messaging.rpc import dispatcher
-import six
from heat.common import exception
from heat.common import template_format
@@ -52,7 +51,7 @@ class SnapshotServiceTest(common.HeatTestCase):
snapshot_id)
expected = 'Snapshot with id %s not found' % snapshot_id
self.assertEqual(exception.NotFound, ex.exc_info[0])
- self.assertIn(expected, six.text_type(ex.exc_info[1]))
+ self.assertIn(expected, str(ex.exc_info[1]))
def test_show_snapshot_not_belong_to_stack(self):
stk1 = self._create_stack('stack_snaphot_not_belong_to_stack_1')
@@ -72,7 +71,7 @@ class SnapshotServiceTest(common.HeatTestCase):
'could not be found') % {'snapshot': snapshot_id,
'stack': stk2.name}
self.assertEqual(exception.SnapshotNotFound, ex.exc_info[0])
- self.assertIn(expected, six.text_type(ex.exc_info[1]))
+ self.assertIn(expected, str(ex.exc_info[1]))
@mock.patch.object(stack.Stack, 'load')
def test_create_snapshot(self, mock_load):
@@ -111,7 +110,7 @@ class SnapshotServiceTest(common.HeatTestCase):
self.assertEqual(exception.ActionInProgress, ex.exc_info[0])
msg = ("Stack %(stack)s already has an action (%(action)s) "
"in progress.") % {'stack': stack_name, 'action': stk.action}
- self.assertEqual(msg, six.text_type(ex.exc_info[1]))
+ self.assertEqual(msg, str(ex.exc_info[1]))
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@@ -153,7 +152,7 @@ class SnapshotServiceTest(common.HeatTestCase):
'could not be found') % {'snapshot': snapshot_id,
'stack': stk2.name}
self.assertEqual(exception.SnapshotNotFound, ex.exc_info[0])
- self.assertIn(expected, six.text_type(ex.exc_info[1]))
+ self.assertIn(expected, str(ex.exc_info[1]))
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
mock_load.reset_mock()
@@ -172,7 +171,7 @@ class SnapshotServiceTest(common.HeatTestCase):
self.engine.delete_snapshot,
self.ctx, stk.identifier(), snapshot.id)
msg = 'Deleting in-progress snapshot is not supported'
- self.assertIn(msg, six.text_type(ex.exc_info[1]))
+ self.assertIn(msg, str(ex.exc_info[1]))
self.assertEqual(exception.NotSupported, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'load')
@@ -192,7 +191,7 @@ class SnapshotServiceTest(common.HeatTestCase):
stk.identifier(), snapshot_id)
self.assertEqual(exception.NotFound, ex.exc_info[0])
- self.assertTrue(2, mock_load.call_count)
+ self.assertEqual(2, mock_load.call_count)
@mock.patch.object(stack.Stack, 'load')
def test_list_snapshots(self, mock_load):
@@ -245,7 +244,7 @@ class SnapshotServiceTest(common.HeatTestCase):
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
mock_load.reset_mock()
- stk2 = self._create_stack('stack_snapshot_restore_other_stack_1')
+ stk2 = self._create_stack('stack_snapshot_restore_other_stack_2')
mock_load.return_value = stk2
ex = self.assertRaises(dispatcher.ExpectedException,
@@ -256,6 +255,6 @@ class SnapshotServiceTest(common.HeatTestCase):
'could not be found') % {'snapshot': snapshot_id,
'stack': stk2.name}
self.assertEqual(exception.SnapshotNotFound, ex.exc_info[0])
- self.assertIn(expected, six.text_type(ex.exc_info[1]))
+ self.assertIn(expected, str(ex.exc_info[1]))
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
diff --git a/heat/tests/engine/service/test_stack_update.py b/heat/tests/engine/service/test_stack_update.py
index ff4e7943f..f1df832af 100644
--- a/heat/tests/engine/service/test_stack_update.py
+++ b/heat/tests/engine/service/test_stack_update.py
@@ -10,15 +10,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import uuid
import eventlet.queue
-import mock
from oslo_config import cfg
from oslo_messaging import conffixture
from oslo_messaging.rpc import dispatcher
-import six
+from heat.common import context
from heat.common import environment_util as env_util
from heat.common import exception
from heat.common import messaging
@@ -46,6 +46,7 @@ class ServiceStackUpdateTest(common.HeatTestCase):
def setUp(self):
super(ServiceStackUpdateTest, self).setUp()
self.useFixture(conffixture.ConfFixture(cfg.CONF))
+ self.patchobject(context, 'StoredContext')
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
self.man.thread_group_mgr = tools.DummyThreadGroupManager()
@@ -104,7 +105,8 @@ class ServiceStackUpdateTest(common.HeatTestCase):
username='test_username',
converge=True
)
- mock_load.assert_called_once_with(self.ctx, stack=s)
+ mock_load.assert_called_once_with(self.ctx, stack=s,
+ check_refresh_cred=True)
mock_validate.assert_called_once_with()
def _test_stack_update_with_environment_files(self, stack_name,
@@ -223,7 +225,8 @@ class ServiceStackUpdateTest(common.HeatTestCase):
username='test_username',
converge=False
)
- mock_load.assert_called_once_with(self.ctx, stack=s)
+ mock_load.assert_called_once_with(self.ctx, stack=s,
+ check_refresh_cred=True)
mock_validate.assert_called_once_with()
def test_stack_update_existing_parameters(self):
@@ -385,18 +388,28 @@ resources:
self.ctx, stk, t, {}, None, None, None, api_args, None)
self.assertEqual(['tag1'], updated_stack.tags)
+ # update with empty tags clears old tags
+ api_args[rpc_api.STACK_TAGS] = []
+ _, _, updated_stack = self.man._prepare_stack_updates(
+ self.ctx, stk, t, {}, None, None, None, api_args, None)
+ self.assertEqual([], updated_stack.tags)
+
# with new tags
api_args[rpc_api.STACK_TAGS] = ['tag2']
_, _, updated_stack = self.man._prepare_stack_updates(
self.ctx, stk, t, {}, None, None, None, api_args, None)
self.assertEqual(['tag2'], updated_stack.tags)
+ api_args[rpc_api.STACK_TAGS] = ['tag3']
+ _, _, updated_stack = self.man._prepare_stack_updates(
+ self.ctx, stk, t, {}, None, None, None, api_args, None)
+ self.assertEqual(['tag3'], updated_stack.tags)
# with no PARAM_EXISTING flag and no tags
del api_args[rpc_api.PARAM_EXISTING]
del api_args[rpc_api.STACK_TAGS]
_, _, updated_stack = self.man._prepare_stack_updates(
self.ctx, stk, t, {}, None, None, None, api_args, None)
- self.assertIsNone(updated_stack.tags)
+ self.assertEqual([], updated_stack.tags)
def test_stack_update_existing_registry(self):
# Use a template with existing flag and ensure the
@@ -546,7 +559,8 @@ resources:
mock_validate.assert_called_once_with()
mock_tmpl.assert_called_once_with(template, files=None)
mock_env.assert_called_once_with(params)
- mock_load.assert_called_once_with(self.ctx, stack=s)
+ mock_load.assert_called_once_with(self.ctx, stack=s,
+ check_refresh_cred=True)
mock_stack.assert_called_once_with(
self.ctx, stk.name, stk.t,
convergence=False,
@@ -634,7 +648,7 @@ resources:
self.assertEqual(exception.NotSupported, ex.exc_info[0])
self.assertIn("Cancelling update when stack is "
"UPDATE_COMPLETE",
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
@mock.patch.object(stack_object.Stack, 'count_total_resources')
def test_stack_update_equals(self, ctr):
@@ -694,7 +708,8 @@ resources:
username='test_username',
converge=False
)
- mock_load.assert_called_once_with(self.ctx, stack=s)
+ mock_load.assert_called_once_with(self.ctx, stack=s,
+ check_refresh_cred=True)
mock_validate.assert_called_once_with()
def test_stack_update_stack_id_equal(self):
@@ -741,9 +756,11 @@ resources:
old_stack['A'].properties['Foo'])
self.assertEqual(create_stack['A'].id, old_stack['A'].id)
- mock_load.assert_called_once_with(self.ctx, stack=s)
+ mock_load.assert_called_once_with(self.ctx, stack=s,
+ check_refresh_cred=True)
def test_stack_update_exceeds_resource_limit(self):
+ self.patchobject(context, 'StoredContext')
stack_name = 'test_stack_update_exceeds_resource_limit'
params = {}
tpl = {'HeatTemplateFormatVersion': '2012-12-12',
@@ -765,7 +782,7 @@ resources:
None, {rpc_api.PARAM_CONVERGE: False})
self.assertEqual(exception.RequestLimitExceeded, ex.exc_info[0])
self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
def test_stack_update_verify_err(self):
stack_name = 'service_update_verify_err_test_stack'
@@ -813,7 +830,8 @@ resources:
username='test_username',
converge=False
)
- mock_load.assert_called_once_with(self.ctx, stack=s)
+ mock_load.assert_called_once_with(self.ctx, stack=s,
+ check_refresh_cred=True)
mock_validate.assert_called_once_with()
def test_stack_update_nonexist(self):
@@ -859,7 +877,7 @@ resources:
template, params, None, api_args)
self.assertEqual(exception.MissingCredentialError, ex.exc_info[0])
self.assertEqual('Missing required credential: X-Auth-Key',
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
mock_get.assert_called_once_with(self.ctx, stk.identifier())
@@ -877,7 +895,8 @@ resources:
user_creds_id=u'1', username='test_username',
converge=False
)
- mock_load.assert_called_once_with(self.ctx, stack=s)
+ mock_load.assert_called_once_with(self.ctx, stack=s,
+ check_refresh_cred=True)
def test_stack_update_existing_template(self):
'''Update a stack using the same template.'''
@@ -940,7 +959,7 @@ resources:
self.assertEqual(exception.NotSupported, ex.exc_info[0])
self.assertIn("PATCH update to non-COMPLETE stack",
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
def test_update_immutable_parameter_disallowed(self):
diff --git a/heat/tests/engine/service/test_threadgroup_mgr.py b/heat/tests/engine/service/test_threadgroup_mgr.py
index f53880fbe..6df892e13 100644
--- a/heat/tests/engine/service/test_threadgroup_mgr.py
+++ b/heat/tests/engine/service/test_threadgroup_mgr.py
@@ -11,9 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import eventlet
-import mock
+from unittest import mock
+import eventlet
from oslo_context import context
from heat.engine import service
@@ -76,7 +76,7 @@ class ThreadGroupManagerTest(common.HeatTestCase):
self.assertEqual(self.tg_mock, thm.groups[stack_id])
self.tg_mock.add_timer.assert_called_with(
self.cfg_mock.CONF.periodic_interval,
- self.f, *self.fargs, **self.fkwargs)
+ self.f, None, *self.fargs, **self.fkwargs)
def test_tgm_add_msg_queue(self):
stack_id = 'add_msg_queues_test'
diff --git a/heat/tests/engine/test_check_resource.py b/heat/tests/engine/test_check_resource.py
index 486c15ea7..025992b62 100644
--- a/heat/tests/engine/test_check_resource.py
+++ b/heat/tests/engine/test_check_resource.py
@@ -13,10 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import eventlet
-import mock
+from unittest import mock
import uuid
+import eventlet
from oslo_config import cfg
from heat.common import exception
@@ -352,7 +352,7 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
self.stack.current_traversal,
self.is_update, self.resource,
self.stack)
- mock_rcr.assert_called_once_with(self.ctx, self.is_update,
+ mock_rcr.assert_called_once_with(self.ctx,
self.resource.id, updated_stack)
def test_check_stack_complete_is_invoked_for_replaced_resource(
@@ -385,15 +385,14 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
# A, B are predecessors to C when is_update is True
expected_predecessors = {(self.stack['A'].id, True),
(self.stack['B'].id, True)}
- self.cr.retrigger_check_resource(self.ctx, self.is_update,
- resC.id, self.stack)
+ self.cr.retrigger_check_resource(self.ctx, resC.id, self.stack)
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, resC.id,
self.stack.current_traversal,
mock.ANY, (resC.id, True), None,
True, None)
call_args, call_kwargs = mock_pcr.call_args
actual_predecessors = call_args[4]
- self.assertItemsEqual(expected_predecessors, actual_predecessors)
+ self.assertCountEqual(expected_predecessors, actual_predecessors)
def test_update_retrigger_check_resource_new_traversal_deletes_rsrc(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
@@ -403,7 +402,7 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
[(1, False), (1, True)], [(2, False), None]])
# simulate rsrc 2 completing its update for old traversal
# and calling rcr
- self.cr.retrigger_check_resource(self.ctx, True, 2, self.stack)
+ self.cr.retrigger_check_resource(self.ctx, 2, self.stack)
# Ensure that pcr was called with proper delete traversal
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, 2,
self.stack.current_traversal,
@@ -418,7 +417,7 @@ class CheckWorkflowUpdateTest(common.HeatTestCase):
[(1, False), (1, True)], [(2, False), (2, True)]])
# simulate rsrc 2 completing its delete for old traversal
# and calling rcr
- self.cr.retrigger_check_resource(self.ctx, False, 2, self.stack)
+ self.cr.retrigger_check_resource(self.ctx, 2, self.stack)
# Ensure that pcr was called with proper delete traversal
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, 2,
self.stack.current_traversal,
diff --git a/heat/tests/engine/test_dependencies.py b/heat/tests/engine/test_dependencies.py
index 9f6d0c82b..7643232d4 100644
--- a/heat/tests/engine/test_dependencies.py
+++ b/heat/tests/engine/test_dependencies.py
@@ -30,8 +30,8 @@ class dependenciesTest(common.HeatTestCase):
self.assertEqual(len(nodes), len(order))
- for l, f in deps:
- checkorder(order.index(f), order.index(l))
+ for lr, fr in deps:
+ checkorder(order.index(fr), order.index(lr))
def _dep_test_fwd(self, *deps):
def assertLess(a, b):
@@ -57,16 +57,16 @@ class dependenciesTest(common.HeatTestCase):
def test_single_node(self):
d = dependencies.Dependencies([('only', None)])
- l = list(iter(d))
- self.assertEqual(1, len(l))
- self.assertEqual('only', l[0])
+ li = list(iter(d))
+ self.assertEqual(1, len(li))
+ self.assertEqual('only', li[0])
def test_disjoint(self):
d = dependencies.Dependencies([('1', None), ('2', None)])
- l = list(iter(d))
- self.assertEqual(2, len(l))
- self.assertIn('1', l)
- self.assertIn('2', l)
+ li = list(iter(d))
+ self.assertEqual(2, len(li))
+ self.assertIn('1', li)
+ self.assertIn('2', li)
def test_single_fwd(self):
self._dep_test_fwd(('second', 'first'))
@@ -170,9 +170,9 @@ class dependenciesTest(common.HeatTestCase):
def test_single_partial(self):
d = dependencies.Dependencies([('last', 'first')])
p = d['last']
- l = list(iter(p))
- self.assertEqual(1, len(l))
- self.assertEqual('last', l[0])
+ li = list(iter(p))
+ self.assertEqual(1, len(li))
+ self.assertEqual('last', li[0])
def test_simple_partial(self):
d = dependencies.Dependencies([('last', 'middle'),
diff --git a/heat/tests/engine/test_engine_worker.py b/heat/tests/engine/test_engine_worker.py
index affb51186..0eed84fb0 100644
--- a/heat/tests/engine/test_engine_worker.py
+++ b/heat/tests/engine/test_engine_worker.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
from heat.db.sqlalchemy import api as db_api
from heat.engine import check_resource
@@ -197,6 +197,29 @@ class WorkerServiceTest(common.HeatTestCase):
self.assertEqual('stack1', call_args1.name)
self.assertEqual('stack2', call_args2.name)
+ @mock.patch.object(worker, '_stop_traversal')
+ def test_stop_nested_traversal_stops_deeply_nested_stack(self, mock_st):
+ mock_tgm = mock.Mock()
+ ctx = utils.dummy_context()
+ tmpl = templatem.Template.create_empty_template()
+ stack1 = parser.Stack(ctx, 'stack1', tmpl,
+ current_traversal='123')
+ stack1.store()
+ stack2 = parser.Stack(ctx, 'stack2', tmpl,
+ owner_id=stack1.id, current_traversal='456')
+ stack2.store()
+ stack3 = parser.Stack(ctx, 'stack3', tmpl,
+ owner_id=stack2.id, current_traversal='789')
+ stack3.store()
+ _worker = worker.WorkerService('host-1', 'topic-1', 'engine-001',
+ mock_tgm)
+ _worker.stop_traversal(stack2)
+ self.assertEqual(2, mock_st.call_count)
+ call1, call2 = mock_st.call_args_list
+ call_args1, call_args2 = call1[0][0], call2[0][0]
+ self.assertEqual('stack2', call_args1.name)
+ self.assertEqual('stack3', call_args2.name)
+
@mock.patch.object(worker, '_cancel_workers')
@mock.patch.object(worker.WorkerService, 'stop_traversal')
def test_stop_all_workers_when_stack_in_progress(self, mock_st, mock_cw):
diff --git a/heat/tests/engine/test_plugin_manager.py b/heat/tests/engine/test_plugin_manager.py
index 4466bbf3f..4cf962252 100644
--- a/heat/tests/engine/test_plugin_manager.py
+++ b/heat/tests/engine/test_plugin_manager.py
@@ -14,8 +14,6 @@
import sys
import types
-import six
-
from heat.engine import plugin_manager
from heat.tests import common
@@ -122,7 +120,7 @@ class TestPluginManager(common.HeatTestCase):
all_items = pm.load_all(mgr)
- for item in six.iteritems(current_test_mapping()):
+ for item in current_test_mapping().items():
self.assertNotIn(item, all_items)
def test_load_all(self):
@@ -138,5 +136,5 @@ class TestPluginManager(common.HeatTestCase):
all_items = pm.load_all(mgr)
- for item in six.iteritems(current_test_mapping()):
+ for item in current_test_mapping().items():
self.assertIn(item, all_items)
diff --git a/heat/tests/engine/test_resource_type.py b/heat/tests/engine/test_resource_type.py
index 2f67d700d..770a357ff 100644
--- a/heat/tests/engine/test_resource_type.py
+++ b/heat/tests/engine/test_resource_type.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.engine import environment
@@ -196,7 +195,7 @@ class ResourceTypeTest(common.HeatTestCase):
type_name='ResourceWithWrongRefOnFile')
msg = ('There was an error loading the definition of the global '
'resource type ResourceWithWrongRefOnFile.')
- self.assertIn(msg, six.text_type(ex))
+ self.assertIn(msg, str(ex))
def test_resource_schema_no_template_file(self):
self._no_template_file(self.eng.resource_schema)
@@ -209,7 +208,7 @@ class ResourceTypeTest(common.HeatTestCase):
self.eng.resource_schema,
self.ctx, type_name='Bogus')
msg = 'The Resource Type (Bogus) could not be found.'
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_resource_schema_unavailable(self):
type_name = 'ResourceWithDefaultClientName'
@@ -227,7 +226,7 @@ class ResourceTypeTest(common.HeatTestCase):
'type ResourceWithDefaultClientName, reason: '
'Service endpoint not in service catalog.')
self.assertEqual(msg,
- six.text_type(ex),
+ str(ex),
'invalid exception message')
mock_is_service_available.assert_called_once_with(self.ctx)
diff --git a/heat/tests/engine/test_scheduler.py b/heat/tests/engine/test_scheduler.py
index b688e6a9b..112d810a1 100644
--- a/heat/tests/engine/test_scheduler.py
+++ b/heat/tests/engine/test_scheduler.py
@@ -13,12 +13,10 @@
import contextlib
import itertools
+from unittest import mock
import eventlet
-import mock
-import six
-from heat.common.i18n import repr_wrapper
from heat.common import timeutils
from heat.engine import dependencies
from heat.engine import scheduler
@@ -68,7 +66,7 @@ class ExceptionGroupTest(common.HeatTestCase):
ex2 = Exception("ex 2")
exception_group = scheduler.ExceptionGroup([ex1, ex2])
- self.assertEqual("['ex 1', 'ex 2']", six.text_type(exception_group))
+ self.assertEqual("['ex 1', 'ex 2']", str(exception_group))
class StepTracker(object):
@@ -1242,9 +1240,9 @@ class DescriptionTest(common.HeatTestCase):
self.assertEqual('f', scheduler.task_description(f))
def test_lambda(self):
- l = lambda: None
+ lam = lambda: None # noqa: E731
- self.assertEqual('<lambda>', scheduler.task_description(l))
+ self.assertEqual('<lambda>', scheduler.task_description(lam))
def test_method(self):
class C(object):
@@ -1273,8 +1271,6 @@ class DescriptionTest(common.HeatTestCase):
self.assertEqual('o', scheduler.task_description(C()))
def test_unicode(self):
- @repr_wrapper
- @six.python_2_unicode_compatible
class C(object):
def __str__(self):
return u'C "\u2665"'
diff --git a/heat/tests/engine/test_sync_point.py b/heat/tests/engine/test_sync_point.py
index cbe03d164..d424160c8 100644
--- a/heat/tests/engine/test_sync_point.py
+++ b/heat/tests/engine/test_sync_point.py
@@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
+
from oslo_db import exception
from heat.engine import sync_point
diff --git a/heat/tests/engine/tools.py b/heat/tests/engine/tools.py
index 5192134dc..2b75a8535 100644
--- a/heat/tests/engine/tools.py
+++ b/heat/tests/engine/tools.py
@@ -10,9 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sys
-
-import six
+import functools
from heat.common import template_format
from heat.engine.clients.os import glance
@@ -268,7 +266,7 @@ def stack_context(stack_name, create_res=True, convergence=False):
of test success/failure.
"""
def stack_delete(test_fn):
- @six.wraps(test_fn)
+ @functools.wraps(test_fn)
def wrapped_test(test_case, *args, **kwargs):
def create_stack():
ctx = getattr(test_case, 'ctx', None)
@@ -285,12 +283,11 @@ def stack_context(stack_name, create_res=True, convergence=False):
create_stack()
try:
test_fn(test_case, *args, **kwargs)
- except Exception:
- exc_class, exc_val, exc_tb = sys.exc_info()
+ except Exception as err:
try:
delete_stack()
finally:
- six.reraise(exc_class, exc_val, exc_tb)
+ raise err from None
else:
delete_stack()
diff --git a/heat/tests/fakes.py b/heat/tests/fakes.py
index 63fd86a8e..2de791830 100644
--- a/heat/tests/fakes.py
+++ b/heat/tests/fakes.py
@@ -18,8 +18,9 @@ wrong the tests might raise AssertionError. I've indicated in comments the
places where actual behavior differs from the spec.
"""
+from unittest import mock
+
from keystoneauth1 import plugin
-import mock
class FakeClient(object):
@@ -29,21 +30,26 @@ class FakeClient(object):
expected = (method, url)
called = self.client.callstack[pos][0:2]
- assert self.client.callstack, ("Expected %s %s "
- "but no calls were made." % expected)
+ if not self.client.callstack:
+ raise AssertionError("Expected %s %s "
+ "but no calls were made." % expected)
- assert expected == called, 'Expected %s %s; got %s %s' % (
- expected + called)
+ if expected != called:
+ raise AssertionError('Expected %s %s; got %s %s' %
+ (expected + called))
if body is not None:
- assert self.client.callstack[pos][2] == body
+ if self.client.callstack[pos][2] != body:
+ raise AssertionError('%s != %s' %
+ (self.client.callstack[pos][2], body))
def assert_called_anytime(self, method, url, body=None):
"""Assert that an API method was called anytime in the test."""
expected = (method, url)
- assert self.client.callstack, ("Expected %s %s but no calls "
- "were made." % expected)
+ if not self.client.callstack:
+ raise AssertionError("Expected %s %s but no calls "
+ "were made." % expected)
found = False
for entry in self.client.callstack:
@@ -51,16 +57,12 @@ class FakeClient(object):
found = True
break
- assert found, 'Expected %s %s; got %s' % (expected,
- self.client.callstack)
+ if not found:
+ raise AssertionError('Expected %s; got %s' %
+ (expected, self.client.callstack))
if body is not None:
- try:
- assert entry[2] == body
- except AssertionError:
- print(entry[2])
- print("!=")
- print(body)
- raise
+ if entry[2] != body:
+ raise AssertionError("%s != %s" % (entry[2], body))
self.client.callstack = []
diff --git a/heat/tests/generic_resource.py b/heat/tests/generic_resource.py
index 5f98af74a..4bad64ea2 100644
--- a/heat/tests/generic_resource.py
+++ b/heat/tests/generic_resource.py
@@ -60,6 +60,12 @@ class GenericResource(resource.Resource):
self.type())
+class CheckableResource(GenericResource):
+ def handle_check(self):
+ LOG.warning('Checking generic resource (Type "%s")',
+ self.type())
+
+
class CancellableResource(GenericResource):
def check_create_complete(self, cookie):
return True
diff --git a/heat/tests/openstack/aodh/test_alarm.py b/heat/tests/openstack/aodh/test_alarm.py
index 23ab45d96..cb5b6fb53 100644
--- a/heat/tests/openstack/aodh/test_alarm.py
+++ b/heat/tests/openstack/aodh/test_alarm.py
@@ -1,3 +1,4 @@
+
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,13 +14,12 @@
import copy
import json
-
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import aodh
+from heat.engine.clients.os import octavia
from heat.engine import resource
from heat.engine.resources.openstack.aodh import alarm
from heat.engine import rsrc_defn
@@ -142,6 +142,29 @@ event_alarm_template = '''
}
'''
+lbmemberhealth_alarm_template = '''
+{
+ "heat_template_version" : "newton",
+ "description" : "Loadbalancer member health alarm test",
+ "parameters" : {},
+ "resources" : {
+ "test_loadbalancer_member_health_alarm": {
+ "type": "OS::Aodh::LBMemberHealthAlarm",
+ "properties": {
+ "description": "Something something dark side",
+ "alarm_actions": ["trust+heat://"],
+ "repeat_actions": false,
+ "pool": "12345",
+ "stack": "13579",
+ "autoscaling_group_id": "02468"
+ }
+ },
+ "signal_handler" : {
+ "type" : "SignalResourceType"
+ }
+ }
+}
+'''
FakeAodhAlarm = {'other_attrs': 'val',
'alarm_id': 'foo'}
@@ -325,7 +348,7 @@ class AodhAlarmTest(common.HeatTestCase):
rsrc.properties.data = rsrc.get_alarm_props(properties)
self.assertIsNone(rsrc.properties.data.get('matching_metadata'))
for key in rsrc.properties.data['threshold_rule']['query']:
- self.assertIsInstance(key['value'], six.text_type)
+ self.assertIsInstance(key['value'], str)
def test_no_matching_metadata(self):
"""Make sure that we can pass in an empty matching_metadata."""
@@ -356,7 +379,7 @@ class AodhAlarmTest(common.HeatTestCase):
rsrc.validate)
self.assertEqual(
"Property error: Resources.MEMAlarmHigh.Properties.%s: "
- "Value '60a' is not an integer" % p, six.text_type(error))
+ "Value '60a' is not an integer" % p, str(error))
def test_mem_alarm_high_not_integer_parameters(self):
orig_snippet = template_format.parse(not_string_alarm_template)
@@ -390,7 +413,7 @@ class AodhAlarmTest(common.HeatTestCase):
self.assertEqual(
"Property error: Resources.MEMAlarmHigh.Properties: "
"Property meter_name not assigned",
- six.text_type(error))
+ str(error))
for p in ('period', 'evaluation_periods', 'statistic',
'comparison_operator'):
@@ -709,3 +732,101 @@ class EventAlarmTest(common.HeatTestCase):
res.client().alarm.get.return_value = FakeAodhAlarm
scheduler.TaskRunner(res.create)()
self.assertEqual(FakeAodhAlarm, res.FnGetAtt('show'))
+
+
+class LBMemberHealthAlarmTest(common.HeatTestCase):
+
+ def setUp(self):
+ super(LBMemberHealthAlarmTest, self).setUp()
+ self.fa = mock.Mock()
+ self.patchobject(
+ octavia.OctaviaClientPlugin, 'get_pool').return_value = "9999"
+
+ def create_stack(self, template=None):
+
+ if template is None:
+ template = lbmemberhealth_alarm_template
+ temp = template_format.parse(template)
+ template = tmpl.Template(temp)
+ ctx = utils.dummy_context()
+ ctx.tenant = 'test_tenant'
+ stack = parser.Stack(ctx, utils.random_name(), template,
+ disable_rollback=True)
+ stack.store()
+
+ self.patchobject(aodh.AodhClientPlugin,
+ '_create').return_value = self.fa
+
+ self.patchobject(self.fa.alarm, 'create').return_value = FakeAodhAlarm
+ return stack
+
+ def _prepare_resource(self, for_check=True):
+
+ snippet = template_format.parse(lbmemberhealth_alarm_template)
+ self.stack = utils.parse_stack(snippet)
+ res = self.stack['test_loadbalancer_member_health_alarm']
+ if for_check:
+ res.state_set(res.CREATE, res.COMPLETE)
+ res.client = mock.Mock()
+ mock_alarm = mock.Mock(enabled=True, state='ok')
+ res.client().alarm.get.return_value = mock_alarm
+ return res
+
+ def test_delete(self):
+ test_stack = self.create_stack()
+ rsrc = test_stack['test_loadbalancer_member_health_alarm']
+
+ self.patchobject(aodh.AodhClientPlugin, 'client',
+ return_value=self.fa)
+ self.patchobject(self.fa.alarm, 'delete')
+ rsrc.resource_id = '12345'
+
+ self.assertEqual('12345', rsrc.handle_delete())
+ self.assertEqual(1, self.fa.alarm.delete.call_count)
+
+ def test_check(self):
+ res = self._prepare_resource()
+ scheduler.TaskRunner(res.check)()
+ self.assertEqual((res.CHECK, res.COMPLETE), res.state)
+
+ def test_check_alarm_failure(self):
+ res = self._prepare_resource()
+ res.client().alarm.get.side_effect = Exception('Boom')
+
+ self.assertRaises(exception.ResourceFailure,
+ scheduler.TaskRunner(res.check))
+ self.assertEqual((res.CHECK, res.FAILED), res.state)
+ self.assertIn('Boom', res.status_reason)
+
+ def test_show_resource(self):
+ res = self._prepare_resource(for_check=False)
+ res.client().alarm.create.return_value = FakeAodhAlarm
+ res.client().alarm.get.return_value = FakeAodhAlarm
+ scheduler.TaskRunner(res.create)()
+ self.assertEqual(FakeAodhAlarm, res.FnGetAtt('show'))
+
+ def test_update(self):
+ test_stack = self.create_stack()
+ update_mock = self.patchobject(self.fa.alarm, 'update')
+ test_stack.create()
+ rsrc = test_stack['test_loadbalancer_member_health_alarm']
+
+ update_props = copy.deepcopy(rsrc.properties.data)
+ update_props.update({
+ "enabled": True,
+ "description": "",
+ "insufficient_data_actions": [],
+ "alarm_actions": [],
+ "ok_actions": ["signal_handler"],
+ "pool": "0000",
+ "autoscaling_group_id": "2222"
+ })
+
+ snippet = rsrc_defn.ResourceDefinition(rsrc.name,
+ rsrc.type(),
+ update_props)
+
+ scheduler.TaskRunner(rsrc.update, snippet)()
+
+ self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
+ self.assertEqual(1, update_mock.call_count)
diff --git a/heat/tests/openstack/aodh/test_composite_alarm.py b/heat/tests/openstack/aodh/test_composite_alarm.py
index c67af5956..1d43af681 100644
--- a/heat/tests/openstack/aodh/test_composite_alarm.py
+++ b/heat/tests/openstack/aodh/test_composite_alarm.py
@@ -12,9 +12,7 @@
# under the License.
import copy
-
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -138,7 +136,7 @@ class CompositeAlarmTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
res.validate)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_show_resource(self):
test_stack = self.create_stack(template=alarm_template)
diff --git a/heat/tests/openstack/aodh/test_gnocchi_alarm.py b/heat/tests/openstack/aodh/test_gnocchi_alarm.py
index 1d554efbe..2747b707b 100644
--- a/heat/tests/openstack/aodh/test_gnocchi_alarm.py
+++ b/heat/tests/openstack/aodh/test_gnocchi_alarm.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.common import template_format
diff --git a/heat/tests/openstack/barbican/test_container.py b/heat/tests/openstack/barbican/test_container.py
index d221fd2d8..1309dcede 100644
--- a/heat/tests/openstack/barbican/test_container.py
+++ b/heat/tests/openstack/barbican/test_container.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -205,5 +204,5 @@ class TestContainer(common.HeatTestCase):
self.client.containers.get.return_value = mock_not_active
exc = self.assertRaises(exception.ResourceInError,
res.check_create_complete, 'foo')
- self.assertIn('foo', six.text_type(exc))
- self.assertIn('500', six.text_type(exc))
+ self.assertIn('foo', str(exc))
+ self.assertIn('500', str(exc))
diff --git a/heat/tests/openstack/barbican/test_order.py b/heat/tests/openstack/barbican/test_order.py
index a49373df9..c464b3fa4 100644
--- a/heat/tests/openstack/barbican/test_order.py
+++ b/heat/tests/openstack/barbican/test_order.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -231,5 +230,5 @@ class TestOrder(common.HeatTestCase):
self.barbican.orders.get.return_value = mock_not_active
exc = self.assertRaises(exception.Error,
res.check_create_complete, 'foo')
- self.assertIn('foo', six.text_type(exc))
- self.assertIn('500', six.text_type(exc))
+ self.assertIn('foo', str(exc))
+ self.assertIn('500', str(exc))
diff --git a/heat/tests/openstack/barbican/test_secret.py b/heat/tests/openstack/barbican/test_secret.py
index ffcea6f8f..673ef40da 100644
--- a/heat/tests/openstack/barbican/test_secret.py
+++ b/heat/tests/openstack/barbican/test_secret.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.common import template_format
diff --git a/heat/tests/openstack/blazar/test_host.py b/heat/tests/openstack/blazar/test_host.py
index ab3d750a9..6021b6a8c 100644
--- a/heat/tests/openstack/blazar/test_host.py
+++ b/heat/tests/openstack/blazar/test_host.py
@@ -11,8 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from blazarclient import exception as client_exception
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from heat.common import exception
diff --git a/heat/tests/openstack/blazar/test_lease.py b/heat/tests/openstack/blazar/test_lease.py
index 4991a14d2..00938e3a6 100644
--- a/heat/tests/openstack/blazar/test_lease.py
+++ b/heat/tests/openstack/blazar/test_lease.py
@@ -11,8 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from blazarclient import exception as client_exception
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from heat.common import exception
diff --git a/heat/tests/openstack/cinder/test_qos_specs.py b/heat/tests/openstack/cinder/test_qos_specs.py
index e00eda73e..fc5178aa5 100644
--- a/heat/tests/openstack/cinder/test_qos_specs.py
+++ b/heat/tests/openstack/cinder/test_qos_specs.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os import cinder as c_plugin
from heat.engine.resources.openstack.cinder import qos_specs
@@ -22,7 +22,7 @@ from heat.tests import utils
QOS_SPECS_TEMPLATE = {
'heat_template_version': '2015-10-15',
- 'description': 'Cinder QoS specs creation example',
+ 'description': 'Cinder QoS specs creation example',
'resources': {
'my_qos_specs': {
'type': 'OS::Cinder::QoSSpecs',
@@ -36,7 +36,7 @@ QOS_SPECS_TEMPLATE = {
QOS_ASSOCIATE_TEMPLATE = {
'heat_template_version': '2015-10-15',
- 'description': 'Cinder QoS specs association example',
+ 'description': 'Cinder QoS specs association example',
'resources': {
'my_qos_associate': {
'type': 'OS::Cinder::QoSAssociation',
diff --git a/heat/tests/openstack/cinder/test_quota.py b/heat/tests/openstack/cinder/test_quota.py
index 62b3f9cbb..62312f284 100644
--- a/heat/tests/openstack/cinder/test_quota.py
+++ b/heat/tests/openstack/cinder/test_quota.py
@@ -10,8 +10,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -80,7 +79,7 @@ class CinderQuotaTest(common.HeatTestCase):
def _test_validate(self, resource, error_msg):
exc = self.assertRaises(exception.StackValidationFailed,
resource.validate)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def _test_invalid_property(self, prop_name):
my_quota = self.stack['my_quota']
@@ -156,7 +155,7 @@ class CinderQuotaTest(common.HeatTestCase):
err = self.assertRaises(ValueError, self.my_quota.handle_create)
self.assertEqual(
self.err_msg % {'property': 'gigabytes', 'value': 5, 'total': 6},
- six.text_type(err))
+ str(err))
def test_quota_with_invalid_volumes(self):
fake_v = self.fv(0)
@@ -167,7 +166,7 @@ class CinderQuotaTest(common.HeatTestCase):
err = self.assertRaises(ValueError, self.my_quota.handle_create)
self.assertEqual(
self.err_msg % {'property': 'volumes', 'value': 3, 'total': 4},
- six.text_type(err))
+ str(err))
def test_quota_with_invalid_snapshots(self):
fake_v = self.fv(0)
@@ -179,7 +178,7 @@ class CinderQuotaTest(common.HeatTestCase):
err = self.assertRaises(ValueError, self.my_quota.handle_create)
self.assertEqual(
self.err_msg % {'property': 'snapshots', 'value': 2, 'total': 4},
- six.text_type(err))
+ str(err))
def _test_quota_with_unlimited_value(self, prop_name):
my_quota = self.stack['my_quota']
diff --git a/heat/tests/openstack/cinder/test_volume.py b/heat/tests/openstack/cinder/test_volume.py
index 6a9a860ab..87cc4e160 100644
--- a/heat/tests/openstack/cinder/test_volume.py
+++ b/heat/tests/openstack/cinder/test_volume.py
@@ -14,11 +14,11 @@
import collections
import copy
import json
+from unittest import mock
from cinderclient import exceptions as cinder_exp
-import mock
+from novaclient import exceptions as nova_exp
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common import template_format
@@ -65,7 +65,6 @@ resources:
availability_zone: nova
size: 1
name: test_name
- multiattach: True
attachment:
type: OS::Cinder::VolumeAttachment
properties:
@@ -120,7 +119,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
self.t, stack, 'volume')
self.assertEqual(
"Property error: resources.volume.properties.size: "
- "0 is out of range (min: 1, max: None)", six.text_type(error))
+ "0 is out of range (min: 1, max: None)", str(error))
def test_cinder_create(self):
fv = vt_base.FakeVolume('creating')
@@ -144,8 +143,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
description='test_description',
name='test_name',
metadata={'key': 'value'},
- volume_type='lvm',
- multiattach=False)
+ volume_type='lvm')
self.assertEqual(2, self.cinder_fc.volumes.get.call_count)
def test_cinder_create_from_image(self):
@@ -176,7 +174,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
description='ImageVolumeDescription',
name='ImageVolume',
imageRef=image_id,
- multiattach=False,
metadata={})
self.assertEqual(2, self.cinder_fc.volumes.get.call_count)
@@ -208,7 +205,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume',
- multiattach=False,
metadata={})
self.cinder_fc.volumes.get.assert_called_once_with(fv.id)
@@ -232,7 +228,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=1, availability_zone='nova',
description=None,
name=vol_name,
- multiattach=False,
metadata={}
)
self.assertEqual(2, self.cinder_fc.volumes.get.call_count)
@@ -246,7 +241,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
description='desc', volume_type='lvm',
metadata={'key': 'value'}, source_volid=None,
bootable=False, created_at='2013-02-25T02:40:21.000000',
- encrypted=False, attachments=[], multiattach=False)
+ encrypted=False, attachments=[])
self._mock_create_volume(vt_base.FakeVolume('creating'),
self.stack_name,
@@ -273,12 +268,11 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
self.assertEqual(u'False', rsrc.FnGetAtt('encrypted'))
self.assertEqual(u'[]', rsrc.FnGetAtt('attachments'))
self.assertEqual([], rsrc.FnGetAtt('attachments_list'))
- self.assertEqual('False', rsrc.FnGetAtt('multiattach'))
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'unknown')
self.assertEqual(
'The Referenced Attribute (volume unknown) is incorrect.',
- six.text_type(error))
+ str(error))
self.cinder_fc.volumes.get.assert_called_with('vol-123')
@@ -364,7 +358,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: resources.volume: '
'Shrinking volume is not supported.',
- six.text_type(ex))
+ str(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
@@ -414,7 +408,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
- self.assertIn('Over limit', six.text_type(ex))
+ self.assertIn('Over limit', str(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.cinder_fc.volumes.extend.assert_called_once_with(fv.id, 2)
@@ -442,7 +436,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn("Volume resize failed - Unknown status error_extending",
- six.text_type(ex))
+ str(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.cinder_fc.volumes.extend.assert_called_once_with(fv.id, 2)
@@ -577,7 +571,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=1, availability_zone='nova',
description='test_description',
name='test_name',
- multiattach=False,
metadata={u'key': u'value'})
self.cinder_fc.volumes.get.assert_called_with(fv.id)
@@ -605,7 +598,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.assertIn("NotSupported: resources.volume2: Shrinking volume is "
- "not supported", six.text_type(ex))
+ "not supported", str(ex))
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 3
@@ -617,7 +610,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=2, availability_zone='nova',
description=None,
name=vol_name,
- multiattach=False,
metadata={}
)
self.cinder_fc.volumes.extend.assert_called_once_with(fv.id, 3)
@@ -720,7 +712,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=1, availability_zone=None,
description='test_description',
name='test_name',
- multiattach=False,
metadata={}
)
self.cinder_fc.backups.create.assert_called_once_with(fv.id,
@@ -763,7 +754,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=1, availability_zone=None,
description='test_description',
name='test_name',
- multiattach=False,
metadata={}
)
self.cinder_fc.backups.create.assert_called_once_with(
@@ -857,7 +847,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=2, availability_zone='nova',
description=None,
name=vol2_name,
- multiattach=False,
metadata={}
)
self.fc.volumes.get_server_volume.assert_called_with(
@@ -904,6 +893,38 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
self.fc.volumes.delete_server_volume.assert_called_with(
'WikiDatabase', 'vol-123')
+ def test_cinder_volume_attachment_with_serv_resize_task_state(self):
+ self.stack_name = 'test_cvolume_attach_usrv_resize_task_state_stack'
+
+ fv1 = self._mock_create_server_volume_script(
+ vt_base.FakeVolume('attaching'))
+ fva = vt_base.FakeVolume('in-use')
+ fv2 = self._mock_create_server_volume_script(
+ vt_base.FakeVolume('attaching'), update=True)
+ self._mock_create_volume(vt_base.FakeVolume('creating'),
+ self.stack_name,
+ extra_get_mocks=[
+ fv1, fva,
+ vt_base.FakeVolume('available'), fv2])
+ self.stub_VolumeConstraint_validate()
+ # delete script
+ self.fc.volumes.get_server_volume.side_effect = [
+ fva, fva, fakes_nova.fake_exception()]
+ self.fc.volumes.delete_server_volume.return_value = None
+
+ stack = utils.parse_stack(self.t, stack_name=self.stack_name)
+
+ self.create_volume(self.t, stack, 'volume')
+ rsrc = self.create_attachment(self.t, stack, 'attachment')
+ prg_detach = mock.MagicMock(cinder_complete=True, nova_complete=True)
+ prg_attach = mock.MagicMock(called=False, srv_id='InstanceInResize')
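+ # Nova rejects the attach with 409 Conflict while the server is mid-resize,
+ # so check_update_complete should defer and leave the attach unmarked.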
+ self.fc.volumes.create_server_volume.side_effect = [
+ nova_exp.Conflict('409')]
+
+ self.assertEqual(False,
+ rsrc.check_update_complete((prg_detach, prg_attach)))
+ self.assertEqual(False, prg_attach.called)
+
def test_delete_attachment_has_not_been_created(self):
self.stack_name = 'test_delete_attachment_has_not_been_created'
stack = utils.parse_stack(self.t, stack_name=self.stack_name)
@@ -938,29 +959,10 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=1, name='test_name', description=None,
availability_zone='nova',
scheduler_hints={'hint1': 'good_advice'},
- multiattach=False,
metadata={}
)
self.assertEqual(2, self.cinder_fc.volumes.get.call_count)
- def test_cinder_create_with_multiattach(self):
- fv = vt_base.FakeVolume('creating')
-
- self.cinder_fc.volumes.create.return_value = fv
- fv_ready = vt_base.FakeVolume('available', id=fv.id)
- self.cinder_fc.volumes.get.side_effect = [fv, fv_ready]
-
- self.stack_name = 'test_cvolume_multiattach_stack'
- stack = utils.parse_stack(self.t, stack_name=self.stack_name)
- self.create_volume(self.t, stack, 'volume4')
-
- self.cinder_fc.volumes.create.assert_called_once_with(
- size=1, name='test_name', description=None,
- availability_zone='nova',
- multiattach=True,
- metadata={})
- self.assertEqual(2, self.cinder_fc.volumes.get.call_count)
-
def test_cinder_create_with_stack_scheduler_hints(self):
fv = vt_base.FakeVolume('creating')
sh.cfg.CONF.set_override('stack_scheduler_hints', True)
@@ -989,7 +991,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=1, name='test_name', description='test_description',
availability_zone=None,
metadata={},
- multiattach=False,
scheduler_hints={shm.HEAT_ROOT_STACK_ID: stack.root_stack_id(),
shm.HEAT_STACK_ID: stack.id,
shm.HEAT_STACK_NAME: stack.name,
@@ -1006,7 +1007,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
vp.update(combinations)
rsrc = stack['volume2']
ex = self.assertRaises(exc, rsrc.validate)
- self.assertEqual(err_msg, six.text_type(ex))
+ self.assertEqual(err_msg, str(ex))
def test_cinder_create_with_image_and_imageRef(self):
self.stack_name = 'test_create_with_image_and_imageRef'
@@ -1020,7 +1021,7 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
vp.update(combinations)
rsrc = stack.get('volume2')
ex = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
- self.assertIn(err_msg, six.text_type(ex))
+ self.assertIn(err_msg, str(ex))
def test_cinder_create_with_image_and_size(self):
self.stack_name = 'test_create_with_image_and_size'
@@ -1171,7 +1172,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
size=1, availability_zone=None,
description='test_description',
name='test_name',
- multiattach=False,
metadata={}
)
self.cinder_fc.backups.create.assert_called_once_with(
@@ -1244,7 +1244,6 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
'volume_image_metadata': {'image_id': '1234',
'image_name': 'test'},
'description': None,
- 'multiattach': False,
'source_volid': None,
'name': 'test-volume-jbdbgdsy3vyg',
'volume_type': 'lvmdriver-1'
@@ -1267,3 +1266,112 @@ class CinderVolumeTest(vt_base.VolumeTestCase):
}
self.assertEqual(expected, reality)
+
+ def test_detach_volume_to_complete_with_resize_task_state(self):
+ fv = vt_base.FakeVolume('creating')
+ self.stack_name = 'test_cvolume_detach_with_resize_task_state_stack'
+
+ self.stub_SnapshotConstraint_validate()
+ self.stub_VolumeConstraint_validate()
+ self.stub_VolumeTypeConstraint_validate()
+ self.cinder_fc.volumes.create.return_value = fv
+ fv_ready = vt_base.FakeVolume('available', id=fv.id)
+ self.cinder_fc.volumes.get.side_effect = [fv, fv_ready]
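+ # The detach attempt hits a 409 Conflict because the server is resizing,
+ # so _detach_volume_to_complete defers and prg_detach stays uncalled.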
+ self.fc.volumes.delete_server_volume.side_effect = [
+ nova_exp.Conflict('409')]
+ self.t['resources']['volume']['properties'].update({
+ 'volume_type': 'lvm',
+ })
+ stack = utils.parse_stack(self.t, stack_name=self.stack_name)
+ rsrc = self.create_volume(self.t, stack, 'volume')
+ prg_detach = mock.MagicMock(called=False, srv_id='InstanceInResize')
+ self.assertEqual(False, rsrc._detach_volume_to_complete(prg_detach))
+ self.assertEqual(False, prg_detach.called)
+
+ def test_detach_volume_to_complete_with_active_task_state(self):
+ fv = vt_base.FakeVolume('creating')
+ self.stack_name = 'test_cvolume_detach_with_active_task_state_stack'
+
+ self.stub_SnapshotConstraint_validate()
+ self.stub_VolumeConstraint_validate()
+ self.stub_VolumeTypeConstraint_validate()
+ self.cinder_fc.volumes.create.return_value = fv
+ fv_ready = vt_base.FakeVolume('available', id=fv.id)
+ self.cinder_fc.volumes.get.side_effect = [fv, fv_ready]
+
+ self.t['resources']['volume']['properties'].update({
+ 'volume_type': 'lvm',
+ })
+ stack = utils.parse_stack(self.t, stack_name=self.stack_name)
+ rsrc = self.create_volume(self.t, stack, 'volume')
+ prg_detach = mock.MagicMock(called=False, srv_id='InstanceInActive')
+ self.assertEqual(False, rsrc._detach_volume_to_complete(prg_detach))
+ self.assertEqual(True, prg_detach.called)
+
+ def test_attach_volume_to_complete_with_resize_task_state(self):
+ fv = vt_base.FakeVolume('creating')
+ self.stack_name = 'test_cvolume_attach_with_resize_task_state_stack'
+
+ self.stub_SnapshotConstraint_validate()
+ self.stub_VolumeConstraint_validate()
+ self.stub_VolumeTypeConstraint_validate()
+ self.cinder_fc.volumes.create.return_value = fv
+ fv_ready = vt_base.FakeVolume('available', id=fv.id)
+ self.cinder_fc.volumes.get.side_effect = [fv, fv_ready]
+ self.fc.volumes.create_server_volume.side_effect = [
+ nova_exp.Conflict('409')]
+ self.t['resources']['volume']['properties'].update({
+ 'volume_type': 'lvm',
+ })
+ stack = utils.parse_stack(self.t, stack_name=self.stack_name)
+ rsrc = self.create_volume(self.t, stack, 'volume')
+ prg_attach = mock.MagicMock(called=False, srv_id='InstanceInResize')
+ self.assertEqual(False, rsrc._attach_volume_to_complete(prg_attach))
+ self.assertEqual(False, prg_attach.called)
+
+ def test_attach_volume_to_complete_with_active_task_state(self):
+ fv = vt_base.FakeVolume('creating')
+ self.stack_name = 'test_cvolume_attach_with_active_task_state_stack'
+
+ self.stub_SnapshotConstraint_validate()
+ self.stub_VolumeConstraint_validate()
+ self.stub_VolumeTypeConstraint_validate()
+ self.cinder_fc.volumes.create.return_value = fv
+ fv_ready = vt_base.FakeVolume('available', id=fv.id)
+ self.cinder_fc.volumes.get.side_effect = [fv, fv_ready]
+
+ self.t['resources']['volume']['properties'].update({
+ 'volume_type': 'lvm',
+ })
+ stack = utils.parse_stack(self.t, stack_name=self.stack_name)
+ rsrc = self.create_volume(self.t, stack, 'volume')
+ self._mock_create_server_volume_script(
+ vt_base.FakeVolume('attaching'))
+
+ prg_attach = mock.MagicMock(called=False, srv_id='InstanceInActive')
+ self.assertEqual(False, rsrc._attach_volume_to_complete(prg_attach))
+ self.assertEqual('vol-123', prg_attach.called)
+
+ def test_empty_string_az(self):
+ fv = vt_base.FakeVolume('creating')
+ self.stack_name = 'test_cvolume_default_stack'
+
+ vol_name = utils.PhysName(self.stack_name, 'volume')
+ self.cinder_fc.volumes.create.return_value = fv
+ fv_ready = vt_base.FakeVolume('available', id=fv.id)
+ self.cinder_fc.volumes.get.side_effect = [fv, fv_ready]
+
+ self.t['resources']['volume']['properties'] = {
+ 'size': '1',
+ 'availability_zone': "",
+ }
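+ # An empty-string availability_zone should be passed to Cinder as None.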
+ stack = utils.parse_stack(self.t, stack_name=self.stack_name)
+ self.create_volume(self.t, stack, 'volume')
+
+ self.cinder_fc.volumes.create.assert_called_once_with(
+ size=1, availability_zone=None,
+ description=None,
+ name=vol_name,
+ metadata={}
+ )
diff --git a/heat/tests/openstack/cinder/test_volume_type.py b/heat/tests/openstack/cinder/test_volume_type.py
index 0d31ba24a..de094fb24 100644
--- a/heat/tests/openstack/cinder/test_volume_type.py
+++ b/heat/tests/openstack/cinder/test_volume_type.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.engine.clients.os import cinder as c_plugin
@@ -185,7 +184,7 @@ class CinderVolumeTypeTest(common.HeatTestCase):
self.my_volume_type.validate)
expected = ('Can not specify property "projects" '
'if the volume type is public.')
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
def test_validate_projects_when_private(self):
tmpl = self.stack.t.t
diff --git a/heat/tests/openstack/cinder/test_volume_type_encryption.py b/heat/tests/openstack/cinder/test_volume_type_encryption.py
index 08815b7bc..563c35aa4 100644
--- a/heat/tests/openstack/cinder/test_volume_type_encryption.py
+++ b/heat/tests/openstack/cinder/test_volume_type_encryption.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.engine.clients.os import cinder as c_plugin
diff --git a/heat/tests/openstack/cinder/test_volume_utils.py b/heat/tests/openstack/cinder/test_volume_utils.py
index e5aee1bc1..549d98058 100644
--- a/heat/tests/openstack/cinder/test_volume_utils.py
+++ b/heat/tests/openstack/cinder/test_volume_utils.py
@@ -11,9 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient.v2 import client as cinderclient
-import mock
-import six
from heat.engine.clients.os import cinder
from heat.engine.clients.os import nova
@@ -122,7 +122,7 @@ class FakeVolume(object):
def __init__(self, status, **attrs):
self.status = status
- for key, value in six.iteritems(attrs):
+ for key, value in attrs.items():
setattr(self, key, value)
if 'id' not in attrs:
self.id = self._ID
diff --git a/heat/tests/openstack/designate/test_domain.py b/heat/tests/openstack/designate/test_domain.py
deleted file mode 100644
index ceff73da9..000000000
--- a/heat/tests/openstack/designate/test_domain.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from designateclient import exceptions as designate_exception
-from designateclient.v1 import domains
-
-from heat.engine.resources.openstack.designate import domain
-from heat.engine import stack
-from heat.engine import template
-from heat.tests import common
-from heat.tests import utils
-
-
-sample_template = {
- 'heat_template_version': '2015-04-30',
- 'resources': {
- 'test_resource': {
- 'type': 'OS::Designate::Domain',
- 'properties': {
- 'name': 'test-domain.com',
- 'description': 'Test domain',
- 'ttl': 3600,
- 'email': 'abc@test-domain.com'
- }
- }
- }
-}
-
-
-class DesignateDomainTest(common.HeatTestCase):
-
- def setUp(self):
- super(DesignateDomainTest, self).setUp()
-
- self.ctx = utils.dummy_context()
-
- self.stack = stack.Stack(
- self.ctx, 'test_stack',
- template.Template(sample_template)
- )
-
- self.test_resource = self.stack['test_resource']
-
- # Mock client plugin
- self.test_client_plugin = mock.MagicMock()
- self.test_resource.client_plugin = mock.MagicMock(
- return_value=self.test_client_plugin)
-
- # Mock client
- self.test_client = mock.MagicMock()
- self.test_resource.client = mock.MagicMock(
- return_value=self.test_client)
-
- def _get_mock_resource(self):
- value = mock.MagicMock()
- value.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
- value.serial = '1434596972'
-
- return value
-
- def test_resource_handle_create(self):
- mock_domain_create = self.test_client_plugin.domain_create
- mock_resource = self._get_mock_resource()
- mock_domain_create.return_value = mock_resource
-
- # validate the properties
- self.assertEqual(
- 'test-domain.com',
- self.test_resource.properties.get(domain.DesignateDomain.NAME))
- self.assertEqual(
- 'Test domain',
- self.test_resource.properties.get(
- domain.DesignateDomain.DESCRIPTION))
- self.assertEqual(
- 3600,
- self.test_resource.properties.get(domain.DesignateDomain.TTL))
- self.assertEqual(
- 'abc@test-domain.com',
- self.test_resource.properties.get(domain.DesignateDomain.EMAIL))
-
- self.test_resource.data_set = mock.Mock()
- self.test_resource.handle_create()
-
- args = dict(
- name='test-domain.com',
- description='Test domain',
- ttl=3600,
- email='abc@test-domain.com'
- )
-
- mock_domain_create.assert_called_once_with(**args)
-
- # validate physical resource id
- self.assertEqual(mock_resource.id, self.test_resource.resource_id)
-
- def test_resource_handle_update(self):
- mock_domain_update = self.test_client_plugin.domain_update
- self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
-
- prop_diff = {domain.DesignateDomain.EMAIL: 'xyz@test-domain.com',
- domain.DesignateDomain.DESCRIPTION: 'updated description',
- domain.DesignateDomain.TTL: 4200}
-
- self.test_resource.handle_update(json_snippet=None,
- tmpl_diff=None,
- prop_diff=prop_diff)
-
- args = dict(
- id=self.test_resource.resource_id,
- description='updated description',
- ttl=4200,
- email='xyz@test-domain.com'
- )
- mock_domain_update.assert_called_once_with(**args)
-
- def test_resource_handle_delete(self):
- mock_domain_delete = self.test_client.domains.delete
- self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
- mock_domain_delete.return_value = None
-
- self.assertEqual('477e8273-60a7-4c41-b683-fdb0bc7cd151',
- self.test_resource.handle_delete())
- mock_domain_delete.assert_called_once_with(
- self.test_resource.resource_id
- )
-
- def test_resource_handle_delete_resource_id_is_none(self):
- self.test_resource.resource_id = None
- self.assertIsNone(self.test_resource.handle_delete())
-
- def test_resource_handle_delete_not_found(self):
- mock_domain_delete = self.test_client.domains.delete
- mock_domain_delete.side_effect = designate_exception.NotFound
- self.assertIsNone(self.test_resource.handle_delete())
-
- def test_resolve_attributes(self):
- mock_domain = self._get_mock_resource()
- self.test_resource.resource_id = mock_domain.id
- self.test_client.domains.get.return_value = mock_domain
- self.assertEqual(mock_domain.serial,
- self.test_resource._resolve_attribute(
- domain.DesignateDomain.SERIAL
- ))
- self.test_client.domains.get.assert_called_once_with(
- self.test_resource.resource_id
- )
-
- def test_resource_show_resource(self):
- args = dict(
- name='test',
- description='updated description',
- ttl=4200,
- email='xyz@test-domain.com'
- )
-
- rsc = domains.Domain(args)
- mock_notification_get = self.test_client.domains.get
- mock_notification_get.return_value = rsc
-
- self.assertEqual(args,
- self.test_resource._show_resource(),
- 'Failed to show resource')
-
- def test_no_ttl(self):
- mock_domain_create = self.test_client_plugin.domain_create
- mock_resource = self._get_mock_resource()
- mock_domain_create.return_value = mock_resource
-
- self.test_resource.properties.data['ttl'] = None
-
- self.test_resource.handle_create()
- mock_domain_create.assert_called_once_with(
- name='test-domain.com', description='Test domain',
- email='abc@test-domain.com')
-
- def test_domain_get_live_state(self):
- return_domain = {
- 'name': 'test-domain.com',
- 'description': 'Test domain',
- 'ttl': 3600,
- 'email': 'abc@test-domain.com'
- }
- self.test_client.domains.get.return_value = return_domain
- self.test_resource.resource_id = '1234'
-
- reality = self.test_resource.get_live_state(
- self.test_resource.properties)
-
- self.assertEqual(return_domain, reality)
-
- def test_domain_get_live_state_ttl_equals_zero(self):
- return_domain = {
- 'name': 'test-domain.com',
- 'description': 'Test domain',
- 'ttl': 0,
- 'email': 'abc@test-domain.com'
- }
- self.test_client.domains.get.return_value = return_domain
- self.test_resource.resource_id = '1234'
-
- reality = self.test_resource.get_live_state(
- self.test_resource.properties)
-
- self.assertEqual(return_domain, reality)
diff --git a/heat/tests/openstack/designate/test_record.py b/heat/tests/openstack/designate/test_record.py
deleted file mode 100644
index 747ed51e8..000000000
--- a/heat/tests/openstack/designate/test_record.py
+++ /dev/null
@@ -1,290 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from designateclient import exceptions as designate_exception
-from designateclient.v1 import records
-import mock
-
-from heat.engine.resources.openstack.designate import record
-from heat.engine import stack
-from heat.engine import template
-from heat.tests import common
-from heat.tests import utils
-
-
-sample_template = {
- 'heat_template_version': '2015-04-30',
- 'resources': {
- 'test_resource': {
- 'type': 'OS::Designate::Record',
- 'properties': {
- 'name': 'test-record.com',
- 'description': 'Test record',
- 'ttl': 3600,
- 'type': 'MX',
- 'priority': 1,
- 'data': '1.1.1.1',
- 'domain': '1234567'
- }
- }
- }
-}
-
-
-class DesignateRecordTest(common.HeatTestCase):
-
- def setUp(self):
- super(DesignateRecordTest, self).setUp()
-
- self.ctx = utils.dummy_context()
-
- self.stack = stack.Stack(
- self.ctx, 'test_stack',
- template.Template(sample_template)
- )
-
- self.test_resource = self.stack['test_resource']
-
- # Mock client plugin
- self.test_client_plugin = mock.MagicMock()
- self.test_resource.client_plugin = mock.MagicMock(
- return_value=self.test_client_plugin)
-
- # Mock client
- self.test_client = mock.MagicMock()
- self.test_resource.client = mock.MagicMock(
- return_value=self.test_client)
-
- def _get_mock_resource(self):
- value = mock.MagicMock()
- value.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
-
- return value
-
- def test_resource_validate_properties(self):
- mock_record_create = self.test_client_plugin.record_create
- mock_resource = self._get_mock_resource()
- mock_record_create.return_value = mock_resource
-
- # validate the properties
- self.assertEqual(
- 'test-record.com',
- self.test_resource.properties.get(record.DesignateRecord.NAME))
- self.assertEqual(
- 'Test record',
- self.test_resource.properties.get(
- record.DesignateRecord.DESCRIPTION))
- self.assertEqual(
- 3600,
- self.test_resource.properties.get(record.DesignateRecord.TTL))
- self.assertEqual(
- 'MX',
- self.test_resource.properties.get(record.DesignateRecord.TYPE))
- self.assertEqual(
- 1,
- self.test_resource.properties.get(record.DesignateRecord.PRIORITY))
- self.assertEqual(
- '1.1.1.1',
- self.test_resource.properties.get(record.DesignateRecord.DATA))
- self.assertEqual(
- '1234567',
- self.test_resource.properties.get(
- record.DesignateRecord.DOMAIN))
-
- def test_resource_handle_create_non_mx_or_srv(self):
- mock_record_create = self.test_client_plugin.record_create
- mock_resource = self._get_mock_resource()
- mock_record_create.return_value = mock_resource
-
- for type in (set(self.test_resource._ALLOWED_TYPES) -
- set([self.test_resource.MX,
- self.test_resource.SRV])):
- self.test_resource.properties = args = dict(
- name='test-record.com',
- description='Test record',
- ttl=3600,
- type=type,
- priority=1,
- data='1.1.1.1',
- domain='1234567'
- )
-
- self.test_resource.handle_create()
-
- # Make sure priority is set to None for non mx or srv records
- args['priority'] = None
- mock_record_create.assert_called_with(
- **args
- )
-
- # validate physical resource id
- self.assertEqual(mock_resource.id, self.test_resource.resource_id)
-
- def test_resource_handle_create_mx_or_srv(self):
- mock_record_create = self.test_client_plugin.record_create
- mock_resource = self._get_mock_resource()
- mock_record_create.return_value = mock_resource
-
- for type in [self.test_resource.MX, self.test_resource.SRV]:
- self.test_resource.properties = args = dict(
- name='test-record.com',
- description='Test record',
- ttl=3600,
- type=type,
- priority=1,
- data='1.1.1.1',
- domain='1234567'
- )
-
- self.test_resource.handle_create()
-
- mock_record_create.assert_called_with(
- **args
- )
-
- # validate physical resource id
- self.assertEqual(mock_resource.id, self.test_resource.resource_id)
-
- def test_resource_handle_update_non_mx_or_srv(self):
- mock_record_update = self.test_client_plugin.record_update
- self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
-
- for type in (set(self.test_resource._ALLOWED_TYPES) -
- set([self.test_resource.MX,
- self.test_resource.SRV])):
- prop_diff = args = {
- record.DesignateRecord.DESCRIPTION: 'updated description',
- record.DesignateRecord.TTL: 4200,
- record.DesignateRecord.TYPE: type,
- record.DesignateRecord.DATA: '2.2.2.2',
- record.DesignateRecord.PRIORITY: 1}
-
- self.test_resource.handle_update(json_snippet=None,
- tmpl_diff=None,
- prop_diff=prop_diff)
-
- # priority is not considered for records other than mx or srv
- args.update(dict(
- id=self.test_resource.resource_id,
- priority=None,
- domain='1234567',
- ))
- mock_record_update.assert_called_with(**args)
-
- def test_resource_handle_update_mx_or_srv(self):
- mock_record_update = self.test_client_plugin.record_update
- self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
-
- for type in [self.test_resource.MX, self.test_resource.SRV]:
- prop_diff = args = {
- record.DesignateRecord.DESCRIPTION: 'updated description',
- record.DesignateRecord.TTL: 4200,
- record.DesignateRecord.TYPE: type,
- record.DesignateRecord.DATA: '2.2.2.2',
- record.DesignateRecord.PRIORITY: 1}
-
- self.test_resource.handle_update(json_snippet=None,
- tmpl_diff=None,
- prop_diff=prop_diff)
-
- args.update(dict(
- id=self.test_resource.resource_id,
- domain='1234567',
- ))
- mock_record_update.assert_called_with(**args)
-
- def test_resource_handle_delete(self):
- mock_record_delete = self.test_client_plugin.record_delete
- self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
- mock_record_delete.return_value = None
-
- self.assertIsNone(self.test_resource.handle_delete())
- mock_record_delete.assert_called_once_with(
- domain='1234567',
- id=self.test_resource.resource_id
- )
-
- def test_resource_handle_delete_resource_id_is_none(self):
- self.test_resource.resource_id = None
- self.assertIsNone(self.test_resource.handle_delete())
-
- def test_resource_handle_delete_not_found(self):
- mock_record_delete = self.test_client_plugin.record_delete
- mock_record_delete.side_effect = designate_exception.NotFound
- self.assertIsNone(self.test_resource.handle_delete())
-
- def test_resource_show_resource(self):
- args = dict(
- name='test-record.com',
- description='Test record',
- ttl=3600,
- type='A',
- priority=1,
- data='1.1.1.1'
- )
- rsc = records.Record(args)
- mock_notification_get = self.test_client_plugin.record_show
- mock_notification_get.return_value = rsc
-
- self.assertEqual(args,
- self.test_resource._show_resource(),
- 'Failed to show resource')
-
- def test_resource_get_live_state(self):
- tmpl = {
- 'heat_template_version': '2015-04-30',
- 'resources': {
- 'test_resource': {
- 'type': 'OS::Designate::Record',
- 'properties': {
- 'name': 'test-record.com',
- 'description': 'Test record',
- 'ttl': 3600,
- 'type': 'MX',
- 'priority': 1,
- 'data': '1.1.1.1',
- 'domain': 'example.com.'
- }
- }
- }
- }
- s = stack.Stack(
- self.ctx, 'test_stack',
- template.Template(tmpl)
- )
-
- test_resource = s['test_resource']
- test_resource.resource_id = '1234'
- test_resource.client_plugin().get_domain_id = mock.MagicMock()
- test_resource.client_plugin().get_domain_id.return_value = '1234567'
-
- test_resource.client().records = mock.MagicMock()
- test_resource.client().records.get.return_value = {
- 'type': 'MX',
- 'data': '1.1.1.1',
- 'ttl': 3600,
- 'description': 'test',
- 'domain_id': '1234567',
- 'name': 'www.example.com.',
- 'priority': 0
- }
-
- reality = test_resource.get_live_state(test_resource.properties)
- expected = {
- 'type': 'MX',
- 'data': '1.1.1.1',
- 'ttl': 3600,
- 'description': 'test',
- 'priority': 0
- }
- self.assertEqual(expected, reality)
diff --git a/heat/tests/openstack/designate/test_recordset.py b/heat/tests/openstack/designate/test_recordset.py
index 7980fc245..53d56fba3 100644
--- a/heat/tests/openstack/designate/test_recordset.py
+++ b/heat/tests/openstack/designate/test_recordset.py
@@ -11,8 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from designateclient import exceptions as designate_exception
-import mock
from heat.common import exception
from heat.engine.resources.openstack.designate import recordset
@@ -165,7 +166,7 @@ class DesignateRecordSetTest(common.HeatTestCase):
self.assertFalse(self.test_resource.check_update_complete())
self.assertTrue(self.test_resource.check_update_complete())
ex = self.assertRaises(exception.ResourceInError,
- self.test_resource.check_create_complete)
+ self.test_resource.check_update_complete)
self.assertIn('Error in RecordSet',
ex.message)
@@ -174,7 +175,8 @@ class DesignateRecordSetTest(common.HeatTestCase):
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
mock_record_delete.return_value = None
- self.assertIsNone(self.test_resource.handle_delete())
+ self.assertEqual(self.test_resource.resource_id,
+ self.test_resource.handle_delete())
mock_record_delete.assert_called_once_with(
zone='1234567',
recordset=self.test_resource.resource_id
@@ -192,10 +194,13 @@ class DesignateRecordSetTest(common.HeatTestCase):
def test_check_delete_complete(self):
self.test_resource.resource_id = self._get_mock_resource()['id']
self._mock_check_status_active()
- self.assertFalse(self.test_resource.check_delete_complete())
- self.assertTrue(self.test_resource.check_delete_complete())
+ self.assertFalse(self.test_resource.check_delete_complete(
+ self.test_resource.resource_id))
+ self.assertTrue(self.test_resource.check_delete_complete(
+ self.test_resource.resource_id))
ex = self.assertRaises(exception.ResourceInError,
- self.test_resource.check_create_complete)
+ self.test_resource.check_delete_complete,
+ self.test_resource.resource_id)
self.assertIn('Error in RecordSet',
ex.message)
diff --git a/heat/tests/openstack/designate/test_zone.py b/heat/tests/openstack/designate/test_zone.py
index 8f5a0288c..598f7d329 100644
--- a/heat/tests/openstack/designate/test_zone.py
+++ b/heat/tests/openstack/designate/test_zone.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.engine.resources.openstack.designate import zone
@@ -211,3 +211,94 @@ class DesignateZoneTest(common.HeatTestCase):
self.test_client.zones.get.assert_called_once_with(
self.test_resource.resource_id
)
+
+
+class DesignateSecondaryZoneTest(common.HeatTestCase):
+
+ def setUp(self):
+ super(DesignateSecondaryZoneTest, self).setUp()
+
+ self.ctx = utils.dummy_context()
+
+ self.primaries = ['::1']
+
+ def _get_mock_resource(self):
+ value = {}
+ value['id'] = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
+ value['serial'] = '1434596972'
+
+ return value
+
+ def test_masters(self):
+ self.do_test({
+ 'heat_template_version': '2015-04-30',
+ 'resources': {
+ 'test_resource': {
+ 'type': 'OS::Designate::Zone',
+ 'properties': {
+ 'name': 'test-zone.com',
+ 'description': 'Test zone',
+ 'ttl': 3600,
+ 'email': 'abc@test-zone.com',
+ 'type': 'SECONDARY',
+ 'masters': self.primaries,
+ }
+ }
+ }
+ })
+
+ def test_primaries(self):
+ self.do_test({
+ 'heat_template_version': '2015-04-30',
+ 'resources': {
+ 'test_resource': {
+ 'type': 'OS::Designate::Zone',
+ 'properties': {
+ 'name': 'test-zone.com',
+ 'description': 'Test zone',
+ 'ttl': 3600,
+ 'email': 'abc@test-zone.com',
+ 'type': 'SECONDARY',
+ 'primaries': self.primaries,
+ }
+ }
+ }
+ })
+
+ def do_test(self, sample_template):
+ self.stack = stack.Stack(
+ self.ctx, 'test_stack',
+ template.Template(sample_template)
+ )
+
+ self.test_resource = self.stack['test_resource']
+
+ # Mock client plugin
+ self.test_client_plugin = mock.MagicMock()
+ self.test_resource.client_plugin = mock.MagicMock(
+ return_value=self.test_client_plugin)
+
+ # Mock client
+ self.test_client = mock.MagicMock()
+ self.test_resource.client = mock.MagicMock(
+ return_value=self.test_client)
+
+ mock_zone_create = self.test_client.zones.create
+ mock_resource = self._get_mock_resource()
+ mock_zone_create.return_value = mock_resource
+
+ self.test_resource.data_set = mock.Mock()
+ self.test_resource.handle_create()
+
+ args = dict(
+ name='test-zone.com',
+ description='Test zone',
+ ttl=3600,
+ email='abc@test-zone.com',
+ type_='SECONDARY',
+ masters=self.primaries
+ )
+
+ mock_zone_create.assert_called_once_with(**args)
+ # validate physical resource id
+ self.assertEqual(mock_resource['id'], self.test_resource.resource_id)
diff --git a/heat/tests/openstack/glance/test_image.py b/heat/tests/openstack/glance/test_image.py
index 01b3a094a..3f4a8c6d1 100644
--- a/heat/tests/openstack/glance/test_image.py
+++ b/heat/tests/openstack/glance/test_image.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from glanceclient import exc
from heat.common import exception
@@ -112,11 +111,12 @@ class GlanceImageTest(common.HeatTestCase):
glance.return_value = self.glanceclient
self.images = self.glanceclient.images
self.image_tags = self.glanceclient.image_tags
+ self.image_members = self.glanceclient.image_members
def _test_validate(self, resource, error_msg):
exc = self.assertRaises(exception.StackValidationFailed,
resource.validate)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_invalid_min_disk(self):
# invalid 'min_disk'
@@ -478,11 +478,12 @@ class GlanceWebImageTest(common.HeatTestCase):
glance.return_value = self.glanceclient
self.images = self.glanceclient.images
self.image_tags = self.glanceclient.image_tags
+ self.image_members = self.glanceclient.image_members
def _test_validate(self, resource, error_msg):
exc = self.assertRaises(exception.StackValidationFailed,
resource.validate)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_invalid_min_disk(self):
# invalid 'min_disk'
@@ -641,9 +642,49 @@ class GlanceWebImageTest(common.HeatTestCase):
name=u'cirros_image',
protected=False,
owner=u'test_owner',
- tags=['tag1'],
+ tags=['tag1']
)
+ def test_image_active_property_image_not_active(self):
+ self.images.reactivate.return_value = None
+ self.images.deactivate.return_value = None
+ value = mock.MagicMock()
+ image_id = '41f0e60c-ebb4-4375-a2b4-845ae8b9c995'
+ value.id = image_id
+ value.status = 'pending'
+ self.images.create.return_value = value
+ self.my_image.handle_create()
+ self.my_image.check_create_complete(image_id)
+ self.images.deactivate.assert_not_called()
+
+ def test_image_active_property_image_active_to_deactivate(self):
+ self.images.reactivate.return_value = None
+ self.images.deactivate.return_value = None
+ value = mock.MagicMock()
+ image_id = '41f0e60c-ebb4-4375-a2b4-845ae8b9c995'
+ value.id = image_id
+ value.status = 'active'
+ self.my_image.resource_id = image_id
+ self.images.create.return_value = value
+ self.images.get.return_value = value
+ self.my_image.check_create_complete(False)
+ self.images.deactivate.assert_called_once_with(
+ self.my_image.resource_id)
+
+ def test_image_active_property_image_status_killed(self):
+ self.images.reactivate.return_value = None
+ self.images.deactivate.return_value = None
+ value = mock.MagicMock()
+ image_id = '41f0e60c-ebb4-4375-a2b4-845ae8b9c995'
+ value.id = image_id
+ value.status = 'killed'
+ self.my_image.resource_id = image_id
+ self.images.create.return_value = value
+ self.images.get.return_value = value
+ ex = self.assertRaises(exception.ResourceInError,
+ self.my_image.check_create_complete, False)
+ self.assertIn('killed', ex.message)
+
def _handle_update_tags(self, prop_diff):
self.my_image.handle_update(json_snippet=None,
tmpl_diff=None,
@@ -679,6 +720,49 @@ class GlanceWebImageTest(common.HeatTestCase):
ramdisk_id='12345678-1234-1234-1234-123456789012'
)
+ def test_image_handle_update_deactivate(self):
+ self.images.reactivate.return_value = None
+ self.images.deactivate.return_value = None
+ value = mock.MagicMock()
+ image_id = '41f0e60c-ebb4-4375-a2b4-845ae8b9c995'
+ value.id = image_id
+ value.status = 'active'
+ self.my_image.resource_id = image_id
+ props = self.stack.t.t['resources']['my_image']['properties'].copy()
+ props['active'] = False
+ self.my_image.t = self.my_image.t.freeze(properties=props)
+ prop_diff = {'active': False}
+ self.my_image.reparse()
+ self.images.update.return_value = value
+ self.images.get.return_value = value
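+ # Flipping 'active' to False should deactivate the existing image.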
+ self.my_image.handle_update(json_snippet=None,
+ tmpl_diff=None,
+ prop_diff=prop_diff)
+ self.images.deactivate.assert_called_once_with(
+ self.my_image.resource_id)
+
+ def test_image_handle_update_reactivate(self):
+ self.images.reactivate.return_value = None
+ self.images.deactivate.return_value = None
+ value = mock.MagicMock()
+ image_id = '41f0e60c-ebb4-4375-a2b4-845ae8b9c995'
+ value.id = image_id
+ value.status = 'deactivated'
+ self.my_image.resource_id = image_id
+ props = self.stack.t.t['resources']['my_image']['properties'].copy()
+ props['active'] = True
+ self.my_image.t = self.my_image.t.freeze(properties=props)
+ prop_diff = {'active': True}
+ self.my_image.reparse()
+ self.images.update.return_value = value
+ self.images.get.return_value = value
+ self.my_image.handle_update(json_snippet=None,
+ tmpl_diff=None,
+ prop_diff=prop_diff)
+ self.my_image.check_update_complete(True)
+ self.images.reactivate.assert_called_once_with(
+ self.my_image.resource_id)
+
def test_image_handle_update_tags(self):
self.my_image.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
@@ -708,6 +792,49 @@ class GlanceWebImageTest(common.HeatTestCase):
'tag1'
)
+ def _handle_update_members(self, prop_diff):
+ self.my_image.handle_update(json_snippet=None,
+ tmpl_diff=None,
+ prop_diff=prop_diff)
+
+ self.image_members.create.assert_called_once_with(
+ self.my_image.resource_id,
+ 'member2'
+ )
+ self.image_members.delete.assert_called_once_with(
+ self.my_image.resource_id,
+ 'member1'
+ )
+
+ def test_image_handle_update_members(self):
+ self.my_image.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
+
+ props = self.stack.t.t['resources']['my_image']['properties'].copy()
+ props['members'] = ['member1']
+ self.my_image.t = self.my_image.t.freeze(properties=props)
+ self.my_image.reparse()
+ prop_diff = {'members': ['member2']}
+
+ self._handle_update_members(prop_diff)
+
+ def test_image_handle_update_remove_members(self):
+ self.my_image.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
+
+ props = self.stack.t.t['resources']['my_image']['properties'].copy()
+ props['members'] = ['member1']
+ self.my_image.t = self.my_image.t.freeze(properties=props)
+ self.my_image.reparse()
+ prop_diff = {'members': None}
+
+ self.my_image.handle_update(json_snippet=None,
+ tmpl_diff=None,
+ prop_diff=prop_diff)
+
+ self.image_members.delete.assert_called_once_with(
+ self.my_image.resource_id,
+ 'member1'
+ )
+
def test_image_handle_update_tags_delete_not_found(self):
self.my_image.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
@@ -751,6 +878,7 @@ class GlanceWebImageTest(common.HeatTestCase):
'name': 'test',
'disk_format': 'qcow2',
'container_format': 'bare',
+ 'active': None,
'protected': False,
'is_public': False,
'min_disk': 0,
@@ -763,6 +891,7 @@ class GlanceWebImageTest(common.HeatTestCase):
'os_version': '1.0',
'owner': 'test_owner',
'ramdisk_id': '12345678-1234-1234-1234-123456789012',
+ 'members': None,
'visibility': 'private'
}
image = show_value
@@ -774,6 +903,7 @@ class GlanceWebImageTest(common.HeatTestCase):
'name': 'test',
'disk_format': 'qcow2',
'container_format': 'bare',
+ 'active': None,
'protected': False,
'min_disk': 0,
'min_ram': 0,
@@ -785,6 +915,7 @@ class GlanceWebImageTest(common.HeatTestCase):
'os_version': '1.0',
'owner': 'test_owner',
'ramdisk_id': '12345678-1234-1234-1234-123456789012',
+ 'members': None,
'visibility': 'private'
}
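Editor's note: the new Glance tests above pin down how the added "active" property is expected to drive image state on update — deactivate when the property goes false on an active image, reactivate when it goes true on a deactivated one. A minimal sketch of that expectation (hypothetical helper, not Heat's actual resource code):

    def sync_active_state(images, image_id, want_active, current_status):
        # Mirror of what the tests assert: the glanceclient deactivate/reactivate
        # call happens exactly once, and only when the states actually differ.
        if want_active is False and current_status == 'active':
            images.deactivate(image_id)
        elif want_active and current_status == 'deactivated':
            images.reactivate(image_id)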
diff --git a/heat/tests/openstack/heat/test_cloud_config.py b/heat/tests/openstack/heat/test_cloud_config.py
index 9f740da5b..90fc7fbfe 100644
--- a/heat/tests/openstack/heat/test_cloud_config.py
+++ b/heat/tests/openstack/heat/test_cloud_config.py
@@ -11,10 +11,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import uuid
import yaml
-import mock
from heat.engine import stack
from heat.engine import template
diff --git a/heat/tests/openstack/heat/test_deployed_server.py b/heat/tests/openstack/heat/test_deployed_server.py
index acf8e820e..ba459c12b 100644
--- a/heat/tests/openstack/heat/test_deployed_server.py
+++ b/heat/tests/openstack/heat/test_deployed_server.py
@@ -11,11 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
from heat.common import exception
from heat.common import template_format
diff --git a/heat/tests/openstack/heat/test_instance_group.py b/heat/tests/openstack/heat/test_instance_group.py
index c13214c8e..c6c84a54a 100644
--- a/heat/tests/openstack/heat/test_instance_group.py
+++ b/heat/tests/openstack/heat/test_instance_group.py
@@ -12,13 +12,10 @@
# under the License.
import copy
-
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import grouputils
-from heat.common import short_id
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine import resource
@@ -86,14 +83,14 @@ class TestInstanceGroup(common.HeatTestCase):
props['LaunchConfigurationName'] = 'JobServerConfig'
error = self.assertRaises(ValueError, self.instance_group.validate)
self.assertIn('(JobServerConfig) reference can not be found',
- six.text_type(error))
+ str(error))
# test resource name of instance group not WebServerGroup, so no ref
props['LaunchConfigurationName'] = 'LaunchConfig'
error = self.assertRaises(ValueError, self.instance_group.validate)
self.assertIn('LaunchConfigurationName (LaunchConfig) requires a '
'reference to the configuration not just the '
'name of the resource.',
- six.text_type(error))
+ str(error))
# test validate ok if change instance_group name to 'WebServerGroup'
self.instance_group.name = 'WebServerGroup'
self.instance_group.validate()
@@ -150,7 +147,7 @@ class TestInstanceGroup(common.HeatTestCase):
'2.1.3.3'])
mock_members = self.patchobject(grouputils, 'get_members')
instances = []
- for ip_ex in six.moves.range(1, 4):
+ for ip_ex in range(1, 4):
inst = mock.Mock()
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
instances.append(inst)
@@ -163,7 +160,7 @@ class TestInstanceGroup(common.HeatTestCase):
side_effect=exception.NotFound)
mock_members = self.patchobject(grouputils, 'get_members')
instances = []
- for ip_ex in six.moves.range(1, 4):
+ for ip_ex in range(1, 4):
inst = mock.Mock()
inst.FnGetAtt.return_value = '2.1.3.%d' % ip_ex
instances.append(inst)
@@ -173,12 +170,9 @@ class TestInstanceGroup(common.HeatTestCase):
def test_instance_group_refid_rsrc_name(self):
self.instance_group.id = '123'
-
self.instance_group.uuid = '9bfb9456-3fe8-41f4-b318-9dba18eeef74'
self.instance_group.action = 'CREATE'
- expected = '%s-%s-%s' % (self.instance_group.stack.name,
- self.instance_group.name,
- short_id.get_id(self.instance_group.uuid))
+ expected = self.instance_group.name
self.assertEqual(expected, self.instance_group.FnGetRefId())
def test_instance_group_refid_rsrc_id(self):
@@ -332,7 +326,7 @@ class LoadbalancerReloadTest(common.HeatTestCase):
self.assertEqual(
"Unsupported resource 'ElasticLoadBalancer' in "
"LoadBalancerNames",
- six.text_type(error))
+ str(error))
def test_lb_reload_static_resolve(self):
t = template_format.parse(inline_templates.as_template)
@@ -482,42 +476,45 @@ class ResizeWithFailedInstancesTest(InstanceGroupWithNestedStack):
class TestGetBatches(common.HeatTestCase):
scenarios = [
- ('4_1_0', dict(curr_cap=4, bat_size=1, min_serv=0,
- batches=[(4, 1)] * 4)),
- ('4_1_4', dict(curr_cap=4, bat_size=1, min_serv=4,
- batches=([(5, 1)] * 4) + [(4, 0)])),
- ('4_1_5', dict(curr_cap=4, bat_size=1, min_serv=5,
- batches=([(5, 1)] * 4) + [(4, 0)])),
- ('4_2_0', dict(curr_cap=4, bat_size=2, min_serv=0,
- batches=[(4, 2)] * 2)),
- ('4_2_4', dict(curr_cap=4, bat_size=2, min_serv=4,
- batches=([(6, 2)] * 2) + [(4, 0)])),
- ('5_2_0', dict(curr_cap=5, bat_size=2, min_serv=0,
- batches=([(5, 2)] * 2) + [(5, 1)])),
- ('5_2_4', dict(curr_cap=5, bat_size=2, min_serv=4,
- batches=([(6, 2)] * 2) + [(5, 1)])),
- ('3_2_0', dict(curr_cap=3, bat_size=2, min_serv=0,
- batches=[(3, 2), (3, 1)])),
- ('3_2_4', dict(curr_cap=3, bat_size=2, min_serv=4,
- batches=[(5, 2), (4, 1), (3, 0)])),
- ('4_4_0', dict(curr_cap=4, bat_size=4, min_serv=0,
- batches=[(4, 4)])),
- ('4_5_0', dict(curr_cap=4, bat_size=5, min_serv=0,
- batches=[(4, 4)])),
- ('4_4_1', dict(curr_cap=4, bat_size=4, min_serv=1,
- batches=[(5, 4), (4, 0)])),
- ('4_6_1', dict(curr_cap=4, bat_size=6, min_serv=1,
- batches=[(5, 4), (4, 0)])),
- ('4_4_2', dict(curr_cap=4, bat_size=4, min_serv=2,
- batches=[(6, 4), (4, 0)])),
- ('4_4_4', dict(curr_cap=4, bat_size=4, min_serv=4,
- batches=[(8, 4), (4, 0)])),
- ('4_5_6', dict(curr_cap=4, bat_size=5, min_serv=6,
- batches=[(8, 4), (4, 0)])),
+ ('4_4_1_0', dict(tgt_cap=4, curr_cap=4, bat_size=1, min_serv=0,
+ batches=[(4, 1)] * 4)),
+ ('3_4_1_0', dict(tgt_cap=3, curr_cap=4, bat_size=1, min_serv=0,
+ batches=[(3, 1)] * 3)),
+ ('4_4_1_4', dict(tgt_cap=4, curr_cap=4, bat_size=1, min_serv=4,
+ batches=([(5, 1)] * 4) + [(4, 0)])),
+ ('4_4_1_5', dict(tgt_cap=4, curr_cap=4, bat_size=1, min_serv=5,
+ batches=([(5, 1)] * 4) + [(4, 0)])),
+ ('4_4_2_0', dict(tgt_cap=4, curr_cap=4, bat_size=2, min_serv=0,
+ batches=[(4, 2)] * 2)),
+ ('4_4_2_4', dict(tgt_cap=4, curr_cap=4, bat_size=2, min_serv=4,
+ batches=([(6, 2)] * 2) + [(4, 0)])),
+ ('5_5_2_0', dict(tgt_cap=5, curr_cap=5, bat_size=2, min_serv=0,
+ batches=([(5, 2)] * 2) + [(5, 1)])),
+ ('5_5_2_4', dict(tgt_cap=5, curr_cap=5, bat_size=2, min_serv=4,
+ batches=([(6, 2)] * 2) + [(5, 1)])),
+ ('3_3_2_0', dict(tgt_cap=3, curr_cap=3, bat_size=2, min_serv=0,
+ batches=[(3, 2), (3, 1)])),
+ ('3_3_2_4', dict(tgt_cap=3, curr_cap=3, bat_size=2, min_serv=4,
+ batches=[(5, 2), (4, 1), (3, 0)])),
+ ('4_4_4_0', dict(tgt_cap=4, curr_cap=4, bat_size=4, min_serv=0,
+ batches=[(4, 4)])),
+ ('4_4_5_0', dict(tgt_cap=4, curr_cap=4, bat_size=5, min_serv=0,
+ batches=[(4, 4)])),
+ ('4_4_4_1', dict(tgt_cap=4, curr_cap=4, bat_size=4, min_serv=1,
+ batches=[(5, 4), (4, 0)])),
+ ('4_4_6_1', dict(tgt_cap=4, curr_cap=4, bat_size=6, min_serv=1,
+ batches=[(5, 4), (4, 0)])),
+ ('4_4_4_2', dict(tgt_cap=4, curr_cap=4, bat_size=4, min_serv=2,
+ batches=[(6, 4), (4, 0)])),
+ ('4_4_4_4', dict(tgt_cap=4, curr_cap=4, bat_size=4, min_serv=4,
+ batches=[(8, 4), (4, 0)])),
+ ('4_4_5_6', dict(tgt_cap=4, curr_cap=4, bat_size=5, min_serv=6,
+ batches=[(8, 4), (4, 0)])),
]
def test_get_batches(self):
- batches = list(instgrp.InstanceGroup._get_batches(self.curr_cap,
+ batches = list(instgrp.InstanceGroup._get_batches(self.tgt_cap,
+ self.curr_cap,
self.bat_size,
self.min_serv))
self.assertEqual(self.batches, batches)
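Editor's note: the rewritten scenarios reflect _get_batches growing a separate target-capacity argument ahead of the current capacity. Reading one row of the table as a sketch (values and expected output taken from the '4_4_2_4' scenario above; the interpretation of the tuples is my reading, not a statement from the source):

    # batches = list(instgrp.InstanceGroup._get_batches(tgt_cap, curr_cap,
    #                                                   bat_size, min_serv))
    tgt_cap, curr_cap, bat_size, min_serv = 4, 4, 2, 4      # scenario '4_4_2_4'
    expected = [(6, 2), (6, 2), (4, 0)]  # (capacity, members updated per batch):
                                         # capacity grows to 6 so 4 stay in service
                                         # while 2 update at a time, then settles
                                         # back to the target of 4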
diff --git a/heat/tests/openstack/heat/test_instance_group_update_policy.py b/heat/tests/openstack/heat/test_instance_group_update_policy.py
index 9ece3aa28..0cb7ceb28 100644
--- a/heat/tests/openstack/heat/test_instance_group_update_policy.py
+++ b/heat/tests/openstack/heat/test_instance_group_update_policy.py
@@ -12,8 +12,8 @@
# under the License.
import json
+from unittest import mock
-import mock
from heat.common import exception
from heat.common import template_format
@@ -298,5 +298,6 @@ class InstanceGroupReplaceTest(common.HeatTestCase):
group = instgrp.InstanceGroup('asg', defn, stack)
group._group_data().size = mock.Mock(return_value=12)
+ group.get_size = mock.Mock(return_value=12)
self.assertRaises(ValueError,
group._replace, 10, 1, 14 * 60)
diff --git a/heat/tests/openstack/heat/test_multi_part.py b/heat/tests/openstack/heat/test_multi_part.py
index 85919dfc9..ad8d676c1 100644
--- a/heat/tests/openstack/heat/test_multi_part.py
+++ b/heat/tests/openstack/heat/test_multi_part.py
@@ -13,10 +13,9 @@
import contextlib
import email
+from unittest import mock
import uuid
-import mock
-
from heat.common import exception as exc
from heat.engine import stack as parser
from heat.engine import template
@@ -31,7 +30,7 @@ class MultipartMimeTest(common.HeatTestCase):
self.ctx = utils.dummy_context()
self.init_config()
- def init_config(self, parts=None):
+ def init_config(self, parts=None, group='Heat::Ungrouped'):
parts = parts or []
stack = parser.Stack(
self.ctx, 'software_config_test_stack',
@@ -41,13 +40,15 @@ class MultipartMimeTest(common.HeatTestCase):
'config_mysql': {
'Type': 'OS::Heat::MultipartMime',
'Properties': {
+ 'group': group,
'parts': parts
}}}}))
self.config = stack['config_mysql']
self.rpc_client = mock.MagicMock()
self.config._rpc_client = self.rpc_client
- def test_handle_create(self):
+ def _test_create(self, group='Heat::Ungrouped'):
+ self.init_config(group=group)
config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
sc = {'id': config_id}
self.rpc_client.create_software_config.return_value = sc
@@ -59,9 +60,15 @@ class MultipartMimeTest(common.HeatTestCase):
self.assertEqual({
'name': self.config.physical_resource_name(),
'config': self.config.message,
- 'group': 'Heat::Ungrouped'
+ 'group': group
}, kwargs)
+ def test_handle_create(self):
+ self._test_create()
+
+ def test_handle_create_with_group(self):
+ self._test_create(group='script')
+
def test_get_message_not_none(self):
self.config.message = 'Not none'
result = self.config.get_message()
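Editor's note: the MultipartMime changes simply thread a configurable software-config group through to create_software_config instead of hard-coding 'Heat::Ungrouped'. A hedged stand-in for the kwargs the test asserts (hypothetical helper, not the resource code):

    def software_config_kwargs(name, message, group='Heat::Ungrouped'):
        # The new test_handle_create_with_group case passes group='script'.
        return {'name': name, 'config': message, 'group': group}

    print(software_config_kwargs('config_mysql', 'mime-message', group='script'))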
diff --git a/heat/tests/openstack/heat/test_none_resource.py b/heat/tests/openstack/heat/test_none_resource.py
index 13370ebaf..7344b36e5 100644
--- a/heat/tests/openstack/heat/test_none_resource.py
+++ b/heat/tests/openstack/heat/test_none_resource.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import template_format
from heat.engine import resource
diff --git a/heat/tests/openstack/heat/test_random_string.py b/heat/tests/openstack/heat/test_random_string.py
index 81df3f5c5..6b36818ac 100644
--- a/heat/tests/openstack/heat/test_random_string.py
+++ b/heat/tests/openstack/heat/test_random_string.py
@@ -12,9 +12,8 @@
# under the License.
import re
+from unittest import mock
-import mock
-import six
from testtools import matchers
from heat.common import exception
@@ -193,7 +192,7 @@ Resources:
self.create_stack, template_random_string)
self.assertEqual("Length property cannot be smaller than combined "
"character class and character sequence minimums",
- six.text_type(exc))
+ str(exc))
def test_max_length(self):
template_random_string = '''
@@ -222,7 +221,7 @@ Resources:
exc = self.assertRaises(exception.StackValidationFailed,
self.create_stack, template_random_string)
self.assertIn('513 is out of range (min: 1, max: 512)',
- six.text_type(exc))
+ str(exc))
class TestGenerateRandomString(common.HeatTestCase):
diff --git a/heat/tests/openstack/heat/test_remote_stack.py b/heat/tests/openstack/heat/test_remote_stack.py
index 69a10a228..b5df410a2 100644
--- a/heat/tests/openstack/heat/test_remote_stack.py
+++ b/heat/tests/openstack/heat/test_remote_stack.py
@@ -13,13 +13,12 @@
import collections
import json
+from unittest import mock
from heatclient import exc
from heatclient.v1 import stacks
from keystoneauth1 import loading as ks_loading
-import mock
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -261,7 +260,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
rsrc.validate)
msg = ('Cannot establish connection to Heat endpoint '
'at region "%s"' % self.bad_region)
- self.assertIn(msg, six.text_type(ex))
+ self.assertIn(msg, str(ex))
def test_remote_validation_failed(self):
parent, rsrc = self.create_parent_stack(remote_region=self.that_region,
@@ -282,7 +281,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed, rsrc.validate)
msg = ('Failed validating stack template using Heat endpoint at region'
' "%s"') % self.that_region
- self.assertIn(msg, six.text_type(ex))
+ self.assertIn(msg, str(ex))
def test_create(self):
rsrc = self.create_remote_stack()
@@ -351,13 +350,13 @@ class RemoteStackTest(tests_common.HeatTestCase):
ks_loading, 'get_plugin_loader', return_value=self.m_plugin)
self._create_with_remote_credential('cred_2')
self.assertEqual(
- [mock.call(secret_ref='secrets/cred_2')]*2,
+ [mock.call(secret_ref='secrets/cred_2')] * 2,
m_gsbr.call_args_list)
expected_load_options = [
mock.call(
application_credential_id='9dfa187e5a354484bf9c49a2b674333a',
application_credential_secret='sec',
- auth_url='http://192.168.1.101/identity/v3')]*2
+ auth_url='http://192.168.1.101/identity/v3')] * 2
self.assertEqual(expected_load_options,
self.m_plugin.load_from_options.call_args_list)
@@ -395,7 +394,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status CREATE_FAILED due to '
'"Remote stack creation failed"')
- self.assertEqual(error_msg, six.text_type(error))
+ self.assertEqual(error_msg, str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
def test_delete(self):
@@ -441,7 +440,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status DELETE_FAILED due to '
'"Remote stack deletion failed"')
- self.assertIn(error_msg, six.text_type(error))
+ self.assertIn(error_msg, str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.heat.stacks.delete.assert_called_with(stack_id=remote_stack_id)
self.assertEqual(rsrc.resource_id, remote_stack_id)
@@ -470,7 +469,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
self.assertEqual(
'The Referenced Attribute (remote_stack non-existent_property) is '
'incorrect.',
- six.text_type(error))
+ str(error))
def test_snapshot(self):
stacks = [get_stack(stack_status='SNAPSHOT_IN_PROGRESS'),
@@ -563,7 +562,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status CHECK_FAILED due to '
'"Remote stack check failed"')
- self.assertEqual(error_msg, six.text_type(error))
+ self.assertEqual(error_msg, str(error))
self.assertEqual((rsrc.CHECK, rsrc.FAILED), rsrc.state)
self.heat.actions.check.assert_called_with(stack_id=rsrc.resource_id)
@@ -596,7 +595,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status RESUME_FAILED due to '
'"Remote stack resume failed"')
- self.assertEqual(error_msg, six.text_type(error))
+ self.assertEqual(error_msg, str(error))
self.assertEqual((rsrc.RESUME, rsrc.FAILED), rsrc.state)
self.heat.actions.resume.assert_called_with(stack_id=rsrc.resource_id)
@@ -608,7 +607,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
scheduler.TaskRunner(rsrc.resume))
error_msg = ('Error: resources.remote_stack: '
'Cannot resume remote_stack, resource not found')
- self.assertEqual(error_msg, six.text_type(error))
+ self.assertEqual(error_msg, str(error))
self.assertEqual((rsrc.RESUME, rsrc.FAILED), rsrc.state)
def test_suspend(self):
@@ -638,7 +637,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
error_msg = ('ResourceInError: resources.remote_stack: '
'Went to status SUSPEND_FAILED due to '
'"Remote stack suspend failed"')
- self.assertEqual(error_msg, six.text_type(error))
+ self.assertEqual(error_msg, str(error))
self.assertEqual((rsrc.SUSPEND, rsrc.FAILED), rsrc.state)
# assert suspend was not called
self.heat.actions.suspend.assert_has_calls([])
@@ -652,7 +651,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
scheduler.TaskRunner(rsrc.suspend))
error_msg = ('Error: resources.remote_stack: '
'Cannot suspend remote_stack, resource not found')
- self.assertEqual(error_msg, six.text_type(error))
+ self.assertEqual(error_msg, str(error))
self.assertEqual((rsrc.SUSPEND, rsrc.FAILED), rsrc.state)
# assert suspend was not called
self.heat.actions.suspend.assert_has_calls([])
@@ -719,7 +718,7 @@ class RemoteStackTest(tests_common.HeatTestCase):
error_msg = _('ResourceInError: resources.remote_stack: '
'Went to status UPDATE_FAILED due to '
'"Remote stack update failed"')
- self.assertEqual(error_msg, six.text_type(error))
+ self.assertEqual(error_msg, str(error))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.assertEqual(2, len(self.heat.stacks.get.call_args_list))
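Editor's note: most hunks in this and the surrounding files are the same mechanical Python 3 cleanup. A standard-library-only cheat sheet of the substitutions (nothing Heat-specific assumed):

    from unittest import mock              # replaces the external 'mock' package
    from urllib import parse as urlparse   # replaces six.moves.urllib.parse

    err = ValueError('boom')
    assert str(err) == 'boom'              # six.text_type(err) -> str(err)
    assert list(range(1, 4)) == [1, 2, 3]  # six.moves.range / xrange -> range
    assert urlparse.quote('a b') == 'a%20b'
    print(mock.Mock())                     # sanity check that the import works
    # unittest's assertItemsEqual is gone in Python 3; assertCountEqual replaces it.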
diff --git a/heat/tests/openstack/heat/test_resource_chain.py b/heat/tests/openstack/heat/test_resource_chain.py
index d2899fc25..960c9ac3b 100644
--- a/heat/tests/openstack/heat/test_resource_chain.py
+++ b/heat/tests/openstack/heat/test_resource_chain.py
@@ -12,8 +12,7 @@
# under the License.
import copy
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.engine import node_data
@@ -348,7 +347,7 @@ class ResourceChainAttrTest(common.HeatTestCase):
ex = self.assertRaises(exception.NotFound, chain.FnGetAtt,
'resource.2')
self.assertIn("Member '2' not found in group resource 'test'",
- six.text_type(ex))
+ str(ex))
def _create_dummy_stack(self, expect_count=2, expect_attrs=None):
self.stack = utils.parse_stack(TEMPLATE)
diff --git a/heat/tests/openstack/heat/test_resource_group.py b/heat/tests/openstack/heat/test_resource_group.py
index deb59c6c9..ca26af6e6 100644
--- a/heat/tests/openstack/heat/test_resource_group.py
+++ b/heat/tests/openstack/heat/test_resource_group.py
@@ -12,9 +12,7 @@
# under the License.
import copy
-
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import grouputils
@@ -650,7 +648,7 @@ class ResourceGroupTest(common.HeatTestCase):
resg = stack.resources['group1']
self.assertIsNone(resg.validate())
- def test_validate_with_blacklist(self):
+ def test_validate_with_skiplist(self):
templ = copy.deepcopy(template_server)
self.mock_flavor = mock.Mock(ram=4, disk=4)
self.mock_active_image = mock.Mock(min_ram=1, min_disk=1,
@@ -685,7 +683,7 @@ class ResourceGroupTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
exp_msg = 'The Resource Type (idontexist) could not be found.'
- self.assertIn(exp_msg, six.text_type(exc))
+ self.assertIn(exp_msg, str(exc))
def test_reference_attr(self):
stack = utils.parse_stack(template2)
@@ -712,7 +710,7 @@ class ResourceGroupTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
errstr = "removal_policies: \"'notallowed'\" is not a list"
- self.assertIn(errstr, six.text_type(exc))
+ self.assertIn(errstr, str(exc))
def test_invalid_removal_policies_nomap(self):
"""Test that error raised for malformed removal_policies."""
@@ -725,7 +723,7 @@ class ResourceGroupTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
resg.validate)
errstr = '"notallowed" is not a map'
- self.assertIn(errstr, six.text_type(exc))
+ self.assertIn(errstr, str(exc))
def test_child_template(self):
stack = utils.parse_stack(template2)
@@ -824,11 +822,11 @@ class ResourceGroupTest(common.HeatTestCase):
self.assertTrue(resgrp._assemble_nested.called)
-class ResourceGroupBlackList(common.HeatTestCase):
- """This class tests ResourceGroup._name_blacklist()."""
+class ResourceGroupSkiplistTest(common.HeatTestCase):
+ """This class tests ResourceGroup._name_skiplist()."""
- # 1) no resource_list, empty blacklist
- # 2) no resource_list, existing blacklist
+ # 1) no resource_list, empty skiplist
+ # 2) no resource_list, existing skiplist
# 3) resource_list not in nested()
# 4) resource_list (refid) not in nested()
# 5) resource_list in nested() -> saved
@@ -878,7 +876,7 @@ class ResourceGroupBlackList(common.HeatTestCase):
saved=True, fallback=True, rm_mode='update')),
]
- def test_blacklist(self):
+ def test_skiplist(self):
stack = utils.parse_stack(template)
resg = stack['group1']
@@ -925,13 +923,13 @@ class ResourceGroupBlackList(common.HeatTestCase):
nested.resource_by_refid.side_effect = by_refid
resg.nested = mock.Mock(return_value=nested)
- resg._update_name_blacklist(properties)
+ resg._update_name_skiplist(properties)
if self.saved:
resg.data_set.assert_called_once_with('name_blacklist',
','.join(self.expected))
else:
resg.data_set.assert_not_called()
- self.assertEqual(set(self.expected), resg._name_blacklist())
+ self.assertEqual(set(self.expected), resg._name_skiplist())
class ResourceGroupEmptyParams(common.HeatTestCase):
@@ -981,18 +979,18 @@ class ResourceGroupEmptyParams(common.HeatTestCase):
class ResourceGroupNameListTest(common.HeatTestCase):
"""This class tests ResourceGroup._resource_names()."""
- # 1) no blacklist, 0 count
- # 2) no blacklist, x count
- # 3) blacklist (not effecting)
- # 4) blacklist with pruning
+ # 1) no skiplist, 0 count
+ # 2) no skiplist, x count
+ # 3) skiplist (not affecting)
+ # 4) skiplist with pruning
scenarios = [
- ('1', dict(blacklist=[], count=0,
+ ('1', dict(skiplist=[], count=0,
expected=[])),
- ('2', dict(blacklist=[], count=4,
+ ('2', dict(skiplist=[], count=4,
expected=['0', '1', '2', '3'])),
- ('3', dict(blacklist=['5', '6'], count=3,
+ ('3', dict(skiplist=['5', '6'], count=3,
expected=['0', '1', '2'])),
- ('4', dict(blacklist=['2', '4'], count=4,
+ ('4', dict(skiplist=['2', '4'], count=4,
expected=['0', '1', '3', '5'])),
]
@@ -1002,7 +1000,7 @@ class ResourceGroupNameListTest(common.HeatTestCase):
resg.properties = mock.MagicMock()
resg.properties.get.return_value = self.count
- resg._name_blacklist = mock.MagicMock(return_value=self.blacklist)
+ resg._name_skiplist = mock.MagicMock(return_value=self.skiplist)
self.assertEqual(self.expected, list(resg._resource_names()))
@@ -1103,7 +1101,7 @@ class ResourceGroupAttrTest(common.HeatTestCase):
ex = self.assertRaises(exception.NotFound, resg.FnGetAtt,
'resource.2')
self.assertIn("Member '2' not found in group resource 'group1'.",
- six.text_type(ex))
+ str(ex))
def test_get_attribute_convg(self):
cache_data = {'group1': node_data.NodeData.from_dict({
@@ -1117,7 +1115,7 @@ class ResourceGroupAttrTest(common.HeatTestCase):
rsrc = stack.defn['group1']
self.assertEqual(['rsrc1', 'rsrc2'], rsrc.FnGetAtt('refs'))
- def test_get_attribute_blacklist(self):
+ def test_get_attribute_skiplist(self):
resg = self._create_dummy_stack()
resg.data = mock.Mock(return_value={'name_blacklist': '3,5'})
@@ -1203,48 +1201,48 @@ class ResourceGroupAttrFallbackTest(ResourceGroupAttrTest):
class ReplaceTest(common.HeatTestCase):
# 1. no min_in_service
- # 2. min_in_service > count and existing with no blacklist
- # 3. min_in_service > count and existing with blacklist
- # 4. existing > count and min_in_service with blacklist
- # 5. existing > count and min_in_service with no blacklist
- # 6. all existing blacklisted
- # 7. count > existing and min_in_service with no blacklist
- # 8. count > existing and min_in_service with blacklist
- # 9. count < existing - blacklisted
+ # 2. min_in_service > count and existing with no skiplist
+ # 3. min_in_service > count and existing with skiplist
+ # 4. existing > count and min_in_service with skiplist
+ # 5. existing > count and min_in_service with no skiplist
+ # 6. all existing skipped
+ # 7. count > existing and min_in_service with no skiplist
+ # 8. count > existing and min_in_service with skiplist
+ # 9. count < existing - skiplisted
# 10. pause_sec > 0
scenarios = [
('1', dict(min_in_service=0, count=2,
- existing=['0', '1'], black_listed=['0'],
+ existing=['0', '1'], skipped=['0'],
batch_size=1, pause_sec=0, tasks=2)),
('2', dict(min_in_service=3, count=2,
- existing=['0', '1'], black_listed=[],
+ existing=['0', '1'], skipped=[],
batch_size=2, pause_sec=0, tasks=3)),
('3', dict(min_in_service=3, count=2,
- existing=['0', '1'], black_listed=['0'],
+ existing=['0', '1'], skipped=['0'],
batch_size=2, pause_sec=0, tasks=3)),
('4', dict(min_in_service=3, count=2,
- existing=['0', '1', '2', '3'], black_listed=['2', '3'],
+ existing=['0', '1', '2', '3'], skipped=['2', '3'],
batch_size=1, pause_sec=0, tasks=4)),
('5', dict(min_in_service=2, count=2,
- existing=['0', '1', '2', '3'], black_listed=[],
+ existing=['0', '1', '2', '3'], skipped=[],
batch_size=2, pause_sec=0, tasks=2)),
('6', dict(min_in_service=2, count=3,
- existing=['0', '1'], black_listed=['0', '1'],
+ existing=['0', '1'], skipped=['0', '1'],
batch_size=2, pause_sec=0, tasks=2)),
('7', dict(min_in_service=0, count=5,
- existing=['0', '1'], black_listed=[],
+ existing=['0', '1'], skipped=[],
batch_size=1, pause_sec=0, tasks=5)),
('8', dict(min_in_service=0, count=5,
- existing=['0', '1'], black_listed=['0'],
+ existing=['0', '1'], skipped=['0'],
batch_size=1, pause_sec=0, tasks=5)),
('9', dict(min_in_service=0, count=3,
existing=['0', '1', '2', '3', '4', '5'],
- black_listed=['0'],
+ skipped=['0'],
batch_size=2, pause_sec=0, tasks=2)),
('10', dict(min_in_service=0, count=3,
existing=['0', '1', '2', '3', '4', '5'],
- black_listed=['0'],
+ skipped=['0'],
batch_size=2, pause_sec=10, tasks=3))]
def setUp(self):
@@ -1265,8 +1263,8 @@ class ReplaceTest(common.HeatTestCase):
def test_rolling_updates(self):
self.group._nested = get_fake_nested_stack(self.existing)
self.group.get_size = mock.Mock(return_value=self.count)
- self.group._name_blacklist = mock.Mock(
- return_value=set(self.black_listed))
+ self.group._name_skiplist = mock.Mock(
+ return_value=set(self.skipped))
tasks = self.group._replace(self.min_in_service, self.batch_size,
self.pause_sec)
self.assertEqual(self.tasks, len(tasks))
@@ -1363,7 +1361,7 @@ class RollingUpdatePolicyTest(common.HeatTestCase):
stack = utils.parse_stack(tmpl)
error = self.assertRaises(
exception.StackValidationFailed, stack.validate)
- self.assertIn("foo", six.text_type(error))
+ self.assertIn("foo", str(error))
class RollingUpdatePolicyDiffTest(common.HeatTestCase):
@@ -1459,7 +1457,7 @@ class RollingUpdateTest(common.HeatTestCase):
err = self.assertRaises(ValueError, self.current_grp._update_timeout,
3, 100)
self.assertIn('The current update policy will result in stack update '
- 'timeout.', six.text_type(err))
+ 'timeout.', str(err))
def test_update_time_sufficient(self):
current = copy.deepcopy(template)
@@ -1470,18 +1468,18 @@ class RollingUpdateTest(common.HeatTestCase):
class TestUtils(common.HeatTestCase):
- # 1. No existing no blacklist
- # 2. Existing with no blacklist
- # 3. Existing with blacklist
+ # 1. No existing no skiplist
+ # 2. Existing with no skiplist
+ # 3. Existing with skiplist
scenarios = [
- ('1', dict(existing=[], black_listed=[], count=0)),
- ('2', dict(existing=['0', '1'], black_listed=[], count=0)),
- ('3', dict(existing=['0', '1'], black_listed=['0'], count=1)),
- ('4', dict(existing=['0', '1'], black_listed=['1', '2'], count=1))
+ ('1', dict(existing=[], skipped=[], count=0)),
+ ('2', dict(existing=['0', '1'], skipped=[], count=0)),
+ ('3', dict(existing=['0', '1'], skipped=['0'], count=1)),
+ ('4', dict(existing=['0', '1'], skipped=['1', '2'], count=1))
]
- def test_count_black_listed(self):
+ def test_count_skipped(self):
inspector = mock.Mock(spec=grouputils.GroupInspector)
self.patchobject(grouputils.GroupInspector, 'from_parent_resource',
return_value=inspector)
@@ -1490,8 +1488,8 @@ class TestUtils(common.HeatTestCase):
stack = utils.parse_stack(template2)
snip = stack.t.resource_definitions(stack)['group1']
resgrp = resource_group.ResourceGroup('test', snip, stack)
- resgrp._name_blacklist = mock.Mock(return_value=set(self.black_listed))
- rcount = resgrp._count_black_listed(self.existing)
+ resgrp._name_skiplist = mock.Mock(return_value=set(self.skipped))
+ rcount = resgrp._count_skipped(self.existing)
self.assertEqual(self.count, rcount)
@@ -1763,7 +1761,7 @@ class TestGetBatches(common.HeatTestCase):
self.stack = utils.parse_stack(template)
self.grp = self.stack['group1']
- self.grp._name_blacklist = mock.Mock(return_value={'0'})
+ self.grp._name_skiplist = mock.Mock(return_value={'0'})
def test_get_batches(self):
batches = list(self.grp._get_batches(self.targ_cap,
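Editor's note: the resource-group hunks above are the blacklist-to-skiplist rename; note the persisted data key stays 'name_blacklist', only the method and test names change. The TestUtils scenarios reduce to a simple set intersection, sketched here as a hypothetical helper (not Heat's implementation):

    def count_skipped(existing, skiplist):
        # scenario '4' above: existing ['0', '1'], skiplist ['1', '2'] -> 1
        return len(set(existing) & set(skiplist))

    assert count_skipped(['0', '1'], ['1', '2']) == 1
    assert count_skipped(['0', '1'], []) == 0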
diff --git a/heat/tests/openstack/heat/test_software_component.py b/heat/tests/openstack/heat/test_software_component.py
index 65846a057..feae76c36 100644
--- a/heat/tests/openstack/heat/test_software_component.py
+++ b/heat/tests/openstack/heat/test_software_component.py
@@ -12,8 +12,7 @@
# under the License.
import contextlib
-import mock
-import six
+from unittest import mock
from heat.common import exception as exc
from heat.common import template_format
@@ -282,6 +281,6 @@ class SoftwareComponentValidationTest(common.HeatTestCase):
if self.err:
err = self.assertRaises(self.err, self.stack.validate)
if self.err_msg:
- self.assertIn(self.err_msg, six.text_type(err))
+ self.assertIn(self.err_msg, str(err))
else:
self.assertIsNone(self.stack.validate())
diff --git a/heat/tests/openstack/heat/test_software_config.py b/heat/tests/openstack/heat/test_software_config.py
index 0ac5b643e..f194ec735 100644
--- a/heat/tests/openstack/heat/test_software_config.py
+++ b/heat/tests/openstack/heat/test_software_config.py
@@ -12,7 +12,7 @@
# under the License.
import contextlib
-import mock
+from unittest import mock
from heat.common import exception as exc
from heat.engine import stack
diff --git a/heat/tests/openstack/heat/test_software_deployment.py b/heat/tests/openstack/heat/test_software_deployment.py
index 985030813..307ec572f 100644
--- a/heat/tests/openstack/heat/test_software_deployment.py
+++ b/heat/tests/openstack/heat/test_software_deployment.py
@@ -14,11 +14,9 @@
import contextlib
import copy
import re
+from unittest import mock
import uuid
-import mock
-import six
-
from oslo_serialization import jsonutils
from heat.common import exception as exc
@@ -230,7 +228,7 @@ class SoftwareDeploymentTest(common.HeatTestCase):
err = self.assertRaises(exc.StackValidationFailed, deployment.validate)
self.assertEqual("Property error: "
"Resources.deployment_mysql.Properties: "
- "Property server not assigned", six.text_type(err))
+ "Property server not assigned", str(err))
def test_validate_failed(self):
template = dict(self.template_with_server)
@@ -242,7 +240,7 @@ class SoftwareDeploymentTest(common.HeatTestCase):
self.assertEqual("Resource server's property "
"user_data_format should be set to "
"SOFTWARE_CONFIG since there are "
- "software deployments on it.", six.text_type(err))
+ "software deployments on it.", str(err))
def mock_software_config(self):
config = {
@@ -682,7 +680,7 @@ class SoftwareDeploymentTest(common.HeatTestCase):
err = self.assertRaises(
exc.Error, self.deployment.check_create_complete, mock_sd)
self.assertEqual(
- 'Deployment to server failed: something wrong', six.text_type(err))
+ 'Deployment to server failed: something wrong', str(err))
def test_handle_create_cancel(self):
self._create_stack(self.template)
@@ -1145,7 +1143,7 @@ class SoftwareDeploymentTest(common.HeatTestCase):
self.deployment.FnGetAtt, 'foo2')
self.assertEqual(
'The Referenced Attribute (deployment_mysql foo2) is incorrect.',
- six.text_type(err))
+ str(err))
def test_handle_action(self):
self._create_stack(self.template)
diff --git a/heat/tests/openstack/heat/test_structured_config.py b/heat/tests/openstack/heat/test_structured_config.py
index ba389c19f..fa5c7511e 100644
--- a/heat/tests/openstack/heat/test_structured_config.py
+++ b/heat/tests/openstack/heat/test_structured_config.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.engine.resources.openstack.heat import structured_config as sc
diff --git a/heat/tests/openstack/heat/test_swiftsignal.py b/heat/tests/openstack/heat/test_swiftsignal.py
index aa3a89f27..c3d2db0d6 100644
--- a/heat/tests/openstack/heat/test_swiftsignal.py
+++ b/heat/tests/openstack/heat/test_swiftsignal.py
@@ -13,11 +13,10 @@
import datetime
import json
+from unittest import mock
import uuid
-import mock
from oslo_utils import timeutils
-import six
from swiftclient import client as swiftclient_client
from swiftclient import exceptions as swiftclient_exceptions
from testtools import matchers
@@ -201,7 +200,7 @@ class SwiftSignalHandleTest(common.HeatTestCase):
scheduler.TaskRunner(rsrc.delete))
self.assertEqual('ClientException: '
'resources.test_wait_condition_handle: '
- 'Overlimit: 413', six.text_type(exc))
+ 'Overlimit: 413', str(exc))
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
@@ -232,7 +231,7 @@ class SwiftSignalHandleTest(common.HeatTestCase):
scheduler.TaskRunner(rsrc.delete))
self.assertEqual('ClientException: '
'resources.test_wait_condition_handle: '
- 'Overlimit: 413', six.text_type(exc))
+ 'Overlimit: 413', str(exc))
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
@@ -328,7 +327,7 @@ class SwiftSignalTest(common.HeatTestCase):
st.create()
self.assertIn('not a valid SwiftSignalHandle. The Swift TempURL path',
- six.text_type(st.status_reason))
+ str(st.status_reason))
@mock.patch.object(swift.SwiftClientPlugin, 'get_signal_url')
def test_validate_handle_url_bad_container_name(self, mock_handle_url):
@@ -341,7 +340,7 @@ class SwiftSignalTest(common.HeatTestCase):
st.create()
self.assertIn('not a valid SwiftSignalHandle. The container name',
- six.text_type(st.status_reason))
+ str(st.status_reason))
@mock.patch.object(swift.SwiftClientPlugin, '_create')
@mock.patch.object(resource.Resource, 'physical_resource_name')
@@ -391,7 +390,7 @@ class SwiftSignalTest(common.HeatTestCase):
time_now = timeutils.utcnow()
time_series = [datetime.timedelta(0, t) + time_now
- for t in six.moves.xrange(1, 100)]
+ for t in range(1, 100)]
timeutils.set_time_override(time_series)
self.addCleanup(timeutils.clear_time_override)
diff --git a/heat/tests/openstack/heat/test_value.py b/heat/tests/openstack/heat/test_value.py
index 0ce0a500e..eae93c83f 100644
--- a/heat/tests/openstack/heat/test_value.py
+++ b/heat/tests/openstack/heat/test_value.py
@@ -15,6 +15,7 @@ import copy
import json
from heat.common import exception
+from heat.common import short_id
from heat.common import template_format
from heat.engine import environment
from heat.engine import stack as parser
@@ -55,7 +56,7 @@ outputs:
return (template_strict, template_loose)
def parse_stack(self, templ_obj):
- stack_name = 'test_value_stack'
+ stack_name = 'test_value_stack_%s' % short_id.generate_id()
stack = parser.Stack(utils.dummy_context(), stack_name, templ_obj)
stack.validate()
stack.store()
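Editor's note: the test_value change just makes each parsed stack name unique so repeated parses in one test run cannot collide. short_id is Heat's own helper; the idea is nothing more than a random suffix, e.g.:

    import uuid
    # stand-in for short_id.generate_id(), used only to illustrate the uniqueness trick
    stack_name = 'test_value_stack_%s' % uuid.uuid4().hex[:8]
    print(stack_name)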
diff --git a/heat/tests/openstack/heat/test_waitcondition.py b/heat/tests/openstack/heat/test_waitcondition.py
index b3ec213dd..b3b17bb76 100644
--- a/heat/tests/openstack/heat/test_waitcondition.py
+++ b/heat/tests/openstack/heat/test_waitcondition.py
@@ -12,12 +12,11 @@
# under the License.
import datetime
+from unittest import mock
import uuid
-import mock
from oslo_serialization import jsonutils as json
from oslo_utils import timeutils
-import six
from heat.common import identifier
from heat.common import template_format
@@ -262,7 +261,7 @@ class HeatWaitConditionTest(common.HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
wc_att = rsrc.FnGetAtt('data')
- self.assertEqual(six.text_type({}), wc_att)
+ self.assertEqual(str({}), wc_att)
handle = self.stack['wait_handle']
self.assertEqual((handle.CREATE, handle.COMPLETE), handle.state)
diff --git a/heat/tests/openstack/ironic/__init__.py b/heat/tests/openstack/ironic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/openstack/ironic/__init__.py
diff --git a/heat/tests/openstack/ironic/test_port.py b/heat/tests/openstack/ironic/test_port.py
new file mode 100644
index 000000000..4e9326f54
--- /dev/null
+++ b/heat/tests/openstack/ironic/test_port.py
@@ -0,0 +1,276 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from unittest import mock
+
+from ironicclient.common.apiclient import exceptions as ic_exc
+from oslo_config import cfg
+
+from heat.common import exception
+from heat.common import template_format
+from heat.engine.clients.os import ironic as ic
+from heat.engine import resource
+from heat.engine.resources.openstack.ironic import port
+from heat.engine import scheduler
+from heat.engine import template
+from heat.tests import common
+from heat.tests import utils
+
+cfg.CONF.import_opt('max_ironic_api_microversion', 'heat.common.config')
+
+port_template = '''
+ heat_template_version: rocky
+ resources:
+ test_port:
+ type: OS::Ironic::Port
+ properties:
+ node: node_1
+ address: 52:54:00:4d:e1:5e
+ portgroup: pg1
+ local_link_connection:
+ switch_info: brbm
+ port_id: ovs-node-1i1
+ switch_id: 70:4d:7b:88:ff:3a
+ pxe_enabled: true
+ physical_network: fake_phy_net
+ extra: {}
+ is_smartnic: false
+'''
+
+
+min_port_template = '''
+ heat_template_version: ocata
+ resources:
+ test_port:
+ type: OS::Ironic::Port
+ properties:
+ node: node_2
+ address: 54:54:00:4d:e1:5e
+'''
+
+RESOURCE_TYPE = 'OS::Ironic::Port'
+
+
+class TestIronicPort(common.HeatTestCase):
+ def setUp(self):
+ super(TestIronicPort, self).setUp()
+ cfg.CONF.set_override('max_ironic_api_microversion', 1.11)
+ cfg.CONF.set_override('action_retry_limit', 0)
+ self.fake_node_name = 'node_1'
+ self.fake_portgroup_name = 'pg1'
+
+ self.resource_id = '9cc6fd32-f711-4e1f-a82d-59e6ae074e95'
+ self.fake_name = 'test_port'
+ self.fake_address = u'52:54:00:4d:e1:5e'
+ self.fake_node_uuid = u'22767a68-a7f2-45fe-bc08-335a83e2b919'
+ self.fake_portgroup_uuid = '92972f88-a1e7-490f-866c-b6704d65c4de'
+ self.fake_local_link_connection = {'switch_info': 'brbm',
+ 'port_id': 'ovs-node-1i1',
+ 'switch_id': '70:4d:7b:88:ff:3a'}
+ self.fake_internal_info = {'foo': 'bar'}
+ self.fake_pxe_enabled = True
+ self.fake_physical_network = 'fake_phy_net'
+ self.fake_internal_info = {}
+ self.fake_extra = {}
+ self.fake_is_smartnic = False
+ resource._register_class(RESOURCE_TYPE, port.Port)
+ t = template_format.parse(port_template)
+ self.stack = utils.parse_stack(t)
+ resource_defns = self.stack.t.resource_definitions(self.stack)
+ self.rsrc_defn = resource_defns[self.fake_name]
+ self.client = mock.Mock()
+ self.patchobject(port.Port, 'client', return_value=self.client)
+ self.m_fgn = self.patchobject(ic.IronicClientPlugin,
+ 'get_node')
+ self.m_fgpg = self.patchobject(ic.IronicClientPlugin,
+ 'get_portgroup')
+ self.m_fgn.return_value = self.fake_node_uuid
+ self.m_fgpg.return_value = self.fake_portgroup_uuid
+ self._mock_get_client()
+
+ def _mock_get_client(self):
+ value = mock.MagicMock(
+ address=self.fake_address,
+ node_uuid=self.fake_node_uuid,
+ portgroup_uuid=self.fake_portgroup_uuid,
+ local_link_connection=self.fake_local_link_connection,
+ pxe_enabled=self.fake_pxe_enabled,
+ physical_network=self.fake_physical_network,
+ internal_info=self.fake_internal_info,
+ extra=self.fake_extra,
+ is_smartnic=self.fake_is_smartnic,
+ uuid=self.resource_id,
+ )
+ value.to_dict.return_value = value.__dict__
+ self.client.port.get.return_value = value
+
+ def _create_resource(self, name, snippet, stack, get_exception=None):
+ value = mock.MagicMock(uuid=self.resource_id)
+ self.client.port.create.return_value = value
+ get_rv = mock.MagicMock()
+ if get_exception:
+ self.client.port.get.side_effect = get_exception
+ else:
+ self.client.port.get.return_value = get_rv
+ p = port.Port(name, snippet, stack)
+ return p
+
+ def test_port_create(self):
+ b = self._create_resource('port', self.rsrc_defn, self.stack)
+ # validate the properties
+ self.assertEqual(
+ self.fake_node_name,
+ b.properties.get(port.Port.NODE))
+ self.assertEqual(
+ self.fake_address,
+ b.properties.get(port.Port.ADDRESS))
+ self.assertEqual(
+ self.fake_portgroup_name,
+ b.properties.get(port.Port.PORTGROUP))
+ self.assertEqual(
+ self.fake_local_link_connection,
+ b.properties.get(port.Port.LOCAL_LINK_CONNECTION))
+ self.assertEqual(
+ self.fake_pxe_enabled,
+ b.properties.get(port.Port.PXE_ENABLED))
+ self.assertEqual(
+ self.fake_physical_network,
+ b.properties.get(port.Port.PHYSICAL_NETWORK))
+ self.assertEqual(
+ self.fake_extra,
+ b.properties.get(port.Port.EXTRA))
+ self.assertEqual(
+ self.fake_is_smartnic,
+ b.properties.get(port.Port.IS_SMARTNIC))
+ scheduler.TaskRunner(b.create)()
+ self.assertEqual(self.resource_id, b.resource_id)
+ expected = [mock.call(self.fake_node_name),
+ mock.call(self.fake_node_uuid)]
+ self.assertEqual(expected, self.m_fgn.call_args_list)
+ expected = [mock.call(self.fake_portgroup_name),
+ mock.call(self.fake_portgroup_uuid)]
+ self.assertEqual(expected, self.m_fgpg.call_args_list)
+ self.client.port.create.assert_called_once_with(
+ address=self.fake_address,
+ extra=self.fake_extra,
+ is_smartnic=self.fake_is_smartnic,
+ local_link_connection=self.fake_local_link_connection,
+ node_uuid=self.fake_node_uuid,
+ physical_network=self.fake_physical_network,
+ portgroup_uuid=self.fake_portgroup_uuid,
+ pxe_enabled=self.fake_pxe_enabled)
+
+ def _property_not_supported(self, property_name, version):
+ t = template_format.parse(min_port_template)
+ new_t = copy.deepcopy(t)
+ new_t['resources'][self.fake_name]['properties'][
+ property_name] = self.rsrc_defn._properties[property_name]
+ rsrc_defns = template.Template(new_t).resource_definitions(
+ self.stack)
+ new_port = rsrc_defns[self.fake_name]
+ p = self._create_resource('port-with-%s' % property_name,
+ new_port, self.stack)
+
+ p.client_plugin().max_microversion = version - 0.01
+
+ feature = "OS::Ironic::Port with %s property" % property_name
+ err = self.assertRaises(exception.ResourceFailure,
+ scheduler.TaskRunner(p.create))
+ self.assertEqual("NotSupported: resources.port-with-%(key)s: "
+ "%(feature)s is not supported." % {
+ 'feature': feature, 'key': property_name},
+ str(err))
+
+ def test_port_create_with_pxe_enabled_not_supported(self):
+ self._property_not_supported(port.Port.PXE_ENABLED, 1.19)
+
+ def test_port_create_with_local_link_connection_not_supported(self):
+ self._property_not_supported(port.Port.LOCAL_LINK_CONNECTION, 1.19)
+
+ def test_port_create_with_portgroup_not_supported(self):
+ self._property_not_supported(port.Port.PORTGROUP, 1.24)
+
+ def test_port_create_with_physical_network_not_supported(self):
+ self._property_not_supported(port.Port.PHYSICAL_NETWORK, 1.34)
+
+ def test_port_create_with_is_smartnic_not_supported(self):
+ self._property_not_supported(port.Port.IS_SMARTNIC, 1.53)
+
+ def test_port_check_create_complete(self):
+ b = self._create_resource('port', self.rsrc_defn, self.stack)
+ self.assertTrue(b.check_create_complete(self.resource_id))
+
+ def test_port_check_create_complete_with_not_found(self):
+ b = self._create_resource('port', self.rsrc_defn, self.stack,
+ get_exception=ic_exc.NotFound)
+ self.assertFalse(b.check_create_complete(self.resource_id))
+
+ def test_port_check_create_complete_with_non_not_found_exception(self):
+ b = self._create_resource('port', self.rsrc_defn, self.stack,
+ get_exception=ic_exc.Conflict())
+ exc = self.assertRaises(ic_exc.Conflict, b.check_create_complete,
+ self.resource_id)
+ self.assertIn('Conflict', str(exc))
+
+ def _port_update(self, exc_msg=None):
+ b = self._create_resource('port', self.rsrc_defn, self.stack)
+ scheduler.TaskRunner(b.create)()
+ if exc_msg:
+ self.client.port.update.side_effect = ic_exc.Conflict(exc_msg)
+ t = template_format.parse(port_template)
+ new_t = copy.deepcopy(t)
+ new_extra = {'foo': 'bar'}
+ m_pg = mock.Mock(extra=new_extra)
+ self.client.port.get.return_value = m_pg
+ new_t['resources'][self.fake_name]['properties']['extra'] = new_extra
+ rsrc_defns = template.Template(new_t).resource_definitions(self.stack)
+ new_port = rsrc_defns[self.fake_name]
+ if exc_msg:
+ exc = self.assertRaises(
+ exception.ResourceFailure,
+ scheduler.TaskRunner(b.update, new_port))
+ self.assertIn(exc_msg, str(exc))
+ else:
+ scheduler.TaskRunner(b.update, new_port)()
+ self.client.port.update.assert_called_once_with(
+ self.resource_id,
+ [{'op': 'replace', 'path': '/extra', 'value': new_extra}])
+
+ def test_port_update(self):
+ self._port_update()
+
+ def test_port_update_failed(self):
+ exc_msg = ("Port 9cc6fd32-f711-4e1f-a82d-59e6ae074e95 can not have "
+ "any connectivity attributes (pxe_enabled, portgroup_id, "
+ "physical_network, local_link_connection) updated unless "
+ "node 9ccee9ec-92a5-4580-9242-82eb7f454d3f is in a enroll, "
+ "inspecting, inspect wait, manageable state or in "
+ "maintenance mode.")
+ self._port_update(exc_msg)
+
+ def test_port_check_delete_complete_with_no_id(self):
+ b = self._create_resource('port', self.rsrc_defn, self.stack)
+ self.assertTrue(b.check_delete_complete(None))
+
+ def test_port_check_delete_complete_with_not_found(self):
+ b = self._create_resource('port', self.rsrc_defn, self.stack,
+ get_exception=ic_exc.NotFound)
+ self.assertTrue(b.check_delete_complete(self.resource_id))
+
+ def test_port_check_delete_complete_with_exception(self):
+ b = self._create_resource('port', self.rsrc_defn, self.stack,
+ get_exception=ic_exc.Conflict())
+ exc = self.assertRaises(ic_exc.Conflict,
+ b.check_delete_complete, self.resource_id)
+ self.assertIn('Conflict', str(exc))
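Editor's note: the new OS::Ironic::Port tests gate several properties on the Ironic API microversion — the _property_not_supported helper drops max_microversion just below the required value and expects a NotSupported failure. A hypothetical lookup of those thresholds, with the version numbers taken straight from the test calls rather than from Heat's resource code:

    PORT_PROPERTY_MIN_MICROVERSION = {
        'pxe_enabled': 1.19,
        'local_link_connection': 1.19,
        'portgroup': 1.24,
        'physical_network': 1.34,
        'is_smartnic': 1.53,
    }

    def is_supported(prop, max_microversion):
        # True only when the configured ceiling reaches the property's minimum.
        return max_microversion >= PORT_PROPERTY_MIN_MICROVERSION[prop]

    assert not is_supported('portgroup', 1.24 - 0.01)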
diff --git a/heat/tests/openstack/keystone/test_domain.py b/heat/tests/openstack/keystone/test_domain.py
index 1f6e45259..204b09205 100644
--- a/heat/tests/openstack/keystone/test_domain.py
+++ b/heat/tests/openstack/keystone/test_domain.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import resource
diff --git a/heat/tests/openstack/keystone/test_endpoint.py b/heat/tests/openstack/keystone/test_endpoint.py
index 9bcc8b4c6..9a53476a1 100644
--- a/heat/tests/openstack/keystone/test_endpoint.py
+++ b/heat/tests/openstack/keystone/test_endpoint.py
@@ -12,8 +12,7 @@
# under the License.
import copy
-
-import mock
+from unittest import mock
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import constraints
diff --git a/heat/tests/openstack/keystone/test_group.py b/heat/tests/openstack/keystone/test_group.py
index 680382030..01ecd9c93 100644
--- a/heat/tests/openstack/keystone/test_group.py
+++ b/heat/tests/openstack/keystone/test_group.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import constraints
diff --git a/heat/tests/openstack/keystone/test_project.py b/heat/tests/openstack/keystone/test_project.py
index 9b3c4ef63..095251a29 100644
--- a/heat/tests/openstack/keystone/test_project.py
+++ b/heat/tests/openstack/keystone/test_project.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine import constraints
from heat.engine import properties
diff --git a/heat/tests/openstack/keystone/test_region.py b/heat/tests/openstack/keystone/test_region.py
index 646802cd5..bd3a3872f 100644
--- a/heat/tests/openstack/keystone/test_region.py
+++ b/heat/tests/openstack/keystone/test_region.py
@@ -11,8 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-from six.moves.urllib import parse
+from unittest import mock
+from urllib import parse
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import resource
diff --git a/heat/tests/openstack/keystone/test_role.py b/heat/tests/openstack/keystone/test_role.py
index df9eda150..65fa0138a 100644
--- a/heat/tests/openstack/keystone/test_role.py
+++ b/heat/tests/openstack/keystone/test_role.py
@@ -12,7 +12,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import resource
diff --git a/heat/tests/openstack/keystone/test_role_assignments.py b/heat/tests/openstack/keystone/test_role_assignments.py
index a2f235866..0674d816a 100644
--- a/heat/tests/openstack/keystone/test_role_assignments.py
+++ b/heat/tests/openstack/keystone/test_role_assignments.py
@@ -12,7 +12,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
from heat.common import exception
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
@@ -348,7 +348,7 @@ class KeystoneRoleAssignmentMixinTest(common.HeatTestCase):
({'role': 'role_1', 'user': 'user_1', 'domain': 'domain_1'},)
]
- self.assertItemsEqual(expected, self.roles.revoke.call_args_list)
+ self.assertCountEqual(expected, self.roles.revoke.call_args_list)
def test_validate_1(self):
self.test_role_assignment.properties = mock.MagicMock()
diff --git a/heat/tests/openstack/keystone/test_service.py b/heat/tests/openstack/keystone/test_service.py
index ba2a7e0ad..2f773db2a 100644
--- a/heat/tests/openstack/keystone/test_service.py
+++ b/heat/tests/openstack/keystone/test_service.py
@@ -12,8 +12,7 @@
# under the License.
import copy
-
-import mock
+from unittest import mock
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import properties
diff --git a/heat/tests/openstack/keystone/test_user.py b/heat/tests/openstack/keystone/test_user.py
index 7f67d5c34..6f7dda8f2 100644
--- a/heat/tests/openstack/keystone/test_user.py
+++ b/heat/tests/openstack/keystone/test_user.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine import resource
diff --git a/heat/tests/openstack/magnum/test_bay.py b/heat/tests/openstack/magnum/test_bay.py
index 68f766d04..66803e80e 100644
--- a/heat/tests/openstack/magnum/test_bay.py
+++ b/heat/tests/openstack/magnum/test_bay.py
@@ -12,9 +12,9 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common import template_format
@@ -78,7 +78,7 @@ class TestMagnumBay(common.HeatTestCase):
exc = self.assertRaises(
exception.ResourceFailure,
scheduler.TaskRunner(b.create))
- self.assertIn("Failed to create Bay", six.text_type(exc))
+ self.assertIn("Failed to create Bay", str(exc))
def test_bay_create_unknown_status(self):
b = self._create_resource('bay', self.rsrc_defn, self.stack,
@@ -86,7 +86,7 @@ class TestMagnumBay(common.HeatTestCase):
exc = self.assertRaises(
exception.ResourceFailure,
scheduler.TaskRunner(b.create))
- self.assertIn("Unknown status creating Bay", six.text_type(exc))
+ self.assertIn("Unknown status creating Bay", str(exc))
def test_bay_update(self):
b = self._create_resource('bay', self.rsrc_defn, self.stack)
@@ -114,7 +114,7 @@ class TestMagnumBay(common.HeatTestCase):
exc = self.assertRaises(
exception.ResourceFailure,
scheduler.TaskRunner(b.update, new_bm))
- self.assertIn("Failed to update Bay", six.text_type(exc))
+ self.assertIn("Failed to update Bay", str(exc))
def test_bay_update_unknown_status(self):
b = self._create_resource('bay', self.rsrc_defn, self.stack)
@@ -129,7 +129,7 @@ class TestMagnumBay(common.HeatTestCase):
exc = self.assertRaises(
exception.ResourceFailure,
scheduler.TaskRunner(b.update, new_bm))
- self.assertIn("Unknown status updating Bay", six.text_type(exc))
+ self.assertIn("Unknown status updating Bay", str(exc))
def test_bay_delete(self):
b = self._create_resource('bay', self.rsrc_defn, self.stack)
diff --git a/heat/tests/openstack/magnum/test_cluster.py b/heat/tests/openstack/magnum/test_cluster.py
index c9160bd3c..4bc58aecb 100644
--- a/heat/tests/openstack/magnum/test_cluster.py
+++ b/heat/tests/openstack/magnum/test_cluster.py
@@ -12,9 +12,9 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common import template_format
@@ -200,7 +200,7 @@ class TestMagnumCluster(common.HeatTestCase):
exc = self.assertRaises(
exception.ResourceFailure,
scheduler.TaskRunner(b.create))
- self.assertIn("Failed to create Cluster", six.text_type(exc))
+ self.assertIn("Failed to create Cluster", str(exc))
def test_cluster_create_unknown_status(self):
b = self._create_resource('cluster', self.rsrc_defn, self.stack,
@@ -208,7 +208,7 @@ class TestMagnumCluster(common.HeatTestCase):
exc = self.assertRaises(
exception.ResourceFailure,
scheduler.TaskRunner(b.create))
- self.assertIn("Unknown status creating Cluster", six.text_type(exc))
+ self.assertIn("Unknown status creating Cluster", str(exc))
def _cluster_update(self, update_status='UPDATE_COMPLETE', exc_msg=None):
b = self._create_resource('cluster', self.rsrc_defn, self.stack)
@@ -227,7 +227,7 @@ class TestMagnumCluster(common.HeatTestCase):
exc = self.assertRaises(
exception.ResourceFailure,
scheduler.TaskRunner(b.update, new_bm))
- self.assertIn(exc_msg, six.text_type(exc))
+ self.assertIn(exc_msg, str(exc))
def test_cluster_update(self):
self._cluster_update()
diff --git a/heat/tests/openstack/magnum/test_cluster_template.py b/heat/tests/openstack/magnum/test_cluster_template.py
index 3d9176c00..22d2aa31c 100644
--- a/heat/tests/openstack/magnum/test_cluster_template.py
+++ b/heat/tests/openstack/magnum/test_cluster_template.py
@@ -12,9 +12,9 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from neutronclient.neutron import v2_0 as neutronV20
-import six
from heat.common import exception
from heat.common import template_format
@@ -133,7 +133,7 @@ class TestMagnumClusterTemplate(common.HeatTestCase):
"expecting a ['rexray'] volume driver.")
ex = self.assertRaises(exception.StackValidationFailed,
stack['test_cluster_template'].validate)
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def _cluster_template_update(self, update_status='UPDATE_COMPLETE',
exc_msg=None):
@@ -153,7 +153,7 @@ class TestMagnumClusterTemplate(common.HeatTestCase):
exc = self.assertRaises(
exception.ResourceFailure,
scheduler.TaskRunner(ct.update, new_ct))
- self.assertIn(exc_msg, six.text_type(exc))
+ self.assertIn(exc_msg, str(exc))
def test_cluster_update(self):
self._cluster_template_update()
diff --git a/heat/tests/openstack/manila/test_security_service.py b/heat/tests/openstack/manila/test_security_service.py
index 23f291cc4..7a4d17c00 100644
--- a/heat/tests/openstack/manila/test_security_service.py
+++ b/heat/tests/openstack/manila/test_security_service.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -112,7 +111,7 @@ class ManilaSecurityServiceTest(common.HeatTestCase):
expected_state = (ss.CREATE, ss.FAILED)
self.assertEqual(expected_state, ss.state)
self.assertIn('Exception: resources.security_service: error',
- six.text_type(exc))
+ str(exc))
def test_update(self):
ss = self._create_resource('security_service', self.rsrc_defn,
@@ -142,4 +141,4 @@ class ManilaSecurityServiceTest(common.HeatTestCase):
err = self.assertRaises(resource.UpdateReplace,
scheduler.TaskRunner(ss.update, new_ss))
msg = 'The Resource security_service requires replacement.'
- self.assertEqual(msg, six.text_type(err))
+ self.assertEqual(msg, str(err))
diff --git a/heat/tests/openstack/manila/test_share.py b/heat/tests/openstack/manila/test_share.py
index ff837c9d7..cf4e787ed 100644
--- a/heat/tests/openstack/manila/test_share.py
+++ b/heat/tests/openstack/manila/test_share.py
@@ -12,9 +12,7 @@
# under the License.
import collections
import copy
-
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -48,13 +46,22 @@ class DummyShare(object):
def __init__(self):
self.availability_zone = 'az'
self.host = 'host'
- self.export_locations = 'el'
self.share_server_id = 'id'
self.created_at = 'ca'
self.status = 's'
self.project_id = 'p_id'
+class DummyShareExportLocation(object):
+ def __init__(self):
+ self.export_location = {
+ 'path': 'el'
+ }
+
+ def to_dict(self):
+ return self.export_location
+
+
class ManilaShareTest(common.HeatTestCase):
def setUp(self):
@@ -125,7 +132,7 @@ class ManilaShareTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceInError,
share.check_create_complete,
self.failed_share)
- self.assertIn("Error during creation", six.text_type(exc))
+ self.assertIn("Error during creation", str(exc))
def test_share_create_unknown_status(self):
share = self._init_share("stack_share_create_unknown")
@@ -133,7 +140,7 @@ class ManilaShareTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceUnknownStatus,
share.check_create_complete,
self.deleting_share)
- self.assertIn("Unknown status", six.text_type(exc))
+ self.assertIn("Unknown status", str(exc))
def test_share_check(self):
share = self._create_share("stack_share_check")
@@ -148,7 +155,7 @@ class ManilaShareTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(share.check))
self.assertIn("Error: resources.test_share: 'status': expected "
- "'['available']'", six.text_type(exc))
+ "'['available']'", str(exc))
def test_share_update(self):
share = self._create_share("stack_share_update")
@@ -214,9 +221,11 @@ class ManilaShareTest(common.HeatTestCase):
def test_attributes(self):
share = self._create_share("share")
share.client().shares.get.return_value = DummyShare()
+ share.client().share_export_locations.list.return_value = [
+ DummyShareExportLocation()]
self.assertEqual('az', share.FnGetAtt('availability_zone'))
self.assertEqual('host', share.FnGetAtt('host'))
- self.assertEqual('el', share.FnGetAtt('export_locations'))
+ self.assertEqual("['el']", share.FnGetAtt('export_locations'))
self.assertEqual('id', share.FnGetAtt('share_server_id'))
self.assertEqual('ca', share.FnGetAtt('created_at'))
self.assertEqual('s', share.FnGetAtt('status'))
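
Note: the share test no longer fakes `export_locations` as a plain string on the share object; it stubs `share_export_locations.list()` to return objects exposing `to_dict()`, and the `export_locations` attribute now comes back as the string form of a list of paths (`"['el']"`). A minimal sketch of the resolution the stub implies; `resolve_export_locations` and the fake classes are hypothetical, only the client call shape and the `to_dict()` payload come from the test:

    class _FakeLocation(object):
        def to_dict(self):
            return {'path': 'el'}

    class _FakeLocationsAPI(object):
        def list(self, share_id):
            return [_FakeLocation()]

    def resolve_export_locations(locations_api, share_id):
        # Assumption: keep only each location's 'path', matching the expected "['el']".
        return [loc.to_dict()['path'] for loc in locations_api.list(share_id)]

    assert str(resolve_export_locations(_FakeLocationsAPI(), 'share-1')) == "['el']"
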
diff --git a/heat/tests/openstack/manila/test_share_network.py b/heat/tests/openstack/manila/test_share_network.py
index da723b35b..e550137b9 100644
--- a/heat/tests/openstack/manila/test_share_network.py
+++ b/heat/tests/openstack/manila/test_share_network.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.common import template_format
diff --git a/heat/tests/openstack/manila/test_share_type.py b/heat/tests/openstack/manila/test_share_type.py
index cf1032aad..08a79c040 100644
--- a/heat/tests/openstack/manila/test_share_type.py
+++ b/heat/tests/openstack/manila/test_share_type.py
@@ -12,8 +12,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from heat.common import template_format
from heat.engine.resources.openstack.manila import share_type as mshare_type
diff --git a/heat/tests/openstack/mistral/test_cron_trigger.py b/heat/tests/openstack/mistral/test_cron_trigger.py
index 58c6a15d3..939452f61 100644
--- a/heat/tests/openstack/mistral/test_cron_trigger.py
+++ b/heat/tests/openstack/mistral/test_cron_trigger.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.common import template_format
diff --git a/heat/tests/openstack/mistral/test_external_resource.py b/heat/tests/openstack/mistral/test_external_resource.py
index 77032b817..fd92440a8 100644
--- a/heat/tests/openstack/mistral/test_external_resource.py
+++ b/heat/tests/openstack/mistral/test_external_resource.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.common import template_format
diff --git a/heat/tests/openstack/mistral/test_workflow.py b/heat/tests/openstack/mistral/test_workflow.py
index 641014e07..9f0057dc9 100644
--- a/heat/tests/openstack/mistral/test_workflow.py
+++ b/heat/tests/openstack/mistral/test_workflow.py
@@ -11,10 +11,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
import yaml
+from mistralclient.api import base as mistral_base
from mistralclient.api.v2 import executions
from oslo_serialization import jsonutils
@@ -302,6 +302,21 @@ resources:
result: <% $.hello %>
"""
+workflow_template_update_replace_failed = """
+heat_template_version: 2013-05-23
+resources:
+ workflow:
+ type: OS::Mistral::Workflow
+ properties:
+ name: hello_action
+ type: direct
+ tasks:
+ - name: hello
+ action: std.echo output='Good Morning!'
+ publish:
+ result: <% $.hello %>
+"""
+
workflow_template_update = """
heat_template_version: 2013-05-23
resources:
@@ -373,12 +388,6 @@ class TestMistralWorkflow(common.HeatTestCase):
def setUp(self):
super(TestMistralWorkflow, self).setUp()
self.ctx = utils.dummy_context()
- tmpl = template_format.parse(workflow_template)
- self.stack = utils.parse_stack(tmpl, stack_name='test_stack')
-
- resource_defns = self.stack.t.resource_definitions(self.stack)
- self.rsrc_defn = resource_defns['workflow']
-
self.mistral = mock.Mock()
self.patchobject(workflow.Workflow, 'client',
return_value=self.mistral)
@@ -400,15 +409,20 @@ class TestMistralWorkflow(common.HeatTestCase):
for patch in self.patches:
patch.stop()
- def _create_resource(self, name, snippet, stack):
- wf = workflow.Workflow(name, snippet, stack)
+ def _create_resource(self, name, template=workflow_template):
+ tmpl = template_format.parse(template)
+ self.stack = utils.parse_stack(tmpl, stack_name='test_stack')
+
+ resource_defns = self.stack.t.resource_definitions(self.stack)
+ rsrc_defn = resource_defns['workflow']
+ wf = workflow.Workflow(name, rsrc_defn, self.stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('test_stack-workflow-b5fiekfci3yc')]
scheduler.TaskRunner(wf.create)()
return wf
def test_create(self):
- wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
+ wf = self._create_resource('workflow')
expected_state = (wf.CREATE, wf.COMPLETE)
self.assertEqual(expected_state, wf.state)
self.assertEqual('test_stack-workflow-b5fiekfci3yc', wf.resource_id)
@@ -460,7 +474,7 @@ class TestMistralWorkflow(common.HeatTestCase):
break
def test_attributes(self):
- wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
+ wf = self._create_resource('workflow')
self.mistral.workflows.get.return_value = (
FakeWorkflow('test_stack-workflow-b5fiekfci3yc'))
self.assertEqual({'name': 'test_stack-workflow-b5fiekfci3yc',
@@ -501,7 +515,7 @@ class TestMistralWorkflow(common.HeatTestCase):
error_cls = exception.StackValidationFailed
exc = self.assertRaises(error_cls, wf.validate)
- self.assertEqual(error_msg, six.text_type(exc))
+ self.assertEqual(error_msg, str(exc))
def test_create_wrong_definition(self):
tmpl = template_format.parse(workflow_template)
@@ -518,10 +532,10 @@ class TestMistralWorkflow(common.HeatTestCase):
expected_state = (wf.CREATE, wf.FAILED)
self.assertEqual(expected_state, wf.state)
self.assertIn('Exception: resources.workflow: boom!',
- six.text_type(exc))
+ str(exc))
def test_update_replace(self):
- wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
+ wf = self._create_resource('workflow')
t = template_format.parse(workflow_template_update_replace)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
@@ -535,11 +549,10 @@ class TestMistralWorkflow(common.HeatTestCase):
scheduler.TaskRunner(wf.update,
new_workflow))
msg = 'The Resource workflow requires replacement.'
- self.assertEqual(msg, six.text_type(err))
+ self.assertEqual(msg, str(err))
def test_update(self):
- wf = self._create_resource('workflow', self.rsrc_defn,
- self.stack)
+ wf = self._create_resource('workflow')
t = template_format.parse(workflow_template_update)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
new_wf = rsrc_defns['workflow']
@@ -550,8 +563,7 @@ class TestMistralWorkflow(common.HeatTestCase):
self.assertEqual((wf.UPDATE, wf.COMPLETE), wf.state)
def test_update_input(self):
- wf = self._create_resource('workflow', self.rsrc_defn,
- self.stack)
+ wf = self._create_resource('workflow')
t = template_format.parse(workflow_template)
t['resources']['workflow']['properties']['input'] = {'foo': 'bar'}
rsrc_defns = template.Template(t).resource_definitions(self.stack)
@@ -563,8 +575,7 @@ class TestMistralWorkflow(common.HeatTestCase):
self.assertEqual((wf.UPDATE, wf.COMPLETE), wf.state)
def test_update_failed(self):
- wf = self._create_resource('workflow', self.rsrc_defn,
- self.stack)
+ wf = self._create_resource('workflow')
t = template_format.parse(workflow_template_update)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
new_wf = rsrc_defns['workflow']
@@ -573,8 +584,42 @@ class TestMistralWorkflow(common.HeatTestCase):
scheduler.TaskRunner(wf.update, new_wf))
self.assertEqual((wf.UPDATE, wf.FAILED), wf.state)
+ def test_update_failed_no_replace(self):
+ wf = self._create_resource('workflow',
+ workflow_template_update_replace)
+ t = template_format.parse(workflow_template_update_replace_failed)
+ rsrc_defns = template.Template(t).resource_definitions(self.stack)
+ new_wf = rsrc_defns['workflow']
+ self.mistral.workflows.get.return_value = (
+ FakeWorkflow('test_stack-workflow-b5fiekfci3yc'))
+ self.mistral.workflows.update.side_effect = [
+ Exception('boom!'),
+ [FakeWorkflow('test_stack-workflow-b5fiekfci3yc')]]
+ self.assertRaises(exception.ResourceFailure,
+ scheduler.TaskRunner(wf.update, new_wf))
+ self.assertEqual((wf.UPDATE, wf.FAILED), wf.state)
+ scheduler.TaskRunner(wf.update, new_wf)()
+ self.assertTrue(self.mistral.workflows.update.called)
+ self.assertEqual((wf.UPDATE, wf.COMPLETE), wf.state)
+
+ def test_update_failed_replace_not_found(self):
+ wf = self._create_resource('workflow',
+ workflow_template_update_replace)
+ t = template_format.parse(workflow_template_update_replace_failed)
+ rsrc_defns = template.Template(t).resource_definitions(self.stack)
+ new_wf = rsrc_defns['workflow']
+ self.mistral.workflows.update.side_effect = Exception('boom!')
+ self.assertRaises(exception.ResourceFailure,
+ scheduler.TaskRunner(wf.update, new_wf))
+ self.assertEqual((wf.UPDATE, wf.FAILED), wf.state)
+ self.mistral.workflows.get.side_effect = [
+ mistral_base.APIException(error_code=404)]
+ self.assertRaises(resource.UpdateReplace,
+ scheduler.TaskRunner(wf.update,
+ new_wf))
+
def test_delete_super_call_successful(self):
- wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
+ wf = self._create_resource('workflow')
scheduler.TaskRunner(wf.delete)()
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
@@ -582,7 +627,7 @@ class TestMistralWorkflow(common.HeatTestCase):
self.assertEqual(1, self.mistral.workflows.delete.call_count)
def test_delete_executions_successful(self):
- wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
+ wf = self._create_resource('workflow')
self.mistral.executuions.delete.return_value = None
wf._data = {'executions': '1234,5678'}
@@ -594,7 +639,7 @@ class TestMistralWorkflow(common.HeatTestCase):
data_delete.assert_called_once_with('executions')
def test_delete_executions_not_found(self):
- wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
+ wf = self._create_resource('workflow')
self.mistral.executuions.delete.side_effect = [
self.mistral.mistral_base.APIException(error_code=404),
@@ -621,7 +666,7 @@ class TestMistralWorkflow(common.HeatTestCase):
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
self.assertEqual('Exception: resources.create_vm: boom!',
- six.text_type(err))
+ str(err))
def test_signal_wrong_input_and_params_type(self):
tmpl = template_format.parse(workflow_template_full)
@@ -634,22 +679,17 @@ class TestMistralWorkflow(common.HeatTestCase):
details = {'input': '3'}
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
- if six.PY3:
- entity = 'class'
- else:
- entity = 'type'
- error_message = ("StackValidationFailed: resources.create_vm: "
- "Signal data error: Input in"
- " signal data must be a map, find a <%s 'str'>" %
- entity)
- self.assertEqual(error_message, six.text_type(err))
+ error_message = "StackValidationFailed: resources.create_vm: " \
+ "Signal data error: Input in" \
+ " signal data must be a map, find a <class 'str'>"
+ self.assertEqual(error_message, str(err))
details = {'params': '3'}
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
error_message = ("StackValidationFailed: resources.create_vm: "
"Signal data error: Params "
- "must be a map, find a <%s 'str'>" % entity)
- self.assertEqual(error_message, six.text_type(err))
+ "must be a map, find a <class 'str'>")
+ self.assertEqual(error_message, str(err))
def test_signal_wrong_input_key(self):
tmpl = template_format.parse(workflow_template_full)
@@ -664,7 +704,7 @@ class TestMistralWorkflow(common.HeatTestCase):
scheduler.TaskRunner(wf.signal, details))
error_message = ("StackValidationFailed: resources.create_vm: "
"Signal data error: Unknown input 1")
- self.assertEqual(error_message, six.text_type(err))
+ self.assertEqual(error_message, str(err))
def test_signal_with_body_as_input_and_delete_with_executions(self):
tmpl = template_format.parse(workflow_template_full)
@@ -816,7 +856,7 @@ class TestMistralWorkflow(common.HeatTestCase):
workflow_rsrc.validate)
error_msg = ("Cannot define the following properties at "
"the same time: tasks.retry, tasks.policies.retry")
- self.assertIn(error_msg, six.text_type(ex))
+ self.assertIn(error_msg, str(ex))
def validate_json_inputs(self, actual_input, expected_input):
actual_json_input = jsonutils.loads(actual_input)
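
Note: the workflow test refactor moves template parsing into `_create_resource()`, so each test can create the resource from one template and feed it another on update. The two new tests then cover a failed `workflows.update`: while the workflow still exists, the next update retries in place and ends UPDATE_COMPLETE; once `workflows.get` raises a 404 `APIException`, the update falls back to `UpdateReplace`. The sequential mock behaviour relies on `side_effect` accepting a list; a small standalone illustration with generic names:

    from unittest import mock

    update = mock.Mock(side_effect=[Exception('boom!'), {'id': 'wf-1'}])

    try:
        update()                          # first call raises, like the failed update
    except Exception:
        pass
    assert update() == {'id': 'wf-1'}     # the retry consumes the next side_effect item
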
diff --git a/heat/tests/openstack/monasca/test_alarm_definition.py b/heat/tests/openstack/monasca/test_alarm_definition.py
index f0ea7927c..8dba8d04d 100644
--- a/heat/tests/openstack/monasca/test_alarm_definition.py
+++ b/heat/tests/openstack/monasca/test_alarm_definition.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os import monasca as client_plugin
from heat.engine.resources.openstack.monasca import alarm_definition
diff --git a/heat/tests/openstack/monasca/test_notification.py b/heat/tests/openstack/monasca/test_notification.py
index d82732139..500597950 100644
--- a/heat/tests/openstack/monasca/test_notification.py
+++ b/heat/tests/openstack/monasca/test_notification.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.engine.cfn import functions as cfn_funcs
@@ -94,7 +93,7 @@ class MonascaNotificationTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.test_resource.validate)
self.assertEqual('Address "abc@def.com" doesn\'t have '
- 'required URL scheme', six.text_type(ex))
+ 'required URL scheme', str(ex))
def test_validate_no_netloc_address_for_webhook(self):
self.test_resource.properties.data['type'] = self.test_resource.WEBHOOK
@@ -102,7 +101,7 @@ class MonascaNotificationTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.test_resource.validate)
self.assertEqual('Address "https://" doesn\'t have '
- 'required network location', six.text_type(ex))
+ 'required network location', str(ex))
def test_validate_prohibited_address_for_webhook(self):
self.test_resource.properties.data['type'] = self.test_resource.WEBHOOK
@@ -110,7 +109,7 @@ class MonascaNotificationTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.test_resource.validate)
self.assertEqual('Address "ftp://127.0.0.1" doesn\'t satisfies '
- 'allowed schemes: http, https', six.text_type(ex))
+ 'allowed schemes: http, https', str(ex))
def test_validate_incorrect_address_for_email(self):
self.test_resource.properties.data['type'] = self.test_resource.EMAIL
@@ -120,7 +119,7 @@ class MonascaNotificationTest(common.HeatTestCase):
self.test_resource.validate)
self.assertEqual('Address "abc#def.com" doesn\'t satisfies allowed '
'format for "email" type of "type" property',
- six.text_type(ex))
+ str(ex))
def test_validate_invalid_address_parsing(self):
self.test_resource.properties.data['type'] = self.test_resource.WEBHOOK
@@ -129,7 +128,7 @@ class MonascaNotificationTest(common.HeatTestCase):
self.test_resource.validate)
self.assertEqual('Address "https://example.com]" should have correct '
'format required by "webhook" type of "type" '
- 'property', six.text_type(ex))
+ 'property', str(ex))
def test_resource_handle_create(self):
mock_notification_create = self.test_client.notifications.create
diff --git a/heat/tests/openstack/neutron/lbaas/test_health_monitor.py b/heat/tests/openstack/neutron/lbaas/test_health_monitor.py
index bed3e2b45..4f0fd64a2 100644
--- a/heat/tests/openstack/neutron/lbaas/test_health_monitor.py
+++ b/heat/tests/openstack/neutron/lbaas/test_health_monitor.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from neutronclient.common import exceptions
diff --git a/heat/tests/openstack/neutron/lbaas/test_l7policy.py b/heat/tests/openstack/neutron/lbaas/test_l7policy.py
index 95ab88779..e5b9de155 100644
--- a/heat/tests/openstack/neutron/lbaas/test_l7policy.py
+++ b/heat/tests/openstack/neutron/lbaas/test_l7policy.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import yaml
from neutronclient.common import exceptions
diff --git a/heat/tests/openstack/neutron/lbaas/test_l7rule.py b/heat/tests/openstack/neutron/lbaas/test_l7rule.py
index 258343ffc..b6103a966 100644
--- a/heat/tests/openstack/neutron/lbaas/test_l7rule.py
+++ b/heat/tests/openstack/neutron/lbaas/test_l7rule.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import yaml
from neutronclient.common import exceptions
diff --git a/heat/tests/openstack/neutron/lbaas/test_listener.py b/heat/tests/openstack/neutron/lbaas/test_listener.py
index 0b5887e8e..c7aa118fc 100644
--- a/heat/tests/openstack/neutron/lbaas/test_listener.py
+++ b/heat/tests/openstack/neutron/lbaas/test_listener.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import yaml
from neutronclient.common import exceptions
diff --git a/heat/tests/openstack/neutron/lbaas/test_loadbalancer.py b/heat/tests/openstack/neutron/lbaas/test_loadbalancer.py
index 24bb7047d..bc38d1aaf 100644
--- a/heat/tests/openstack/neutron/lbaas/test_loadbalancer.py
+++ b/heat/tests/openstack/neutron/lbaas/test_loadbalancer.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from neutronclient.common import exceptions
diff --git a/heat/tests/openstack/neutron/lbaas/test_pool.py b/heat/tests/openstack/neutron/lbaas/test_pool.py
index b71c25bc8..debd5523f 100644
--- a/heat/tests/openstack/neutron/lbaas/test_pool.py
+++ b/heat/tests/openstack/neutron/lbaas/test_pool.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import yaml
from neutronclient.common import exceptions
diff --git a/heat/tests/openstack/neutron/lbaas/test_pool_member.py b/heat/tests/openstack/neutron/lbaas/test_pool_member.py
index d6c6c0af3..3138c2527 100644
--- a/heat/tests/openstack/neutron/lbaas/test_pool_member.py
+++ b/heat/tests/openstack/neutron/lbaas/test_pool_member.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from neutronclient.common import exceptions
diff --git a/heat/tests/openstack/neutron/test_address_scope.py b/heat/tests/openstack/neutron/test_address_scope.py
index 1473501c5..82d7df97c 100644
--- a/heat/tests/openstack/neutron/test_address_scope.py
+++ b/heat/tests/openstack/neutron/test_address_scope.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import template_format
from heat.engine.clients.os import neutron
diff --git a/heat/tests/openstack/neutron/test_extraroute.py b/heat/tests/openstack/neutron/test_extraroute.py
index 5a2e2385c..6ef33a0f2 100644
--- a/heat/tests/openstack/neutron/test_extraroute.py
+++ b/heat/tests/openstack/neutron/test_extraroute.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from neutronclient.v2_0 import client as neutronclient
@@ -74,11 +74,18 @@ class NeutronExtraRouteTest(common.HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
return rsrc
- def test_extraroute(self):
- route1 = {"destination": "192.168.0.0/24",
- "nexthop": "1.1.1.1"}
- route2 = {"destination": "192.168.255.0/24",
- "nexthop": "1.1.1.1"}
+ def _test_extraroute(self, ipv6=False):
+
+ if ipv6:
+ route1 = {"destination": "ffff:f53b:82e4::56/46",
+ "nexthop": "dce7:f53b:82e4::56"}
+ route2 = {"destination": "ffff:f53b:ffff::56/46",
+ "nexthop": "dce7:f53b:82e4::56"}
+ else:
+ route1 = {"destination": "192.168.0.0/24",
+ "nexthop": "1.1.1.1"}
+ route2 = {"destination": "192.168.255.0/24",
+ "nexthop": "1.1.1.1"}
self.stub_RouterConstraint_validate()
@@ -97,17 +104,30 @@ class NeutronExtraRouteTest(common.HeatTestCase):
t = template_format.parse(neutron_template)
stack = utils.parse_stack(t)
- rsrc1 = self.create_extraroute(
- t, stack, 'extraroute1', properties={
- 'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
- 'destination': '192.168.0.0/24',
- 'nexthop': '1.1.1.1'})
-
- self.create_extraroute(
- t, stack, 'extraroute2', properties={
- 'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
- 'destination': '192.168.255.0/24',
- 'nexthop': '1.1.1.1'})
+ if ipv6:
+ rsrc1 = self.create_extraroute(
+ t, stack, 'extraroute1', properties={
+ 'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
+ 'destination': 'ffff:f53b:82e4::56/46',
+ 'nexthop': 'dce7:f53b:82e4::56'})
+
+ self.create_extraroute(
+ t, stack, 'extraroute2', properties={
+ 'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
+ 'destination': 'ffff:f53b:ffff::56/46',
+ 'nexthop': 'dce7:f53b:82e4::56'})
+ else:
+ rsrc1 = self.create_extraroute(
+ t, stack, 'extraroute1', properties={
+ 'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
+ 'destination': '192.168.0.0/24',
+ 'nexthop': '1.1.1.1'})
+
+ self.create_extraroute(
+ t, stack, 'extraroute2', properties={
+ 'router_id': '3e46229d-8fce-4733-819a-b5fe630550f8',
+ 'destination': '192.168.255.0/24',
+ 'nexthop': '1.1.1.1'})
scheduler.TaskRunner(rsrc1.delete)()
rsrc1.state_set(rsrc1.CREATE, rsrc1.COMPLETE, 'to delete again')
@@ -127,3 +147,9 @@ class NeutronExtraRouteTest(common.HeatTestCase):
mock.call('3e46229d-8fce-4733-819a-b5fe630550f8',
{'router': {'routes': [route2.copy()]}}),
])
+
+ def test_extraroute_ipv4(self):
+ self._test_extraroute(ipv6=False)
+
+ def test_extraroute_ipv6(self):
+ self._test_extraroute(ipv6=True)
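
Note: the single `test_extraroute` becomes a `_test_extraroute(ipv6=...)` helper with two thin wrappers, so the identical create/delete flow runs once per address family while keeping separate, stable test names. The same shape in miniature (names here are illustrative):

    class RouteTestMixin(object):
        def _check_route(self, ipv6=False):
            destination = 'ffff:f53b:82e4::56/46' if ipv6 else '192.168.0.0/24'
            # the shared body exercises the same code path for both families
            assert '/' in destination

        def test_route_ipv4(self):
            self._check_route(ipv6=False)

        def test_route_ipv6(self):
            self._check_route(ipv6=True)
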
diff --git a/heat/tests/openstack/neutron/test_neutron.py b/heat/tests/openstack/neutron/test_neutron.py
index d01050d62..14f00258e 100644
--- a/heat/tests/openstack/neutron/test_neutron.py
+++ b/heat/tests/openstack/neutron/test_neutron.py
@@ -12,7 +12,6 @@
# under the License.
from neutronclient.common import exceptions as qe
-import six
from heat.common import exception
from heat.engine import attributes
@@ -42,7 +41,7 @@ class NeutronTest(common.HeatTestCase):
banned_keys = {'shared': True,
'name': 'foo',
'tenant_id': '1234'}
- for key, val in six.iteritems(banned_keys):
+ for key, val in banned_keys.items():
vs.update({key: val})
msg = '%s not allowed in value_specs' % key
self.assertEqual(msg, nr.NeutronResource.validate_properties(p))
@@ -67,13 +66,13 @@ class NeutronTest(common.HeatTestCase):
nr.NeutronResource.is_built, {'status': 'ERROR'})
self.assertEqual(
'Went to status ERROR due to "Unknown"',
- six.text_type(e))
+ str(e))
e = self.assertRaises(
exception.ResourceUnknownStatus,
nr.NeutronResource.is_built, {'status': 'FROBULATING'})
self.assertEqual('Resource is not built - Unknown status '
'FROBULATING due to "Unknown"',
- six.text_type(e))
+ str(e))
def _get_some_neutron_resource(self):
class SomeNeutronResource(nr.NeutronResource):
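
Note: besides `six.text_type`, the other recurring six removal is `six.iteritems(d)` becoming `d.items()`; on Python 3, `items()` is already a lazy view, so the shim adds nothing. In miniature:

    banned_keys = {'shared': True, 'name': 'foo', 'tenant_id': '1234'}
    # Equivalent to the old six.iteritems(banned_keys) loop, with no import needed.
    for key, val in banned_keys.items():
        assert banned_keys[key] == val
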
diff --git a/heat/tests/openstack/neutron/test_neutron_extrarouteset.py b/heat/tests/openstack/neutron/test_neutron_extrarouteset.py
new file mode 100644
index 000000000..fb9294e7b
--- /dev/null
+++ b/heat/tests/openstack/neutron/test_neutron_extrarouteset.py
@@ -0,0 +1,237 @@
+# Copyright 2019 Ericsson Software Technology
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from oslo_log import log as logging
+
+from heat.common import exception
+from heat.common import template_format
+from heat.engine.clients.os import neutron
+from heat.engine.resources.openstack.neutron import extrarouteset
+from heat.engine import scheduler
+from heat.tests import common
+from heat.tests import utils
+from neutronclient.common import exceptions as ncex
+from neutronclient.neutron import v2_0 as neutronV20
+from neutronclient.v2_0 import client as neutronclient
+
+
+LOG = logging.getLogger(__name__)
+
+template = '''
+heat_template_version: rocky
+description: Test create OS::Neutron::ExtraRouteSet
+resources:
+ extrarouteset0:
+ type: OS::Neutron::ExtraRouteSet
+ properties:
+ router: 88ce38c4-be8e-11e9-a0a5-5f64570eeec8
+ routes:
+ - destination: 10.0.1.0/24
+ nexthop: 10.0.0.11
+ - destination: 10.0.2.0/24
+ nexthop: 10.0.0.12
+'''
+
+
+class NeutronExtraRouteSetTest(common.HeatTestCase):
+
+ def setUp(self):
+ super(NeutronExtraRouteSetTest, self).setUp()
+
+ self.patchobject(
+ neutron.NeutronClientPlugin, 'has_extension', return_value=True)
+
+ self.add_extra_routes_mock = self.patchobject(
+ neutronclient.Client, 'add_extra_routes_to_router')
+ self.add_extra_routes_mock.return_value = {
+ 'router': {
+ 'id': '85b91046-be84-11e9-b518-2714ef1d76c3',
+ 'routes': [
+ {'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
+ {'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'},
+ ],
+ }
+ }
+
+ self.remove_extra_routes_mock = self.patchobject(
+ neutronclient.Client, 'remove_extra_routes_from_router')
+ self.remove_extra_routes_mock.return_value = {
+ 'router': {
+ 'id': '85b91046-be84-11e9-b518-2714ef1d76c3',
+ 'routes': [
+ {'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
+ {'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'},
+ ],
+ }
+ }
+
+ self.show_router_mock = self.patchobject(
+ neutronclient.Client, 'show_router')
+ self.show_router_mock.return_value = {
+ 'router': {
+ 'id': '85b91046-be84-11e9-b518-2714ef1d76c3',
+ 'routes': [],
+ }
+ }
+
+ def find_resourceid_by_name_or_id(
+ _client, _resource, name_or_id, **_kwargs):
+ return name_or_id
+
+ self.find_resource_mock = self.patchobject(
+ neutronV20, 'find_resourceid_by_name_or_id')
+ self.find_resource_mock.side_effect = find_resourceid_by_name_or_id
+
+ def test_routes_to_set_to_routes(self):
+ routes = [{'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'}]
+ self.assertEqual(
+ routes,
+ extrarouteset._set_to_routes(extrarouteset._routes_to_set(routes))
+ )
+
+ def test_diff_routes(self):
+ old = [
+ {'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
+ {'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'},
+ ]
+ new = [
+ {'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
+ {'destination': '10.0.3.0/24', 'nexthop': '10.0.0.13'},
+ ]
+
+ add = extrarouteset._set_to_routes(
+ extrarouteset._routes_to_set(new) -
+ extrarouteset._routes_to_set(old))
+ remove = extrarouteset._set_to_routes(
+ extrarouteset._routes_to_set(old) -
+ extrarouteset._routes_to_set(new))
+
+ self.assertEqual(
+ [{'destination': '10.0.3.0/24', 'nexthop': '10.0.0.13'}], add)
+ self.assertEqual(
+ [{'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'}], remove)
+
+ def test__raise_if_duplicate_positive(self):
+ self.assertRaises(
+ exception.PhysicalResourceExists,
+ extrarouteset._raise_if_duplicate,
+ {'router': {'routes': [
+ {'destination': 'dst1', 'nexthop': 'hop1'},
+ ]}},
+ [{'destination': 'dst1', 'nexthop': 'hop1'}],
+ )
+
+ def test__raise_if_duplicate_negative(self):
+ try:
+ extrarouteset._raise_if_duplicate(
+ {'router': {'routes': [
+ {'destination': 'dst1', 'nexthop': 'hop1'},
+ ]}},
+ [{'destination': 'dst2', 'nexthop': 'hop2'}],
+ )
+ except exception.PhysicalResourceExists:
+ self.fail('Unexpected exception in detecting duplicate routes')
+
+ def test_create(self):
+ t = template_format.parse(template)
+ stack = utils.parse_stack(t)
+
+ extra_routes = stack['extrarouteset0']
+ scheduler.TaskRunner(extra_routes.create)()
+
+ self.assertEqual(
+ (extra_routes.CREATE, extra_routes.COMPLETE), extra_routes.state)
+ self.add_extra_routes_mock.assert_called_once_with(
+ '88ce38c4-be8e-11e9-a0a5-5f64570eeec8',
+ {'router': {
+ 'routes': [
+ {'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
+ {'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'},
+ ]}})
+
+ def test_delete_proper(self):
+ t = template_format.parse(template)
+ stack = utils.parse_stack(t)
+
+ extra_routes = stack['extrarouteset0']
+ scheduler.TaskRunner(extra_routes.create)()
+ scheduler.TaskRunner(extra_routes.delete)()
+
+ self.assertEqual(
+ (extra_routes.DELETE, extra_routes.COMPLETE), extra_routes.state)
+ self.remove_extra_routes_mock.assert_called_once_with(
+ '88ce38c4-be8e-11e9-a0a5-5f64570eeec8',
+ {'router': {
+ 'routes': [
+ {'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
+ {'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'},
+ ]}})
+
+ def test_delete_router_already_gone(self):
+ t = template_format.parse(template)
+ stack = utils.parse_stack(t)
+
+ self.remove_extra_routes_mock.side_effect = (
+ ncex.NeutronClientException(status_code=404))
+
+ extra_routes = stack['extrarouteset0']
+ scheduler.TaskRunner(extra_routes.create)()
+ scheduler.TaskRunner(extra_routes.delete)()
+
+ self.assertEqual(
+ (extra_routes.DELETE, extra_routes.COMPLETE), extra_routes.state)
+ self.remove_extra_routes_mock.assert_called_once_with(
+ '88ce38c4-be8e-11e9-a0a5-5f64570eeec8',
+ {'router': {
+ 'routes': [
+ {'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
+ {'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'},
+ ]}})
+
+ def test_update(self):
+ t = template_format.parse(template)
+ stack = utils.parse_stack(t)
+
+ extra_routes = stack['extrarouteset0']
+ scheduler.TaskRunner(extra_routes.create)()
+ self.assertEqual(
+ (extra_routes.CREATE, extra_routes.COMPLETE), extra_routes.state)
+
+ self.add_extra_routes_mock.reset_mock()
+
+ rsrc_defn = stack.defn.resource_definition('extrarouteset0')
+
+ props = copy.deepcopy(t['resources']['extrarouteset0']['properties'])
+ props['routes'][1] = {
+ 'destination': '10.0.3.0/24', 'nexthop': '10.0.0.13'}
+ rsrc_defn = rsrc_defn.freeze(properties=props)
+
+ scheduler.TaskRunner(extra_routes.update, rsrc_defn)()
+ self.assertEqual(
+ (extra_routes.UPDATE, extra_routes.COMPLETE), extra_routes.state)
+
+ self.remove_extra_routes_mock.assert_called_once_with(
+ '88ce38c4-be8e-11e9-a0a5-5f64570eeec8',
+ {'router': {
+ 'routes': [
+ {'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'},
+ ]}})
+ self.add_extra_routes_mock.assert_called_once_with(
+ '88ce38c4-be8e-11e9-a0a5-5f64570eeec8',
+ {'router': {
+ 'routes': [
+ {'destination': '10.0.3.0/24', 'nexthop': '10.0.0.13'},
+ ]}})
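
Note: the new ExtraRouteSet tests exercise two module helpers, `_routes_to_set` and `_set_to_routes`, which let `test_diff_routes` compute the routes to add and remove as plain set differences. A minimal sketch of the semantics those assertions imply; the real helpers in `heat.engine.resources.openstack.neutron.extrarouteset` may use a different internal representation:

    def _routes_to_set(routes):
        # Assumed representation: one hashable tuple per {'destination', 'nexthop'} dict.
        return {(r['destination'], r['nexthop']) for r in routes}

    def _set_to_routes(route_set):
        return [{'destination': d, 'nexthop': n} for d, n in route_set]

    old = [{'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
           {'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'}]
    new = [{'destination': '10.0.1.0/24', 'nexthop': '10.0.0.11'},
           {'destination': '10.0.3.0/24', 'nexthop': '10.0.0.13'}]

    assert _set_to_routes(_routes_to_set(new) - _routes_to_set(old)) == \
        [{'destination': '10.0.3.0/24', 'nexthop': '10.0.0.13'}]
    assert _set_to_routes(_routes_to_set(old) - _routes_to_set(new)) == \
        [{'destination': '10.0.2.0/24', 'nexthop': '10.0.0.12'}]
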
diff --git a/heat/tests/openstack/neutron/test_neutron_firewall.py b/heat/tests/openstack/neutron/test_neutron_firewall.py
index 6f024fc28..b2b288bb9 100644
--- a/heat/tests/openstack/neutron/test_neutron_firewall.py
+++ b/heat/tests/openstack/neutron/test_neutron_firewall.py
@@ -11,11 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.common import exceptions
from neutronclient.v2_0 import client as neutronclient
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common import template_format
@@ -128,7 +128,7 @@ class FirewallTest(common.HeatTestCase):
self.assertEqual(
'ResourceInError: resources.firewall: '
'Went to status ERROR due to "Error in Firewall"',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_firewall.assert_called_once_with({
@@ -155,7 +155,7 @@ class FirewallTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.firewall: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_firewall.assert_called_once_with({
@@ -224,7 +224,7 @@ class FirewallTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.firewall: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_firewall.assert_called_once_with({
@@ -274,7 +274,7 @@ class FirewallTest(common.HeatTestCase):
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (firewall subnet_id) is '
- 'incorrect.', six.text_type(error))
+ 'incorrect.', str(error))
self.mockclient.create_firewall.assert_called_once_with({
'firewall': {
@@ -427,7 +427,7 @@ class FirewallPolicyTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.firewall_policy: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_firewall_policy.assert_called_once_with({
@@ -485,7 +485,7 @@ class FirewallPolicyTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.firewall_policy: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_firewall_policy.assert_called_once_with({
@@ -523,7 +523,7 @@ class FirewallPolicyTest(common.HeatTestCase):
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (firewall_policy subnet_id) is '
- 'incorrect.', six.text_type(error))
+ 'incorrect.', str(error))
self.mockclient.create_firewall_policy.assert_called_once_with({
'firewall_policy': {
@@ -635,7 +635,7 @@ class FirewallRuleTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.firewall_rule: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_firewall_rule.assert_called_once_with({
@@ -696,7 +696,7 @@ class FirewallRuleTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.firewall_rule: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_firewall_rule.assert_called_once_with({
@@ -736,7 +736,7 @@ class FirewallRuleTest(common.HeatTestCase):
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (firewall_rule subnet_id) is '
- 'incorrect.', six.text_type(error))
+ 'incorrect.', str(error))
self.mockclient.create_firewall_rule.assert_called_once_with({
'firewall_rule': {
diff --git a/heat/tests/openstack/neutron/test_neutron_floating_ip.py b/heat/tests/openstack/neutron/test_neutron_floating_ip.py
index dc2ebac63..59f54147b 100644
--- a/heat/tests/openstack/neutron/test_neutron_floating_ip.py
+++ b/heat/tests/openstack/neutron/test_neutron_floating_ip.py
@@ -12,8 +12,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
diff --git a/heat/tests/openstack/neutron/test_neutron_l2_gateway.py b/heat/tests/openstack/neutron/test_neutron_l2_gateway.py
index dcf2930f2..c711bc9d6 100644
--- a/heat/tests/openstack/neutron/test_neutron_l2_gateway.py
+++ b/heat/tests/openstack/neutron/test_neutron_l2_gateway.py
@@ -12,10 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.common import exceptions
from neutronclient.v2_0 import client as neutronclient
-import six
from heat.common import template_format
from heat.engine.clients.os import neutron
@@ -294,10 +294,10 @@ class NeutronL2GatewayTest(common.HeatTestCase):
self.l2gw_resource = self.stack['l2gw']
self.assertIsNone(self.l2gw_resource.validate())
self.assertEqual(
- six.text_type('Resource CREATE failed: '
- 'L2GatewaySegmentationRequired: resources.l2gw: '
- 'L2 gateway segmentation id must be consistent for '
- 'all the interfaces'),
+ 'Resource CREATE failed: '
+ 'L2GatewaySegmentationRequired: resources.l2gw: '
+ 'L2 gateway segmentation id must be consistent for '
+ 'all the interfaces',
self.stack.status_reason)
self.assertEqual((self.l2gw_resource.CREATE,
self.l2gw_resource.FAILED),
diff --git a/heat/tests/openstack/neutron/test_neutron_l2_gateway_connection.py b/heat/tests/openstack/neutron/test_neutron_l2_gateway_connection.py
index b040281d3..6fecffd1b 100644
--- a/heat/tests/openstack/neutron/test_neutron_l2_gateway_connection.py
+++ b/heat/tests/openstack/neutron/test_neutron_l2_gateway_connection.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.v2_0 import client as neutronclient
from heat.common import template_format
diff --git a/heat/tests/openstack/neutron/test_neutron_loadbalancer.py b/heat/tests/openstack/neutron/test_neutron_loadbalancer.py
index da5658d06..388c8056e 100644
--- a/heat/tests/openstack/neutron/test_neutron_loadbalancer.py
+++ b/heat/tests/openstack/neutron/test_neutron_loadbalancer.py
@@ -11,12 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -212,7 +212,7 @@ class HealthMonitorTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.monitor: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mock_create.assert_called_once_with(self.create_snippet)
@@ -247,7 +247,7 @@ class HealthMonitorTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.monitor: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mock_create.assert_called_once_with(self.create_snippet)
self.mock_delete.assert_called_once_with('5678')
@@ -269,7 +269,7 @@ class HealthMonitorTest(common.HeatTestCase):
rsrc.FnGetAtt, 'subnet_id')
self.assertEqual(
'The Referenced Attribute (monitor subnet_id) is incorrect.',
- six.text_type(error))
+ str(error))
self.mock_create.assert_called_once_with(self.create_snippet)
def test_update(self):
@@ -423,7 +423,7 @@ class PoolTest(common.HeatTestCase):
self.assertEqual(
'ResourceInError: resources.pool: '
'Went to status ERROR due to "error in pool"',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mock_create.assert_called_once_with(pool_create_snippet)
self.mock_create_vip.assert_called_once_with(vip_create_snippet)
@@ -457,7 +457,7 @@ class PoolTest(common.HeatTestCase):
self.assertEqual('ResourceUnknownStatus: resources.pool: '
'Pool creation failed due to '
'vip - Unknown status SOMETHING due to "Unknown"',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mock_create.assert_called_once_with(pool_create_snippet)
self.mock_create_vip.assert_called_once_with(vip_create_snippet)
@@ -482,7 +482,7 @@ class PoolTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.pool: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mock_create.assert_called_once_with(pool_create_snippet)
@@ -571,7 +571,7 @@ class PoolTest(common.HeatTestCase):
error = self.assertRaises(exception.StackValidationFailed,
resource.validate)
- self.assertEqual(msg, six.text_type(error))
+ self.assertEqual(msg, str(error))
def test_validation_not_failing_without_session_persistence(self):
snippet = template_format.parse(pool_template)
@@ -670,7 +670,7 @@ class PoolTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.pool: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mock_delete_vip.assert_called_once_with('xyz')
self.mock_delete.assert_not_called()
@@ -688,7 +688,7 @@ class PoolTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.pool: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mock_delete.assert_called_once_with('5678')
self.mock_delete_vip.assert_called_once_with('xyz')
@@ -716,7 +716,7 @@ class PoolTest(common.HeatTestCase):
rsrc.FnGetAtt, 'net_id')
self.assertEqual(
'The Referenced Attribute (pool net_id) is incorrect.',
- six.text_type(error))
+ str(error))
def test_update(self):
rsrc = self.create_pool()
diff --git a/heat/tests/openstack/neutron/test_neutron_metering.py b/heat/tests/openstack/neutron/test_neutron_metering.py
index 997c629a2..256da1b81 100644
--- a/heat/tests/openstack/neutron/test_neutron_metering.py
+++ b/heat/tests/openstack/neutron/test_neutron_metering.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from neutronclient.common import exceptions
from neutronclient.v2_0 import client as neutronclient
@@ -91,7 +90,7 @@ class MeteringLabelTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.label: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_metering_label.assert_called_once_with({
@@ -152,7 +151,7 @@ class MeteringLabelTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.label: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_metering_label.assert_called_once_with({
@@ -241,7 +240,7 @@ class MeteringRuleTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.rule: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_metering_label_rule.assert_called_once_with({
@@ -308,7 +307,7 @@ class MeteringRuleTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.rule: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_metering_label_rule.assert_called_once_with({
diff --git a/heat/tests/openstack/neutron/test_neutron_net.py b/heat/tests/openstack/neutron/test_neutron_net.py
index 0937de51b..63a68c582 100644
--- a/heat/tests/openstack/neutron/test_neutron_net.py
+++ b/heat/tests/openstack/neutron/test_neutron_net.py
@@ -40,6 +40,7 @@ resources:
- 28c25a04-3f73-45a7-a2b4-59e183943ddc
port_security_enabled: False
dns_domain: openstack.org.
+ value_specs: {'mtu': 1500}
tags:
- tag1
- tag2
@@ -184,7 +185,8 @@ class NeutronNetTest(common.HeatTestCase):
'tenant_id': u'c1210485b2424d48804aad5d39c61b8f',
'dns_domain': u'openstack.org.',
'shared': True,
- 'port_security_enabled': False}
+ 'port_security_enabled': False,
+ 'mtu': 1500}
}
)
add_dhcp_agent_mock.assert_called_with(
@@ -250,7 +252,8 @@ class NeutronNetTest(common.HeatTestCase):
}}
)
# Update with value_specs
- prop_diff['value_specs'] = {"port_security_enabled": True}
+ prop_diff['value_specs'] = {"port_security_enabled": True,
+ 'mtu': 1500}
rsrc.handle_update(update_snippet, {}, prop_diff)
update_net_mock.assert_called_with(
'fc68ea2c-b60b-4b4f-bd82-94ec81110766',
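
Note: the net test now passes `value_specs: {'mtu': 1500}` and expects `mtu` to be flattened into the request body next to the first-class properties, on create and again on update. A tiny plain-dict illustration of that flattening (Heat's property machinery is more involved; this only shows the merge):

    body = {'name': 'the_network', 'shared': True, 'port_security_enabled': False}
    value_specs = {'mtu': 1500}
    body.update(value_specs)            # extra keys ride along inside {'network': body}
    assert body['mtu'] == 1500
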
diff --git a/heat/tests/openstack/neutron/test_neutron_network_gateway.py b/heat/tests/openstack/neutron/test_neutron_network_gateway.py
index 73682aacb..c88387850 100644
--- a/heat/tests/openstack/neutron/test_neutron_network_gateway.py
+++ b/heat/tests/openstack/neutron/test_neutron_network_gateway.py
@@ -14,8 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
-import six
+from unittest import mock
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
@@ -472,7 +471,7 @@ class NeutronNetworkGatewayTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.network_gateway: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.assertIsNone(scheduler.TaskRunner(rsrc.delete)())
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
@@ -502,7 +501,7 @@ class NeutronNetworkGatewayTest(common.HeatTestCase):
self.assertEqual(
'segmentation_id must be specified for using vlan',
- six.text_type(error))
+ str(error))
def test_gateway_validate_failed_with_flat(self):
t = template_format.parse(gw_template)
@@ -520,7 +519,7 @@ class NeutronNetworkGatewayTest(common.HeatTestCase):
self.assertEqual(
'segmentation_id cannot be specified except 0 for using flat',
- six.text_type(error))
+ str(error))
def test_network_gateway_attribute(self):
rsrc = self.prepare_create_network_gateway()
@@ -535,7 +534,7 @@ class NeutronNetworkGatewayTest(common.HeatTestCase):
rsrc.FnGetAtt, 'hoge')
self.assertEqual(
'The Referenced Attribute (test_network_gateway hoge) is '
- 'incorrect.', six.text_type(error))
+ 'incorrect.', str(error))
self.mockclient.create_network_gateway.assert_called_once_with({
'network_gateway': {
diff --git a/heat/tests/openstack/neutron/test_neutron_port.py b/heat/tests/openstack/neutron/test_neutron_port.py
index 149539fc8..4e022e1ed 100644
--- a/heat/tests/openstack/neutron/test_neutron_port.py
+++ b/heat/tests/openstack/neutron/test_neutron_port.py
@@ -11,12 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
from oslo_serialization import jsonutils
-import six
from heat.common import exception
from heat.common import template_format
@@ -68,6 +68,18 @@ resources:
'''
+neutron_port_propagate_ul_status_template = '''
+heat_template_version: 2015-04-30
+description: Template to test port Neutron resource
+resources:
+ port:
+ type: OS::Neutron::Port
+ properties:
+ network: abcd1234
+ propagate_uplink_status: True
+'''
+
+
class NeutronPortTest(common.HeatTestCase):
def setUp(self):
@@ -240,6 +252,34 @@ class NeutronPortTest(common.HeatTestCase):
'device_owner': ''
}})
+ def test_port_propagate_uplink_status(self):
+ t = template_format.parse(neutron_port_propagate_ul_status_template)
+ stack = utils.parse_stack(t)
+
+ self.find_mock.return_value = 'abcd1234'
+
+ self.create_mock.return_value = {'port': {
+ "status": "BUILD",
+ "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
+ }}
+
+ self.port_show_mock.return_value = {'port': {
+ "status": "ACTIVE",
+ "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
+ }}
+
+ port = stack['port']
+ scheduler.TaskRunner(port.create)()
+ self.create_mock.assert_called_once_with({'port': {
+ 'network_id': u'abcd1234',
+ 'propagate_uplink_status': True,
+ 'name': utils.PhysName(stack.name, 'port'),
+ 'admin_state_up': True,
+ 'binding:vnic_type': 'normal',
+ 'device_id': '',
+ 'device_owner': ''
+ }})
+
def test_missing_mac_address(self):
t = template_format.parse(neutron_port_with_address_pair_template)
t['resources']['port']['properties']['allowed_address_pairs'][0].pop(
@@ -580,6 +620,7 @@ class NeutronPortTest(common.HeatTestCase):
'ipv4_address_scope': None, 'description': '',
'subnets': [subnet_dict['id']],
'port_security_enabled': True,
+ 'propagate_uplink_status': True,
'tenant_id': '58a61fc3992944ce971404a2ece6ff98',
'tags': [], 'ipv6_address_scope': None,
'project_id': '58a61fc3992944ce971404a2ece6ff98',
@@ -883,6 +924,7 @@ class NeutronPortTest(common.HeatTestCase):
'tenant_id': '30f466e3d14b4251853899f9c26e2b66',
'binding:profile': {},
'port_security_enabled': True,
+ 'propagate_uplink_status': True,
'binding:vnic_type': 'normal',
'fixed_ips': [
{'subnet_id': '02d9608f-8f30-4611-ad02-69855c82457f',
@@ -902,6 +944,7 @@ class NeutronPortTest(common.HeatTestCase):
'admin_state_up': True,
'device_owner': '',
'port_security_enabled': True,
+ 'propagate_uplink_status': True,
'binding:vnic_type': 'normal',
'fixed_ips': [
{'subnet': '02d9608f-8f30-4611-ad02-69855c82457f',
@@ -927,6 +970,7 @@ class UpdatePortTest(common.HeatTestCase):
addr_pair=None,
vnic_type=None)),
('with_no_name', dict(secgrp=['8a2f582a-e1cd-480f-b85d-b02631c10656'],
+ orig_name='original',
name=None,
value_specs={},
fixed_ips=None,
@@ -977,6 +1021,16 @@ class UpdatePortTest(common.HeatTestCase):
fixed_ips=None,
addr_pair=None,
vnic_type='baremetal')),
+ ('virtio_forwarder_vnic', dict(secgrp=None,
+ value_specs={},
+ fixed_ips=None,
+ addr_pair=None,
+ vnic_type='virtio-forwarder')),
+ ('smart_nic_vnic', dict(secgrp=None,
+ value_specs={},
+ fixed_ips=None,
+ addr_pair=None,
+ vnic_type='smart-nic')),
('with_all', dict(secgrp=['8a2f582a-e1cd-480f-b85d-b02631c10656'],
value_specs={},
fixed_ips=[
@@ -990,12 +1044,43 @@ class UpdatePortTest(common.HeatTestCase):
def test_update_port(self):
t = template_format.parse(neutron_port_template)
+ create_name = getattr(self, 'orig_name', None)
+ if create_name is not None:
+ t['resources']['port']['properties']['name'] = create_name
stack = utils.parse_stack(t)
+ def res_id(client, resource, name_or_id, cmd_resource=None):
+ return {
+ 'network': 'net1234',
+ 'subnet': 'sub1234',
+ }[resource]
+
self.patchobject(neutronV20, 'find_resourceid_by_name_or_id',
- return_value='net1234')
- create_port = self.patchobject(neutronclient.Client, 'create_port')
+ side_effect=res_id)
+
+ create_port_result = {
+ 'port': {
+ "status": "BUILD",
+ "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766"
+ }
+ }
+ show_port_result = {
+ 'port': {
+ "status": "ACTIVE",
+ "id": "fc68ea2c-b60b-4b4f-bd82-94ec81110766",
+ "fixed_ips": {
+ "subnet_id": "d0e971a6-a6b4-4f4c-8c88-b75e9c120b7e",
+ "ip_address": "10.0.0.2"
+ }
+ }
+ }
+
+ create_port = self.patchobject(neutronclient.Client, 'create_port',
+ return_value=create_port_result)
update_port = self.patchobject(neutronclient.Client, 'update_port')
+ show_port = self.patchobject(neutronclient.Client, 'show_port',
+ return_value=show_port_result)
+
fake_groups_list = {
'security_groups': [
{
@@ -1012,11 +1097,17 @@ class UpdatePortTest(common.HeatTestCase):
set_tag_mock = self.patchobject(neutronclient.Client, 'replace_tag')
props = {'network_id': u'net1234',
- 'name': str(utils.PhysName(stack.name, 'port')),
+ 'fixed_ips': [{'subnet_id': 'sub1234',
+ 'ip_address': '10.0.3.21'}],
+ 'name': (create_name if create_name is not None else
+ utils.PhysName(stack.name, 'port')),
'admin_state_up': True,
- 'device_owner': u'network:dhcp'}
+ 'device_owner': u'network:dhcp',
+ 'device_id': '',
+ 'binding:vnic_type': 'normal'}
update_props = props.copy()
+ update_props['name'] = getattr(self, 'name', create_name)
update_props['security_groups'] = self.secgrp
update_props['value_specs'] = self.value_specs
update_props['tags'] = ['test_tag']
@@ -1027,23 +1118,28 @@ class UpdatePortTest(common.HeatTestCase):
update_dict = update_props.copy()
+ if update_props['allowed_address_pairs'] is None:
+ update_dict['allowed_address_pairs'] = []
+
if update_props['security_groups'] is None:
- update_dict['security_groups'] = ['default']
+ update_dict['security_groups'] = [
+ '0389f747-7785-4757-b7bb-2ab07e4b09c3',
+ ]
if update_props['name'] is None:
- update_dict['name'] = utils.PhysName(stack.name, 'test_subnet')
+ update_dict['name'] = utils.PhysName(stack.name, 'port')
value_specs = update_dict.pop('value_specs')
if value_specs:
- for value_spec in six.iteritems(value_specs):
+ for value_spec in value_specs.items():
update_dict[value_spec[0]] = value_spec[1]
tags = update_dict.pop('tags')
# create port
port = stack['port']
- self.assertIsNone(scheduler.TaskRunner(port.handle_create)())
- create_port.assset_called_once_with(props)
+ self.assertIsNone(scheduler.TaskRunner(port.create)())
+ create_port.assert_called_once_with({'port': props})
# update port
update_snippet = rsrc_defn.ResourceDefinition(port.name, port.type(),
update_props)
@@ -1051,7 +1147,8 @@ class UpdatePortTest(common.HeatTestCase):
update_snippet, {},
update_props)())
- update_port.assset_called_once_with(update_dict)
+ update_port.assert_called_once_with(
+ 'fc68ea2c-b60b-4b4f-bd82-94ec81110766', {'port': update_dict})
set_tag_mock.assert_called_with('ports', port.resource_id,
{'tags': tags})
# check, that update does not cause of Update Replace
@@ -1069,3 +1166,4 @@ class UpdatePortTest(common.HeatTestCase):
# update with empty prop_diff
scheduler.TaskRunner(port.handle_update, update_snippet, {}, {})()
self.assertEqual(1, update_port.call_count)
+ show_port.assert_called_with('fc68ea2c-b60b-4b4f-bd82-94ec81110766')
diff --git a/heat/tests/openstack/neutron/test_neutron_provider_net.py b/heat/tests/openstack/neutron/test_neutron_provider_net.py
index 3d95714ed..a528c4d4d 100644
--- a/heat/tests/openstack/neutron/test_neutron_provider_net.py
+++ b/heat/tests/openstack/neutron/test_neutron_provider_net.py
@@ -12,7 +12,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
from neutronclient.common import exceptions as qe
from neutronclient.v2_0 import client as neutronclient
diff --git a/heat/tests/openstack/neutron/test_neutron_rbac_policy.py b/heat/tests/openstack/neutron/test_neutron_rbac_policy.py
index 0ee21acf7..765a4caa1 100644
--- a/heat/tests/openstack/neutron/test_neutron_rbac_policy.py
+++ b/heat/tests/openstack/neutron/test_neutron_rbac_policy.py
@@ -12,7 +12,7 @@
# under the License.
-import mock
+from unittest import mock
import yaml
from neutronclient.common import exceptions
diff --git a/heat/tests/openstack/neutron/test_neutron_router.py b/heat/tests/openstack/neutron/test_neutron_router.py
index cf949b167..442668a2b 100644
--- a/heat/tests/openstack/neutron/test_neutron_router.py
+++ b/heat/tests/openstack/neutron/test_neutron_router.py
@@ -12,11 +12,10 @@
# under the License.
import copy
+from unittest import mock
-import mock
from neutronclient.common import exceptions as qe
from neutronclient.v2_0 import client as neutronclient
-import six
from heat.common import exception
from heat.common import template_format
@@ -201,7 +200,7 @@ class NeutronRouterTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourcePropertyConflict,
rsrc.validate)
self.assertIn('distributed, l3_agent_id/l3_agent_ids',
- six.text_type(exc))
+ str(exc))
# test distributed can not specify l3_agent_ids
props['l3_agent_ids'] = ['id1', 'id2']
stack = utils.parse_stack(t)
@@ -209,7 +208,7 @@ class NeutronRouterTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourcePropertyConflict,
rsrc.validate)
self.assertIn('distributed, l3_agent_id/l3_agent_ids',
- six.text_type(exc))
+ str(exc))
def test_router_validate_l3_agents(self):
t = template_format.parse(neutron_template)
@@ -222,7 +221,7 @@ class NeutronRouterTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertIn('Non HA routers can only have one L3 agent',
- six.text_type(exc))
+ str(exc))
self.assertIsNone(rsrc.properties.get(rsrc.L3_AGENT_ID))
def test_router_validate_ha_distribute(self):
@@ -240,7 +239,7 @@ class NeutronRouterTest(common.HeatTestCase):
rsrc.reparse()
exc = self.assertRaises(exception.ResourcePropertyConflict,
rsrc.validate)
- self.assertIn('distributed, ha', six.text_type(exc))
+ self.assertIn('distributed, ha', str(exc))
def test_router_validate_ha_l3_agents(self):
t = template_format.parse(neutron_template)
@@ -253,7 +252,7 @@ class NeutronRouterTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertIn('Non HA routers can only have one L3 agent.',
- six.text_type(exc))
+ str(exc))
def test_router(self):
t = template_format.parse(neutron_template)
@@ -609,7 +608,7 @@ class NeutronRouterTest(common.HeatTestCase):
res.validate)
self.assertEqual("At least one of the following properties "
"must be specified: subnet, port.",
- six.text_type(ex))
+ str(ex))
def test_gateway_router(self):
def find_rsrc(resource, name_or_id, cmd_resource=None):
diff --git a/heat/tests/openstack/neutron/test_neutron_security_group.py b/heat/tests/openstack/neutron/test_neutron_security_group.py
index ea269aa22..28a931f51 100644
--- a/heat/tests/openstack/neutron/test_neutron_security_group.py
+++ b/heat/tests/openstack/neutron/test_neutron_security_group.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from neutronclient.common import exceptions as neutron_exc
from neutronclient.neutron import v2_0 as neutronV20
diff --git a/heat/tests/openstack/neutron/test_neutron_security_group_rule.py b/heat/tests/openstack/neutron/test_neutron_security_group_rule.py
index 407f5e9ce..49b427634 100644
--- a/heat/tests/openstack/neutron/test_neutron_security_group_rule.py
+++ b/heat/tests/openstack/neutron/test_neutron_security_group_rule.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.common import template_format
diff --git a/heat/tests/openstack/neutron/test_neutron_segment.py b/heat/tests/openstack/neutron/test_neutron_segment.py
index d0900571b..fa00cf4c9 100644
--- a/heat/tests/openstack/neutron/test_neutron_segment.py
+++ b/heat/tests/openstack/neutron/test_neutron_segment.py
@@ -11,11 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.neutron import v2_0 as neutronV20
from openstack import exceptions
from oslo_utils import excutils
-import six
from heat.common import exception
from heat.common import template_format
@@ -141,7 +141,7 @@ class NeutronSegmentTest(common.HeatTestCase):
errMsg = 'physical_network is required for vlan provider network.'
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertEqual(errMsg, six.text_type(error))
+ self.assertEqual(errMsg, str(error))
props['physical_network'] = 'physnet'
props['segmentation_id'] = '4095'
@@ -151,7 +151,7 @@ class NeutronSegmentTest(common.HeatTestCase):
'on each physical_network.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertEqual(errMsg, six.text_type(error))
+ self.assertEqual(errMsg, str(error))
def test_validate_flat_type(self):
self.t = template_format.parse(inline_templates.SEGMENT_TEMPLATE)
@@ -163,7 +163,7 @@ class NeutronSegmentTest(common.HeatTestCase):
errMsg = ('segmentation_id is prohibited for flat provider network.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertEqual(errMsg, six.text_type(error))
+ self.assertEqual(errMsg, str(error))
def test_validate_tunnel_type(self):
self.t = template_format.parse(inline_templates.SEGMENT_TEMPLATE)
@@ -175,7 +175,7 @@ class NeutronSegmentTest(common.HeatTestCase):
errMsg = ('physical_network is prohibited for vxlan provider network.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertEqual(errMsg, six.text_type(error))
+ self.assertEqual(errMsg, str(error))
def test_segment_get_attr(self):
segment_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
diff --git a/heat/tests/openstack/neutron/test_neutron_subnet.py b/heat/tests/openstack/neutron/test_neutron_subnet.py
index 4804a376f..a90e01230 100644
--- a/heat/tests/openstack/neutron/test_neutron_subnet.py
+++ b/heat/tests/openstack/neutron/test_neutron_subnet.py
@@ -12,11 +12,11 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
-import six
from heat.common import exception
from heat.common import template_format
@@ -553,7 +553,7 @@ class NeutronSubnetTest(common.HeatTestCase):
"resources.sub_net.properties.host_routes[0].destination: "
"Error validating value 'invalid_cidr': Invalid net cidr "
"invalid IPNetwork invalid_cidr ")
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_ipv6_validate_ra_mode(self):
t = template_format.parse(neutron_template)
@@ -568,7 +568,7 @@ class NeutronSubnetTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual("When both ipv6_ra_mode and ipv6_address_mode are "
- "set, they must be equal.", six.text_type(ex))
+ "set, they must be equal.", str(ex))
def test_ipv6_validate_ip_version(self):
t = template_format.parse(neutron_template)
@@ -583,7 +583,7 @@ class NeutronSubnetTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
self.assertEqual("ipv6_ra_mode and ipv6_address_mode are not "
- "supported for ipv4.", six.text_type(ex))
+ "supported for ipv4.", str(ex))
def test_validate_both_subnetpool_cidr(self):
self.patchobject(neutronV20, 'find_resourceid_by_name_or_id',
@@ -600,7 +600,7 @@ class NeutronSubnetTest(common.HeatTestCase):
rsrc.validate)
msg = ("Cannot define the following properties at the same time: "
"subnetpool, cidr.")
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_validate_none_subnetpool_cidr(self):
t = template_format.parse(neutron_template)
@@ -614,7 +614,7 @@ class NeutronSubnetTest(common.HeatTestCase):
rsrc.validate)
msg = ("At least one of the following properties must be specified: "
"subnetpool, cidr.")
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_validate_subnetpool_ref_with_cidr(self):
t = template_format.parse(neutron_template)
@@ -635,7 +635,7 @@ class NeutronSubnetTest(common.HeatTestCase):
rsrc.validate)
msg = ("Cannot define the following properties at the same time: "
"subnetpool, cidr.")
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_validate_subnetpool_ref_no_cidr(self):
t = template_format.parse(neutron_template)
@@ -667,7 +667,7 @@ class NeutronSubnetTest(common.HeatTestCase):
rsrc.validate)
msg = ("Cannot define the following properties at the same time: "
"prefixlen, cidr.")
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_deprecated_network_id(self):
template = """
diff --git a/heat/tests/openstack/neutron/test_neutron_subnetpool.py b/heat/tests/openstack/neutron/test_neutron_subnetpool.py
index 0cf5c5bec..d6e28235a 100644
--- a/heat/tests/openstack/neutron/test_neutron_subnetpool.py
+++ b/heat/tests/openstack/neutron/test_neutron_subnetpool.py
@@ -14,7 +14,6 @@
from neutronclient.common import exceptions as qe
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as neutronclient
-import six
from heat.common import exception
from heat.common import template_format
@@ -54,7 +53,7 @@ class NeutronSubnetPoolTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.sub_pool: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
else:
self.patchobject(neutronclient.Client, 'create_subnetpool',
return_value={'subnetpool': {
@@ -80,7 +79,7 @@ class NeutronSubnetPoolTest(common.HeatTestCase):
'min_prefixlen=28.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertEqual(errMessage, six.text_type(error))
+ self.assertEqual(errMessage, str(error))
def test_validate_prefixlen_default_gt_max(self):
self.t = template_format.parse(inline_templates.SPOOL_TEMPLATE)
@@ -93,7 +92,7 @@ class NeutronSubnetPoolTest(common.HeatTestCase):
'default_prefixlen=28.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertEqual(errMessage, six.text_type(error))
+ self.assertEqual(errMessage, str(error))
def test_validate_prefixlen_min_gt_default(self):
self.t = template_format.parse(inline_templates.SPOOL_TEMPLATE)
@@ -106,7 +105,7 @@ class NeutronSubnetPoolTest(common.HeatTestCase):
'default_prefixlen=24.')
error = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertEqual(errMessage, six.text_type(error))
+ self.assertEqual(errMessage, str(error))
def test_validate_minimal(self):
self.t = template_format.parse(inline_templates.SPOOL_MINIMAL_TEMPLATE)
@@ -235,7 +234,7 @@ class NeutronSubnetPoolTest(common.HeatTestCase):
rsrc.handle_update,
update_snippet, {}, props)
- self.assertEqual(errMessage, six.text_type(error))
+ self.assertEqual(errMessage, str(error))
update_subnetpool.assert_not_called()
props = {
diff --git a/heat/tests/openstack/neutron/test_neutron_trunk.py b/heat/tests/openstack/neutron/test_neutron_trunk.py
index 4fe5f4ec0..4d5a2f8b2 100644
--- a/heat/tests/openstack/neutron/test_neutron_trunk.py
+++ b/heat/tests/openstack/neutron/test_neutron_trunk.py
@@ -13,7 +13,6 @@
# under the License.
import copy
-import six
from oslo_log import log as logging
@@ -261,7 +260,7 @@ class NeutronTrunkTest(common.HeatTestCase):
self.assertIn(
'Went to status DEGRADED due to',
- six.text_type(e))
+ str(e))
def test_create_parent_port_by_name(self):
t = template_format.parse(create_template)
diff --git a/heat/tests/openstack/neutron/test_neutron_vpnservice.py b/heat/tests/openstack/neutron/test_neutron_vpnservice.py
index f34485732..003d5bdd6 100644
--- a/heat/tests/openstack/neutron/test_neutron_vpnservice.py
+++ b/heat/tests/openstack/neutron/test_neutron_vpnservice.py
@@ -12,8 +12,7 @@
# under the License.
import copy
-import mock
-import six
+from unittest import mock
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronV20
@@ -197,7 +196,7 @@ class VPNServiceTest(common.HeatTestCase):
self.assertEqual(
'ResourceInError: resources.vpnservice: '
'Went to status ERROR due to "Error in VPNService"',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_vpnservice.assert_called_once_with(
@@ -221,7 +220,7 @@ class VPNServiceTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.vpnservice: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_vpnservice.assert_called_once_with(
@@ -278,7 +277,7 @@ class VPNServiceTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.vpnservice: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_vpnservice.assert_called_once_with(
@@ -319,7 +318,7 @@ class VPNServiceTest(common.HeatTestCase):
self.assertEqual(
'The Referenced Attribute (vpnservice non-existent_property) is '
'incorrect.',
- six.text_type(error))
+ str(error))
self.mockclient.create_vpnservice.assert_called_once_with(
self.VPN_SERVICE_CONF)
@@ -426,7 +425,7 @@ class IPsecSiteConnectionTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.ipsec_site_connection: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_ipsec_site_connection.assert_called_once_with(
@@ -445,7 +444,7 @@ class IPsecSiteConnectionTest(common.HeatTestCase):
self.assertEqual(
'ResourceInError: resources.ipsec_site_connection: '
'Went to status ERROR due to "Error in IPsecSiteConnection"',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_ipsec_site_connection.assert_called_once_with(
@@ -505,7 +504,7 @@ class IPsecSiteConnectionTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.ipsec_site_connection: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_ipsec_site_connection.assert_called_once_with(
@@ -556,7 +555,7 @@ class IPsecSiteConnectionTest(common.HeatTestCase):
self.assertEqual(
'The Referenced Attribute (ipsec_site_connection '
'non-existent_property) is incorrect.',
- six.text_type(error))
+ str(error))
self.mockclient.create_ipsec_site_connection.assert_called_once_with(
self.IPSEC_SITE_CONNECTION_CONF)
@@ -645,7 +644,7 @@ class IKEPolicyTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.ikepolicy: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_ikepolicy.assert_called_once_with(
@@ -691,7 +690,7 @@ class IKEPolicyTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.ikepolicy: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_ikepolicy.assert_called_once_with(
@@ -727,7 +726,7 @@ class IKEPolicyTest(common.HeatTestCase):
self.assertEqual(
'The Referenced Attribute (ikepolicy non-existent_property) is '
'incorrect.',
- six.text_type(error))
+ str(error))
self.mockclient.create_ikepolicy.assert_called_once_with(
self.IKE_POLICY_CONF)
@@ -737,21 +736,30 @@ class IKEPolicyTest(common.HeatTestCase):
rsrc = self.create_ikepolicy()
self.mockclient.update_ikepolicy.return_value = None
+ new_props = {
+ 'name': 'New IKEPolicy',
+ 'auth_algorithm': 'sha512',
+ 'description': 'New description',
+ 'encryption_algorithm': 'aes-256',
+ 'lifetime': {
+ 'units': 'seconds',
+ 'value': 1800
+ },
+ 'pfs': 'group2',
+ 'ike_version': 'v2'
+ }
+ update_body = {
+ 'ikepolicy': new_props
+ }
scheduler.TaskRunner(rsrc.create)()
props = dict(rsrc.properties)
- props['name'] = 'New IKEPolicy'
- props['auth_algorithm'] = 'sha512'
+ props.update(new_props)
+
update_template = rsrc.t.freeze(properties=props)
scheduler.TaskRunner(rsrc.update, update_template)()
self.mockclient.create_ikepolicy.assert_called_once_with(
self.IKE_POLICY_CONF)
- update_body = {
- 'ikepolicy': {
- 'name': 'New IKEPolicy',
- 'auth_algorithm': 'sha512'
- }
- }
self.mockclient.update_ikepolicy.assert_called_once_with(
'ike123', update_body)
@@ -818,7 +826,7 @@ class IPsecPolicyTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.ipsecpolicy: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.mockclient.create_ipsecpolicy.assert_called_once_with(
@@ -864,7 +872,7 @@ class IPsecPolicyTest(common.HeatTestCase):
self.assertEqual(
'NeutronClientException: resources.ipsecpolicy: '
'An unknown exception occurred.',
- six.text_type(error))
+ str(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.mockclient.create_ipsecpolicy.assert_called_once_with(
@@ -900,7 +908,7 @@ class IPsecPolicyTest(common.HeatTestCase):
self.assertEqual(
'The Referenced Attribute (ipsecpolicy non-existent_property) is '
'incorrect.',
- six.text_type(error))
+ str(error))
self.mockclient.create_ipsecpolicy.assert_called_once_with(
self.IPSEC_POLICY_CONF)
diff --git a/heat/tests/openstack/neutron/test_qos.py b/heat/tests/openstack/neutron/test_qos.py
index f740d586b..d46eb64f4 100644
--- a/heat/tests/openstack/neutron/test_qos.py
+++ b/heat/tests/openstack/neutron/test_qos.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import template_format
from heat.engine.clients.os import neutron
@@ -58,6 +58,18 @@ resources:
tenant_id: d66c74c01d6c41b9846088c1ad9634d0
'''
+minimum_bandwidth_rule_template = '''
+heat_template_version: 2016-04-08
+description: This template defines a neutron minimum bandwidth rule.
+resources:
+ my_minimum_bandwidth_rule:
+ type: OS::Neutron::QoSMinimumBandwidthRule
+ properties:
+ policy: 477e8273-60a7-4c41-b683-fdb0bc7cd151
+ min_kbps: 1000
+ tenant_id: d66c74c01d6c41b9846088c1ad9634d0
+'''
+
class NeutronQoSPolicyTest(common.HeatTestCase):
def setUp(self):
@@ -392,3 +404,114 @@ class NeutronQoSDscpMarkingRuleTest(common.HeatTestCase):
self.neutronclient.show_dscp_marking_rule.assert_called_once_with(
self.dscp_marking_rule.resource_id, self.policy_id)
+
+
+class NeutronQoSMinimumBandwidthRuleTest(common.HeatTestCase):
+ def setUp(self):
+ super(NeutronQoSMinimumBandwidthRuleTest, self).setUp()
+
+ self.ctx = utils.dummy_context()
+ tpl = template_format.parse(minimum_bandwidth_rule_template)
+ self.stack = stack.Stack(
+ self.ctx,
+ 'neutron_minimum_bandwidth_rule_test',
+ template.Template(tpl)
+ )
+
+ self.neutronclient = mock.MagicMock()
+ self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
+ return_value=True)
+ self.minimum_bandwidth_rule = self.stack['my_minimum_bandwidth_rule']
+ self.minimum_bandwidth_rule.client = mock.MagicMock(
+ return_value=self.neutronclient)
+ self.find_mock = self.patchobject(
+ neutron.neutronV20,
+ 'find_resourceid_by_name_or_id')
+ self.policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
+ self.find_mock.return_value = self.policy_id
+
+ def test_rule_handle_create(self):
+ rule = {
+ 'minimum_bandwidth_rule': {
+ 'id': 'cf0eab12-ef8b-4a62-98d0-70576583c17a',
+ 'min_kbps': 1000,
+ 'direction': 'egress',
+ 'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'
+ }
+ }
+
+ create_props = {'min_kbps': 1000,
+ 'direction': 'egress',
+ 'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'}
+ self.neutronclient.create_minimum_bandwidth_rule.return_value = rule
+
+ self.minimum_bandwidth_rule.handle_create()
+ self.assertEqual('cf0eab12-ef8b-4a62-98d0-70576583c17a',
+ self.minimum_bandwidth_rule.resource_id)
+ self.neutronclient.create_minimum_bandwidth_rule.\
+ assert_called_once_with(
+ self.policy_id,
+ {'minimum_bandwidth_rule': create_props})
+
+ def test_rule_handle_delete(self):
+ rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
+ self.minimum_bandwidth_rule.resource_id = rule_id
+ self.neutronclient.delete_minimum_bandwidth_rule.return_value = None
+
+ self.assertIsNone(self.minimum_bandwidth_rule.handle_delete())
+ self.neutronclient.delete_minimum_bandwidth_rule.\
+ assert_called_once_with(rule_id, self.policy_id)
+
+ def test_rule_handle_delete_not_found(self):
+ rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
+ self.minimum_bandwidth_rule.resource_id = rule_id
+ not_found = self.neutronclient.NotFound
+ self.neutronclient.delete_minimum_bandwidth_rule.side_effect =\
+ not_found
+
+ self.assertIsNone(self.minimum_bandwidth_rule.handle_delete())
+ self.neutronclient.delete_minimum_bandwidth_rule.\
+ assert_called_once_with(rule_id, self.policy_id)
+
+ def test_rule_handle_delete_resource_id_is_none(self):
+ self.minimum_bandwidth_rule.resource_id = None
+ self.assertIsNone(self.minimum_bandwidth_rule.handle_delete())
+ self.assertEqual(0,
+ self.neutronclient.delete_minimum_bandwidth_rule.call_count)
+
+ def test_rule_handle_update(self):
+ rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
+ self.minimum_bandwidth_rule.resource_id = rule_id
+
+ prop_diff = {
+ 'min_kbps': 500
+ }
+
+ self.minimum_bandwidth_rule.handle_update(
+ json_snippet={},
+ tmpl_diff={},
+ prop_diff=prop_diff.copy())
+
+ self.neutronclient.update_minimum_bandwidth_rule.\
+ assert_called_once_with(
+ rule_id,
+ self.policy_id,
+ {'minimum_bandwidth_rule': prop_diff})
+
+ def test_rule_get_attr(self):
+ self.minimum_bandwidth_rule.resource_id = 'test rule'
+ rule = {
+ 'minimum_bandwidth_rule': {
+ 'id': 'cf0eab12-ef8b-4a62-98d0-70576583c17a',
+ 'min_kbps': 1000,
+ 'direction': 'egress',
+ 'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'
+ }
+ }
+ self.neutronclient.show_minimum_bandwidth_rule.return_value = rule
+
+ self.assertEqual(rule['minimum_bandwidth_rule'],
+ self.minimum_bandwidth_rule.FnGetAtt('show'))
+
+ self.neutronclient.show_minimum_bandwidth_rule.assert_called_once_with(
+ self.minimum_bandwidth_rule.resource_id, self.policy_id)
diff --git a/heat/tests/openstack/neutron/test_quota.py b/heat/tests/openstack/neutron/test_quota.py
index d5fab0b73..4cf73ebfa 100644
--- a/heat/tests/openstack/neutron/test_quota.py
+++ b/heat/tests/openstack/neutron/test_quota.py
@@ -10,8 +10,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -80,7 +79,7 @@ class NeutronQuotaTest(common.HeatTestCase):
def _test_validate(self, resource, error_msg):
exc = self.assertRaises(exception.StackValidationFailed,
resource.validate)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_miss_all_quotas(self):
my_quota = self.stack['my_quota']
diff --git a/heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py b/heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py
index eed5f2793..c4a0fd4e5 100644
--- a/heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py
+++ b/heat/tests/openstack/neutron/test_sfc/test_flow_classifier.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.resources.openstack.neutron.sfc import flow_classifier
from heat.engine import stack
@@ -201,4 +201,4 @@ class FlowClassifierTest(common.HeatTestCase):
{
'name': 'name-updated',
'description': 'description-updated',
- }, self.test_resource.resource_id)
+ }, self.test_resource.resource_id)
diff --git a/heat/tests/openstack/neutron/test_sfc/test_port_chain.py b/heat/tests/openstack/neutron/test_sfc/test_port_chain.py
index ead7cceff..233bb1549 100644
--- a/heat/tests/openstack/neutron/test_sfc/test_port_chain.py
+++ b/heat/tests/openstack/neutron/test_sfc/test_port_chain.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os import neutron
from heat.engine.resources.openstack.neutron.sfc import port_chain
@@ -168,4 +168,4 @@ class PortChainTest(common.HeatTestCase):
'description': 'description-updated',
'port_pair_groups': ['port_pair_group_2'],
'flow_classifiers': ['flow_classifier2'],
- }, self.test_resource.resource_id)
+ }, self.test_resource.resource_id)
diff --git a/heat/tests/openstack/neutron/test_sfc/test_port_pair.py b/heat/tests/openstack/neutron/test_sfc/test_port_pair.py
index 1d86ae244..bb4fbf856 100644
--- a/heat/tests/openstack/neutron/test_sfc/test_port_pair.py
+++ b/heat/tests/openstack/neutron/test_sfc/test_port_pair.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.resources.openstack.neutron.sfc import port_pair
from heat.engine import stack
@@ -27,8 +27,8 @@ sample_template = {
'properties': {
'name': 'test_port_pair',
'description': 'desc',
- 'ingress': '6af055d3-26f6-48dd-a597-7611d7e58d35',
- 'egress': '6af055d3-26f6-48dd-a597-7611d7e58d35',
+ 'ingress': '6af055d3-26f6-48dd-a597-7611d7e58d35',
+ 'egress': '6af055d3-26f6-48dd-a597-7611d7e58d35',
'service_function_parameters': {'correlation': None}
}
}
@@ -155,4 +155,4 @@ class PortPairTest(common.HeatTestCase):
{
'name': 'name-updated',
'description': 'description-updated',
- }, self.test_resource.resource_id)
+ }, self.test_resource.resource_id)
diff --git a/heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py b/heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py
index f9590b95d..0b150aec8 100644
--- a/heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py
+++ b/heat/tests/openstack/neutron/test_sfc/test_port_pair_group.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os import neutron
from heat.engine.resources.openstack.neutron.sfc import port_pair_group
@@ -28,7 +28,7 @@ sample_template = {
'properties': {
'name': 'test_port_pair_group',
'description': 'desc',
- 'port_pairs': ['port1']
+ 'port_pairs': ['port1']
}
}
}
@@ -155,7 +155,7 @@ class PortPairGroupTest(common.HeatTestCase):
{
'name': 'name-updated',
'description': 'description-updated',
- }, self.test_resource.resource_id)
+ }, self.test_resource.resource_id)
def test_resource_handle_update_port_pairs(self):
self.patchobject(self.test_client_plugin,
@@ -181,4 +181,4 @@ class PortPairGroupTest(common.HeatTestCase):
'name': 'name',
'description': 'description',
'port_pairs': ['port2'],
- }, self.test_resource.resource_id)
+ }, self.test_resource.resource_id)
diff --git a/heat/tests/openstack/neutron/test_taas/test_tap_flow.py b/heat/tests/openstack/neutron/test_taas/test_tap_flow.py
index 52500a1f2..ffff2fa59 100644
--- a/heat/tests/openstack/neutron/test_taas/test_tap_flow.py
+++ b/heat/tests/openstack/neutron/test_taas/test_tap_flow.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.resources.openstack.neutron.taas import tap_flow
from heat.engine import stack
@@ -27,8 +27,8 @@ sample_template = {
'properties': {
'name': 'test_tap_flow',
'description': 'desc',
- 'port': '6af055d3-26f6-48dd-a597-7611d7e58d35',
- 'tap_service': '6af055d3-26f6-48dd-a597-7611d7e58d35',
+ 'port': '6af055d3-26f6-48dd-a597-7611d7e58d35',
+ 'tap_service': '6af055d3-26f6-48dd-a597-7611d7e58d35',
'direction': 'BOTH',
'vlan_filter': '1-5,9,18,27-30,99-108,4000-4095'
}
@@ -177,4 +177,4 @@ class TapFlowTest(common.HeatTestCase):
'tap_service': '6af055d3-26f6-48dd-a597-7611d7e58d35',
'direction': 'BOTH',
'vlan_filter': '1-5,9,18,27-30,99-108,4000-4095',
- }, self.test_resource.resource_id)
+ }, self.test_resource.resource_id)
diff --git a/heat/tests/openstack/neutron/test_taas/test_tap_service.py b/heat/tests/openstack/neutron/test_taas/test_tap_service.py
index 23db0aca8..322c910b6 100644
--- a/heat/tests/openstack/neutron/test_taas/test_tap_service.py
+++ b/heat/tests/openstack/neutron/test_taas/test_tap_service.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.resources.openstack.neutron.taas import tap_service
from heat.engine import stack
@@ -27,7 +27,7 @@ sample_template = {
'properties': {
'name': 'test_tap_service',
'description': 'desc',
- 'port': '6af055d3-26f6-48dd-a597-7611d7e58d35',
+ 'port': '6af055d3-26f6-48dd-a597-7611d7e58d35',
}
}
}
@@ -149,4 +149,4 @@ class TapServiceTest(common.HeatTestCase):
'name': 'name-updated',
'description': 'description-updated',
'port': '6af055d3-26f6-48dd-a597-7611d7e58d35',
- }, self.test_resource.resource_id)
+ }, self.test_resource.resource_id)
diff --git a/heat/tests/openstack/nova/fakes.py b/heat/tests/openstack/nova/fakes.py
index 03a00d29a..d931b8b69 100644
--- a/heat/tests/openstack/nova/fakes.py
+++ b/heat/tests/openstack/nova/fakes.py
@@ -14,15 +14,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
+
from novaclient import client as base_client
from novaclient import exceptions as nova_exceptions
import requests
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
from heat.tests import fakes
-
NOVA_API_VERSION = "2.1"
Client = base_client.Client(NOVA_API_VERSION).__class__
@@ -45,16 +45,18 @@ class FakeClient(fakes.FakeClient, Client):
class FakeSessionClient(base_client.SessionClient):
- def __init__(self, *args, **kwargs):
+ def __init__(self, *args, **kwargs):
super(FakeSessionClient, self).__init__(*args, **kwargs)
self.callstack = []
def request(self, url, method, **kwargs):
# Check that certain things are called correctly
if method in ['GET', 'DELETE']:
- assert 'body' not in kwargs
+ if 'body' in kwargs:
+ raise AssertionError('Request body in %s' % method)
elif method == 'PUT':
- assert 'body' in kwargs
+ if 'body' not in kwargs:
+ raise AssertionError('No request body in %s' % method)
# Call the method
args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
@@ -111,6 +113,8 @@ class FakeSessionClient(base_client.SessionClient):
"accessIPv6": "",
"metadata": {"Server Label": "Web Head 1",
"Image Version": "2.1"}},
+
+ # 1
{"id": "5678",
"name": "sample-server2",
"OS-EXT-AZ:availability_zone": "nova2",
@@ -135,6 +139,7 @@ class FakeSessionClient(base_client.SessionClient):
"OS-EXT-IPS-MAC:mac_addr":
"fa:16:3e:8c:44:cc"}]},
"metadata": {}},
+ # 2
{"id": "9101",
"name": "hard-reboot",
"OS-EXT-SRV-ATTR:instance_name":
@@ -152,6 +157,7 @@ class FakeSessionClient(base_client.SessionClient):
"private": [{"version": 4,
"addr": "10.13.12.13"}]},
"metadata": {"Server Label": "DB 1"}},
+ # 3
{"id": "9102",
"name": "server-with-no-ip",
"OS-EXT-SRV-ATTR:instance_name":
@@ -164,6 +170,7 @@ class FakeSessionClient(base_client.SessionClient):
"accessIPv6": "",
"addresses": {"empty_net": []},
"metadata": {"Server Label": "DB 1"}},
+ # 4
{"id": "9999",
"name": "sample-server3",
"OS-EXT-SRV-ATTR:instance_name":
@@ -184,6 +191,7 @@ class FakeSessionClient(base_client.SessionClient):
"os-extended-volumes:volumes_attached":
[{"id":
"66359157-dace-43ab-a7ed-a7e7cd7be59d"}]},
+ # 5
{"id": 56789,
"name": "server-with-metadata",
"OS-EXT-SRV-ATTR:instance_name":
@@ -200,6 +208,74 @@ class FakeSessionClient(base_client.SessionClient):
"addr": "5.6.9.8"}],
"private": [{"version": 4,
"addr": "10.13.12.13"}]},
+ "metadata": {'test': '123', 'this': 'that'}},
+ # 6
+ {"id": "WikiDatabase",
+ "name": "server-with-metadata",
+ "OS-EXT-STS:task_state": None,
+ "image": {"id": 2, "name": "sample image"},
+ "flavor": {"id": 1, "name": "256 MB Server"},
+ "hostId": "9e107d9d372bb6826bd81d3542a419d6",
+ "status": "ACTIVE",
+ "accessIPv4": "192.0.2.0",
+ "accessIPv6": "::babe:4317:0A83",
+ "addresses": {"public": [{"version": 4,
+ "addr": "4.5.6.7"},
+ {"version": 4,
+ "addr": "5.6.9.8"}],
+ "private": [{"version": 4,
+ "addr": "10.13.12.13"}]},
+ "metadata": {'test': '123', 'this': 'that'}},
+ # 7
+ {"id": "InstanceInResize",
+ "name": "server-with-metadata",
+ "OS-EXT-STS:task_state": 'resize_finish',
+ "image": {"id": 2, "name": "sample image"},
+ "flavor": {"id": 1, "name": "256 MB Server"},
+ "hostId": "9e107d9d372bb6826bd81d3542a419d6",
+ "status": "ACTIVE",
+ "accessIPv4": "192.0.2.0",
+ "accessIPv6": "::babe:4317:0A83",
+ "addresses": {"public": [{"version": 4,
+ "addr": "4.5.6.7"},
+ {"version": 4,
+ "addr": "5.6.9.8"}],
+ "private": [{"version": 4,
+ "addr": "10.13.12.13"}]},
+ "metadata": {'test': '123', 'this': 'that'}},
+ # 8
+ {"id": "InstanceInActive",
+ "name": "server-with-metadata",
+ "OS-EXT-STS:task_state": 'active',
+ "image": {"id": 2, "name": "sample image"},
+ "flavor": {"id": 1, "name": "256 MB Server"},
+ "hostId": "9e107d9d372bb6826bd81d3542a419d6",
+ "status": "ACTIVE",
+ "accessIPv4": "192.0.2.0",
+ "accessIPv6": "::babe:4317:0A83",
+ "addresses": {"public": [{"version": 4,
+ "addr": "4.5.6.7"},
+ {"version": 4,
+ "addr": "5.6.9.8"}],
+ "private": [{"version": 4,
+ "addr": "10.13.12.13"}]},
+ "metadata": {'test': '123', 'this': 'that'}},
+ # 9
+ {"id": "AnotherServer",
+ "name": "server-with-metadata",
+ "OS-EXT-STS:task_state": 'active',
+ "image": {"id": 2, "name": "sample image"},
+ "flavor": {"id": 1, "name": "256 MB Server"},
+ "hostId": "9e107d9d372bb6826bd81d3542a419d6",
+ "status": "ACTIVE",
+ "accessIPv4": "192.0.2.0",
+ "accessIPv6": "::babe:4317:0A83",
+ "addresses": {"public": [{"version": 4,
+ "addr": "4.5.6.7"},
+ {"version": 4,
+ "addr": "5.6.9.8"}],
+ "private": [{"version": 4,
+ "addr": "10.13.12.13"}]},
"metadata": {'test': '123', 'this': 'that'}}]})
def get_servers_1234(self, **kw):
@@ -214,6 +290,22 @@ class FakeSessionClient(base_client.SessionClient):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
+ def get_servers_WikiDatabase(self, **kw):
+ r = {'server': self.get_servers_detail()[1]['servers'][6]}
+ return (200, r)
+
+ def get_servers_InstanceInResize(self, **kw):
+ r = {'server': self.get_servers_detail()[1]['servers'][7]}
+ return (200, r)
+
+ def get_servers_InstanceInActive(self, **kw):
+ r = {'server': self.get_servers_detail()[1]['servers'][8]}
+ return (200, r)
+
+ def get_servers_AnotherServer(self, **kw):
+ r = {'server': self.get_servers_detail()[1]['servers'][9]}
+ return (200, r)
+
def get_servers_WikiServerOne1(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
@@ -229,6 +321,9 @@ class FakeSessionClient(base_client.SessionClient):
def delete_servers_1234(self, **kw):
return (202, None)
+ def delete_servers_5678(self, **kw):
+ return (202, None)
+
def get_servers_9999(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][4]}
return (200, r)
@@ -244,21 +339,28 @@ class FakeSessionClient(base_client.SessionClient):
def post_servers_1234_action(self, body, **kw):
_body = None
resp = 202
- assert len(body.keys()) == 1
+ if len(body.keys()) != 1:
+ raise AssertionError('Expected exactly one action in request body')
action = next(iter(body))
+ keys = list(body[action].keys()) if body[action] is not None else None
if action == 'reboot':
- assert list(body[action].keys()) == ['type']
- assert body[action]['type'] in ['HARD', 'SOFT']
+ if keys != ['type']:
+ raise AssertionError('Unexpected action keys for %s: %s' %
+ (action, keys))
+ if body[action]['type'] not in ['HARD', 'SOFT']:
+ raise AssertionError('Unexpected reboot type %s' %
+ body[action]['type'])
elif action == 'rebuild':
- keys = list(body[action].keys())
if 'adminPass' in keys:
keys.remove('adminPass')
- assert keys == ['imageRef']
+ if keys != ['imageRef']:
+ raise AssertionError('Unexpected action keys for %s: %s' %
+ (action, keys))
_body = self.get_servers_1234()[1]
- elif action == 'resize':
- assert list(body[action].keys()) == ['flavorRef']
elif action == 'confirmResize':
- assert body[action] is None
+ if body[action] is not None:
+ raise AssertionError('Unexpected data for confirmResize: %s' %
+ body[action])
# This one method returns a different response code
return (204, None)
elif action in ['revertResize',
@@ -266,45 +368,53 @@ class FakeSessionClient(base_client.SessionClient):
'rescue', 'unrescue',
'suspend', 'resume',
'lock', 'unlock',
- ]:
- assert body[action] is None
- elif action == 'addFixedIp':
- assert list(body[action].keys()) == ['networkId']
- elif action in ['removeFixedIp',
- 'addFloatingIp',
- 'removeFloatingIp',
- ]:
- assert list(body[action].keys()) == ['address']
- elif action == 'createImage':
- assert set(body[action].keys()) == set(['name', 'metadata'])
- resp = {"status": 202,
- "location": "http://blah/images/456"}
- elif action == 'changePassword':
- assert list(body[action].keys()) == ['adminPass']
- elif action == 'os-getConsoleOutput':
- assert list(body[action].keys()) == ['length']
- return (202, {'output': 'foo'})
- elif action == 'os-getVNCConsole':
- assert list(body[action].keys()) == ['type']
- elif action == 'os-migrateLive':
- assert set(body[action].keys()) == set(['host',
- 'block_migration',
- 'disk_over_commit'])
- elif action == 'forceDelete':
- assert body is not None
+ 'forceDelete']:
+ if body[action] is not None:
+ raise AssertionError('Unexpected data for %s: %s' %
+ (action, body[action]))
else:
- raise AssertionError("Unexpected server action: %s" % action)
+ expected_keys = {
+ 'resize': {'flavorRef'},
+ 'addFixedIp': {'networkId'},
+ 'removeFixedIp': {'address'},
+ 'addFloatingIp': {'address'},
+ 'removeFloatingIp': {'address'},
+ 'createImage': {'name', 'metadata'},
+ 'changePassword': {'adminPass'},
+ 'os-getConsoleOutput': {'length'},
+ 'os-getVNCConsole': {'type'},
+ 'os-migrateLive': {'host', 'block_migration',
+ 'disk_over_commit'},
+ }
+
+ if action in expected_keys:
+ if set(keys) != set(expected_keys[action]):
+ raise AssertionError('Unexpected action keys for %s: %s' %
+ (action, keys))
+ else:
+ raise AssertionError("Unexpected server action: %s" % action)
+
+ if action == 'createImage':
+ resp = {"status": 202,
+ "location": "http://blah/images/456"}
+ if action == 'os-getConsoleOutput':
+ return (202, {'output': 'foo'})
+
return (resp, _body)
def post_servers_5678_action(self, body, **kw):
_body = None
resp = 202
- assert len(body.keys()) == 1
+ if len(body.keys()) != 1:
+ raise AssertionError("No action in body")
action = next(iter(body))
if action in ['addFloatingIp',
'removeFloatingIp',
]:
- assert list(body[action].keys()) == ['address']
+ keys = list(body[action].keys())
+ if keys != ['address']:
+ raise AssertionError('Unexpected action keys for %s: %s' %
+ (action, keys))
return (resp, _body)
@@ -347,6 +457,19 @@ class FakeSessionClient(base_client.SessionClient):
'OS-FLV-EXT-DATA:ephemeral': 30}})
#
+ # Interfaces
+ #
+
+ def get_servers_5678_os_interface(self, **kw):
+ return (200, {'interfaceAttachments':
+ [{"fixed_ips":
+ [{"ip_address": "10.0.0.1",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }],
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442"
+ }]})
+
+ #
# Floating ips
#
diff --git a/heat/tests/openstack/nova/test_flavor.py b/heat/tests/openstack/nova/test_flavor.py
index 7799fbdbb..fadab41d3 100644
--- a/heat/tests/openstack/nova/test_flavor.py
+++ b/heat/tests/openstack/nova/test_flavor.py
@@ -11,9 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
-from heat.engine.clients.os import nova as novac
from heat.engine import stack
from heat.engine import template
from heat.tests import common
@@ -42,8 +41,6 @@ flavor_template = {
class NovaFlavorTest(common.HeatTestCase):
def setUp(self):
super(NovaFlavorTest, self).setUp()
- self.patchobject(novac.NovaClientPlugin, 'has_extension',
- return_value=True)
self.ctx = utils.dummy_context()
def create_flavor(self, with_name_id=False, is_public=True):
@@ -154,7 +151,7 @@ class NovaFlavorTest(common.HeatTestCase):
test_tenants_add = [mock.call(value, 'new_foo'),
mock.call(value, 'new_bar')]
test_add = self.my_flavor.client().flavor_access.add_tenant_access
- self.assertItemsEqual(test_tenants_add,
+ self.assertCountEqual(test_tenants_add,
test_add.call_args_list)
def test_flavor_handle_update_remove_tenants(self):
@@ -177,7 +174,7 @@ class NovaFlavorTest(common.HeatTestCase):
test_tenants_remove = [mock.call(value, 'foo'),
mock.call(value, 'bar')]
test_rem = self.my_flavor.client().flavor_access.remove_tenant_access
- self.assertItemsEqual(test_tenants_remove,
+ self.assertCountEqual(test_tenants_remove,
test_rem.call_args_list)
def test_flavor_show_resource(self):
diff --git a/heat/tests/openstack/nova/test_floatingip.py b/heat/tests/openstack/nova/test_floatingip.py
index afbdde024..e747ce224 100644
--- a/heat/tests/openstack/nova/test_floatingip.py
+++ b/heat/tests/openstack/nova/test_floatingip.py
@@ -12,8 +12,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from neutronclient.v2_0 import client as neutronclient
from heat.common import exception as heat_ex
diff --git a/heat/tests/openstack/nova/test_host_aggregate.py b/heat/tests/openstack/nova/test_host_aggregate.py
index 8b164e4e7..bb0fbd944 100644
--- a/heat/tests/openstack/nova/test_host_aggregate.py
+++ b/heat/tests/openstack/nova/test_host_aggregate.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.engine.clients.os import nova
from heat.engine import stack
@@ -21,7 +21,7 @@ from heat.tests import utils
AGGREGATE_TEMPLATE = {
'heat_template_version': '2013-05-23',
- 'description': 'Heat Aggregate creation example',
+ 'description': 'Heat Aggregate creation example',
'resources': {
'my_aggregate': {
'type': 'OS::Nova::HostAggregate',
@@ -39,9 +39,6 @@ AGGREGATE_TEMPLATE = {
class NovaHostAggregateTest(common.HeatTestCase):
def setUp(self):
super(NovaHostAggregateTest, self).setUp()
- self.patchobject(nova.NovaClientPlugin,
- 'has_extension',
- return_value=True)
self.ctx = utils.dummy_context()
self.stack = stack.Stack(
diff --git a/heat/tests/openstack/nova/test_keypair.py b/heat/tests/openstack/nova/test_keypair.py
index cc412bbd2..c746b0950 100644
--- a/heat/tests/openstack/nova/test_keypair.py
+++ b/heat/tests/openstack/nova/test_keypair.py
@@ -12,9 +12,7 @@
# under the License.
import copy
-
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.engine.clients.os import keystone
@@ -46,8 +44,6 @@ class NovaKeyPairTest(common.HeatTestCase):
self.fake_nova = mock.MagicMock()
self.fake_keypairs = mock.MagicMock()
self.fake_nova.keypairs = self.fake_keypairs
- self.patchobject(nova.NovaClientPlugin, 'has_extension',
- return_value=True)
self.cp_mock = self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fake_nova)
@@ -157,9 +153,9 @@ class NovaKeyPairTest(common.HeatTestCase):
kp_res = keypair.KeyPair('kp', definition, stack)
error = self.assertRaises(exception.StackValidationFailed,
kp_res.validate)
- self.assertIn("Property error", six.text_type(error))
+ self.assertIn("Property error", str(error))
self.assertIn("kp.properties.name: length (0) is out of "
- "range (min: 1, max: 255)", six.text_type(error))
+ "range (min: 1, max: 255)", str(error))
def test_create_key_excess_name_length(self):
"""Test creation of a keypair whose name is of excess length."""
@@ -171,9 +167,9 @@ class NovaKeyPairTest(common.HeatTestCase):
kp_res = keypair.KeyPair('kp', definition, stack)
error = self.assertRaises(exception.StackValidationFailed,
kp_res.validate)
- self.assertIn("Property error", six.text_type(error))
+ self.assertIn("Property error", str(error))
self.assertIn("kp.properties.name: length (256) is out of "
- "range (min: 1, max: 255)", six.text_type(error))
+ "range (min: 1, max: 255)", str(error))
def _test_validate(self, key_type=None, user=None):
template = copy.deepcopy(self.kp_template)
@@ -191,7 +187,7 @@ class NovaKeyPairTest(common.HeatTestCase):
kp_res.validate)
msg = (('Cannot use "%s" properties - nova does not support '
'required api microversion.') % validate_props)
- self.assertIn(msg, six.text_type(error))
+ self.assertIn(msg, str(error))
def test_validate_key_type(self):
self.patchobject(nova.NovaClientPlugin, 'get_max_microversion',
@@ -219,7 +215,7 @@ class NovaKeyPairTest(common.HeatTestCase):
res.client().keypairs.get.side_effect = Exception("boom")
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
- self.assertIn("boom", six.text_type(exc))
+ self.assertIn("boom", str(exc))
self.assertEqual((res.CHECK, res.FAILED), res.state)
def test_update_replace(self):
diff --git a/heat/tests/openstack/nova/test_quota.py b/heat/tests/openstack/nova/test_quota.py
index 3c813a56b..bd2beb8f7 100644
--- a/heat/tests/openstack/nova/test_quota.py
+++ b/heat/tests/openstack/nova/test_quota.py
@@ -10,13 +10,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import keystone as k_plugin
-from heat.engine.clients.os import nova as n_plugin
from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import template
@@ -62,8 +60,6 @@ class NovaQuotaTest(common.HeatTestCase):
super(NovaQuotaTest, self).setUp()
self.ctx = utils.dummy_context()
- self.patchobject(n_plugin.NovaClientPlugin, 'has_extension',
- return_value=True)
self.patchobject(k_plugin.KeystoneClientPlugin, 'get_project_id',
return_value='some_project_id')
tpl = template_format.parse(quota_template)
@@ -85,7 +81,7 @@ class NovaQuotaTest(common.HeatTestCase):
def _test_validate(self, resource, error_msg):
exc = self.assertRaises(exception.StackValidationFailed,
resource.validate)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def _test_invalid_property(self, prop_name):
my_quota = self.stack['my_quota']
diff --git a/heat/tests/openstack/nova/test_server.py b/heat/tests/openstack/nova/test_server.py
index 1109c886e..b3d36af2b 100644
--- a/heat/tests/openstack/nova/test_server.py
+++ b/heat/tests/openstack/nova/test_server.py
@@ -14,7 +14,7 @@
import collections
import contextlib
import copy
-import mock
+from unittest import mock
from keystoneauth1 import exceptions as ks_exceptions
from neutronclient.v2_0 import client as neutronclient
@@ -22,8 +22,7 @@ from novaclient import exceptions as nova_exceptions
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import requests
-import six
-from six.moves.urllib import parse as urlparse
+from urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
@@ -319,14 +318,14 @@ class ServersTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
- self.assertIn('boom', six.text_type(exc))
+ self.assertIn('boom', str(exc))
self.assertEqual((res.CHECK, res.FAILED), res.state)
def test_check_not_active(self):
res = self._prepare_server_check(status='FOO')
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
- self.assertIn('FOO', six.text_type(exc))
+ self.assertIn('FOO', str(exc))
def _get_test_template(self, stack_name, server_name=None,
image_id=None):
@@ -447,42 +446,48 @@ class ServersTest(common.HeatTestCase):
ip='5.6.9.8'),
create_fake_iface(port='1013',
mac='fa:16:3e:8c:44:cc',
- ip='10.13.12.13')]
+ ip='10.13.12.13',
+ subnet='private_subnet_id')]
+ ports = [dict(id=interfaces[0].port_id,
+ mac_address=interfaces[0].mac_addr,
+ fixed_ips=interfaces[0].fixed_ips,
+ network_id='public_id'),
+ dict(id=interfaces[1].port_id,
+ mac_address=interfaces[1].mac_addr,
+ fixed_ips=interfaces[1].fixed_ips,
+ network_id='public_id'),
+ dict(id=interfaces[2].port_id,
+ mac_address=interfaces[2].mac_addr,
+ fixed_ips=interfaces[2].fixed_ips,
+ network_id='private_id')]
+ public_net = dict(id='public_id',
+ name='public',
+ mtu=1500,
+ subnets=['public_subnet_id'])
+ private_net = dict(id='private_id',
+ name='private',
+ mtu=1500,
+ subnets=['private_subnet_id'])
+ private_subnet = dict(id='private_subnet_id',
+ name='private_subnet',
+ cidr='private_cidr',
+ allocation_pools=[{'start': 'start_addr',
+ 'end': 'end_addr'}],
+ gateway_ip='private_gateway',
+ network_id='private_id')
self.patchobject(self.fc.servers, 'get', return_value=return_server)
- self.patchobject(return_server, 'interface_list',
- return_value=interfaces)
+ self.patchobject(neutronclient.Client, 'list_ports',
+ return_value={'ports': ports})
+ self.patchobject(neutronclient.Client, 'list_networks',
+ side_effect=[{'networks': [public_net]},
+ {'networks': [public_net]},
+ {'networks': [private_net]}])
+ self.patchobject(neutronclient.Client, 'list_floatingips',
+ return_value={'floatingips': []})
self.patchobject(self.fc.servers, 'tag_list', return_value=['test'])
-
- self.port_show.return_value = {
- 'port': {'id': '1234',
- 'network_id': 'the_network',
- 'fixed_ips': [{
- 'ip_address': '4.5.6.7',
- 'subnet_id': 'the_subnet'}]
- }
- }
- subnet_dict = {
- 'subnet': {
- 'name': 'subnet_name',
- 'cidr': '10.0.0.0/24',
- 'allocation_pools': [{'start': '10.0.0.2',
- 'end': u'10.0.0.254'}],
- 'gateway_ip': '10.0.0.1',
- 'id': 'the_subnet',
- 'network_id': 'the_network'
- }
- }
- network_dict = {
- 'network': {
- 'name': 'network_name',
- 'mtu': 1500,
- 'subnets': [subnet_dict['subnet']['id']],
- 'id': 'the_network'
- }
- }
- self.subnet_show.return_value = subnet_dict
- self.network_show.return_value = network_dict
+ self.subnet_show.return_value = {'subnet': private_subnet}
+ self.network_show.return_value = {'network': private_net}
public_ip = return_server.networks['public'][0]
self.assertEqual('1234',
@@ -499,9 +504,9 @@ class ServersTest(common.HeatTestCase):
server.FnGetAtt('addresses')['private'][0]['port'])
self.assertEqual(private_ip,
server.FnGetAtt('addresses')['private'][0]['addr'])
- self.assertEqual([subnet_dict['subnet']],
+ self.assertEqual([private_subnet],
server.FnGetAtt('addresses')['private'][0]['subnets'])
- self.assertEqual(network_dict['network'],
+ self.assertEqual(private_net,
server.FnGetAtt('addresses')['private'][0]['network'])
self.assertEqual(private_ip,
server.FnGetAtt('networks')['private'][0])
@@ -522,21 +527,6 @@ class ServersTest(common.HeatTestCase):
self.assertIsNone(server.FnGetAtt('tags'))
self.assertEqual({}, server.FnGetAtt('os_collect_config'))
- def test_server_network_subnet_address_attr_port_not_found(self):
- return_server = self.fc.servers.list()[1]
- server_name = 'network-subnet-attr-server'
- server = self._create_test_server(return_server, server_name)
- interfaces = [create_fake_iface(port='1234',
- mac='fa:16:3e:8c:22:aa',
- ip='4.5.6.7')]
- self.patchobject(return_server, 'interface_list',
- return_value=interfaces)
- self.port_show.side_effect = neutron.exceptions.NotFound()
- self.assertEqual(None,
- server.FnGetAtt('addresses')['private'][0]['subnets'])
- self.assertEqual(None,
- server.FnGetAtt('addresses')['private'][0]['network'])
-
def test_server_create_metadata(self):
stack_name = 'create_metadata_test_stack'
self.patchobject(nova.NovaClientPlugin, 'client',
@@ -643,10 +633,30 @@ class ServersTest(common.HeatTestCase):
create_fake_iface(port='1013',
mac='fa:16:3e:8c:44:cc',
ip='10.13.12.13')]
+ ports = [dict(id=interfaces[0].port_id,
+ mac_address=interfaces[0].mac_addr,
+ fixed_ips=interfaces[0].fixed_ips,
+ network_id='public_id'),
+ dict(id=interfaces[1].port_id,
+ mac_address=interfaces[1].mac_addr,
+ fixed_ips=interfaces[1].fixed_ips,
+ network_id='public_id'),
+ dict(id=interfaces[2].port_id,
+ mac_address=interfaces[2].mac_addr,
+ fixed_ips=interfaces[2].fixed_ips,
+ network_id='private_id')]
+ public_net = dict(id='public_id', name='public')
+ private_net = dict(id='private_id', name='private')
self.patchobject(self.fc.servers, 'get', return_value=return_server)
- self.patchobject(return_server, 'interface_list',
- return_value=interfaces)
+ self.patchobject(neutronclient.Client, 'list_ports',
+ return_value={'ports': ports})
+ self.patchobject(neutronclient.Client, 'list_networks',
+ side_effect=[{'networks': [public_net]},
+ {'networks': [public_net]},
+ {'networks': [private_net]}])
+ self.patchobject(neutronclient.Client, 'list_floatingips',
+ return_value={'floatingips': []})
self.patchobject(return_server, 'interface_detach')
self.patchobject(return_server, 'interface_attach')
@@ -688,7 +698,7 @@ class ServersTest(common.HeatTestCase):
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.create))
self.assertIn("No image matching {'name': 'Slackware'}.",
- six.text_type(error))
+ str(error))
def test_server_duplicate_image_name_err(self):
stack_name = 'img_dup_err'
@@ -707,7 +717,7 @@ class ServersTest(common.HeatTestCase):
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.create))
self.assertIn('No image unique match found for CentOS 5.2.',
- six.text_type(error))
+ str(error))
def test_server_create_unexpected_status(self):
# NOTE(pshchelo) checking is done only on check_create_complete
@@ -723,7 +733,7 @@ class ServersTest(common.HeatTestCase):
server.check_create_complete,
server.resource_id)
self.assertEqual('Server is not active - Unknown status BOGUS due to '
- '"Unknown"', six.text_type(e))
+ '"Unknown"', str(e))
def test_server_create_error_status(self):
# NOTE(pshchelo) checking is done only on check_create_complete
@@ -745,7 +755,7 @@ class ServersTest(common.HeatTestCase):
server.resource_id)
self.assertEqual(
'Went to status ERROR due to "Message: NoValidHost, Code: 500"',
- six.text_type(e))
+ str(e))
def test_server_create_raw_userdata(self):
self.patchobject(nova.NovaClientPlugin, 'client',
@@ -1074,6 +1084,43 @@ class ServersTest(common.HeatTestCase):
'deployments': []
}, server.metadata_get())
+ def test_delete_swift_service_removed(self):
+ self.patchobject(nova.NovaClientPlugin, 'client',
+ return_value=self.fc)
+ return_server = self.fc.servers.list()[1]
+ stack_name = 'software_config_s'
+ (tmpl, stack) = self._setup_test_stack(stack_name)
+
+ props = tmpl.t['Resources']['WebServer']['Properties']
+ props['user_data_format'] = 'SOFTWARE_CONFIG'
+ props['software_config_transport'] = 'POLL_TEMP_URL'
+ self.server_props = props
+
+ resource_defns = tmpl.resource_definitions(stack)
+ server = servers.Server('WebServer',
+ resource_defns['WebServer'], stack)
+ self.patchobject(server, 'store_external_ports')
+
+ sc = mock.Mock()
+ sc.head_account.return_value = {
+ 'x-account-meta-temp-url-key': 'secrit'
+ }
+ sc.url = 'http://192.0.2.2'
+
+ self.patchobject(swift.SwiftClientPlugin, '_create',
+ return_value=sc)
+ self.patchobject(self.fc.servers, 'create',
+ return_value=return_server)
+ scheduler.TaskRunner(server.create)()
+ self.assertEqual((server.CREATE, server.COMPLETE), server.state)
+ self.patchobject(server.client_plugin(),
+ 'does_endpoint_exist',
+ return_value=False)
+ side_effect = [server, fakes_nova.fake_exception()]
+ self.patchobject(self.fc.servers, 'get', side_effect=side_effect)
+ scheduler.TaskRunner(server.delete)()
+ self.assertEqual((server.DELETE, server.COMPLETE), server.state)
+
def _prepare_for_server_create(self, md=None):
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
@@ -1166,6 +1213,20 @@ class ServersTest(common.HeatTestCase):
scheduler.TaskRunner(server.delete)()
self.assertEqual((server.DELETE, server.COMPLETE), server.state)
+ def test_delete_zaqar_service_removed(self):
+ zcc = self.patchobject(zaqar.ZaqarClientPlugin, 'create_for_tenant')
+ zcc.return_value = mock.Mock()
+ server, stack = self._prepare_for_server_create()
+ scheduler.TaskRunner(server.create)()
+ self.assertEqual((server.CREATE, server.COMPLETE), server.state)
+ self.patchobject(server.client_plugin(),
+ 'does_endpoint_exist',
+ return_value=False)
+ side_effect = [server, fakes_nova.fake_exception()]
+ self.patchobject(self.fc.servers, 'get', side_effect=side_effect)
+ scheduler.TaskRunner(server.delete)()
+ self.assertEqual((server.DELETE, server.COMPLETE), server.state)
+
def test_server_create_software_config_zaqar_metadata(self):
md = {'os-collect-config': {'polling_interval': 10}}
queue_id, server = self._server_create_software_config_zaqar(md=md)
@@ -1272,7 +1333,7 @@ class ServersTest(common.HeatTestCase):
error = self.assertRaises(exception.StackValidationFailed,
servers.Server._check_maximum,
2, 1, msg)
- self.assertEqual(msg, six.text_type(error))
+ self.assertEqual(msg, str(error))
def test_server_validate(self):
stack_name = 'srv_val'
@@ -1318,7 +1379,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertEqual('Neither image nor bootable volume is specified for '
'instance server_with_bootable_volume',
- six.text_type(ex))
+ str(ex))
web_server['Properties']['image'] = ''
server = create_server('vdb')
@@ -1349,8 +1410,6 @@ class ServersTest(common.HeatTestCase):
}
}
'''
- self.patchobject(nova.NovaClientPlugin, 'has_extension',
- return_value=True)
t = template_format.parse(nova_keypair_template)
templ = template.Template(t)
self.patchobject(nova.NovaClientPlugin, 'client',
@@ -1387,7 +1446,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual(
"Property error: Resources.WebServer.Properties.key_name: "
"Error validating value 'test2': The Key (test2) could not "
- "be found.", six.text_type(error))
+ "be found.", str(error))
def test_server_validate_software_config_invalid_meta(self):
stack_name = 'srv_val_test'
@@ -1409,7 +1468,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertEqual(
"deployments key not allowed in resource metadata "
- "with user_data_format of SOFTWARE_CONFIG", six.text_type(error))
+ "with user_data_format of SOFTWARE_CONFIG", str(error))
def test_server_validate_with_networks(self):
stack_name = 'srv_net'
@@ -1431,7 +1490,7 @@ class ServersTest(common.HeatTestCase):
self.assertIn("Cannot define the following properties at "
"the same time: networks.network, networks.uuid",
- six.text_type(ex))
+ str(ex))
def test_server_validate_with_network_empty_ref(self):
stack_name = 'srv_net'
@@ -1475,7 +1534,7 @@ class ServersTest(common.HeatTestCase):
'"allocate_network" or "subnet" should be set '
'for the specified network of '
'server "%s".') % server.name,
- six.text_type(ex))
+ str(ex))
def test_server_validate_with_network_floating_ip(self):
stack_name = 'srv_net_floating_ip'
@@ -1500,7 +1559,7 @@ class ServersTest(common.HeatTestCase):
self.assertIn(_('Property "floating_ip" is not supported if '
'only "network" is specified, because the '
'corresponding port can not be retrieved.'),
- six.text_type(ex))
+ str(ex))
def test_server_validate_with_networks_str_net(self):
stack_name = 'srv_networks_str_nets'
@@ -1524,7 +1583,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertIn(_('Can not specify "allocate_network" with '
'other keys of networks at the same time.'),
- six.text_type(ex))
+ str(ex))
def test_server_validate_port_fixed_ip(self):
stack_name = 'port_with_fixed_ip'
@@ -1545,7 +1604,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertEqual("Cannot define the following properties at the same "
"time: networks/fixed_ip, networks/port.",
- six.text_type(error))
+ str(error))
# test if the 'port' doesn't reference with non-created resource
tmpl['Resources']['server']['Properties']['networks'] = (
[{'port': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
@@ -1559,7 +1618,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertEqual("Cannot define the following properties at the same "
"time: networks/fixed_ip, networks/port.",
- six.text_type(error))
+ str(error))
def test_server_validate_with_uuid_fixed_ip(self):
stack_name = 'srv_net'
@@ -1624,7 +1683,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertEqual("Cannot define the following properties at the same "
"time: security_groups, networks/port.",
- six.text_type(error))
+ str(error))
def test_server_delete(self):
return_server = self.fc.servers.list()[1]
@@ -1673,7 +1732,7 @@ class ServersTest(common.HeatTestCase):
resf = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.delete))
self.assertIn("Server %s delete failed" % return_server.name,
- six.text_type(resf))
+ str(resf))
def test_server_delete_error_task_in_progress(self):
# test server in 'ERROR', but task state in nova is 'deleting'
@@ -1698,7 +1757,7 @@ class ServersTest(common.HeatTestCase):
resf = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.delete))
self.assertIn("Server %s delete failed" % return_server.name,
- six.text_type(resf))
+ str(resf))
def test_server_soft_delete(self):
return_server = self.fc.servers.list()[1]
@@ -2041,20 +2100,42 @@ class ServersTest(common.HeatTestCase):
server.properties.data['networks'] = [{'network': 'public_id',
'fixed_ip': '5.6.9.8'}]
+ public_net = dict(id='public_id', name='public')
+ private_net = dict(id='private_id', name='private')
iface0 = create_fake_iface(port='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
net='public',
ip='5.6.9.8',
mac='fa:16:3e:8c:33:aa')
+ port0 = dict(id=iface0.port_id,
+ network_id=iface0.net_id,
+ mac_address=iface0.mac_addr,
+ fixed_ips=iface0.fixed_ips)
iface1 = create_fake_iface(port='bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
net='public',
ip='4.5.6.7',
mac='fa:16:3e:8c:22:aa')
+ port1 = dict(id=iface1.port_id,
+ network_id=iface1.net_id,
+ mac_address=iface1.mac_addr,
+ fixed_ips=iface1.fixed_ips)
iface2 = create_fake_iface(port='cccccccc-cccc-cccc-cccc-cccccccccccc',
net='private',
ip='10.13.12.13',
mac='fa:16:3e:8c:44:cc')
+ port2 = dict(id=iface2.port_id,
+ network_id=iface2.net_id,
+ mac_address=iface2.mac_addr,
+ fixed_ips=iface2.fixed_ips)
self.patchobject(return_server, 'interface_list',
return_value=[iface0, iface1, iface2])
+ self.patchobject(neutronclient.Client, 'list_ports',
+ return_value={'ports': [port0, port1, port2]})
+ self.patchobject(neutronclient.Client, 'list_networks',
+ side_effect=[{'networks': [public_net]},
+ {'networks': [public_net]},
+ {'networks': [private_net]}])
+ self.patchobject(neutronclient.Client, 'list_floatingips',
+ return_value={'floatingips': []})
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
@@ -2086,7 +2167,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual(exp_net[key], net[key])
break
- for key in six.iterkeys(reality):
+ for key in reality.keys():
self.assertEqual(expected[key], reality[key])
def test_server_update_server_flavor(self):
@@ -2144,7 +2225,7 @@ class ServersTest(common.HeatTestCase):
error = self.assertRaises(exception.ResourceFailure, updater)
self.assertEqual(
"Error: resources.srv_update2: Resizing to '2' failed, "
- "status 'ERROR'", six.text_type(error))
+ "status 'ERROR'", str(error))
self.assertEqual((server.UPDATE, server.FAILED), server.state)
mock_post.assert_called_once_with(body={'resize': {'flavorRef': '2'}})
@@ -2399,7 +2480,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual(
"Error: resources.srv_updrbldfail: "
"Rebuilding server failed, status 'ERROR'",
- six.text_type(error))
+ str(error))
self.assertEqual((server.UPDATE, server.FAILED), server.state)
mock_rebuild.assert_called_once_with(
return_server, '2', password=None, preserve_ephemeral=False,
@@ -2442,7 +2523,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual('Error: resources.srv_sus1: '
'Cannot suspend srv_sus1, '
'resource_id not set',
- six.text_type(ex))
+ str(ex))
self.assertEqual((server.SUSPEND, server.FAILED), server.state)
def test_server_status_suspend_not_found(self):
@@ -2456,7 +2537,7 @@ class ServersTest(common.HeatTestCase):
scheduler.TaskRunner(server.suspend))
self.assertEqual('NotFound: resources.srv_sus2: '
'Failed to find server 1234',
- six.text_type(ex))
+ str(ex))
self.assertEqual((server.SUSPEND, server.FAILED), server.state)
def _test_server_status_suspend(self, name, state=('CREATE', 'COMPLETE')):
@@ -2507,7 +2588,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual('Suspend of server %s failed - '
'Unknown status TRANSMOGRIFIED '
'due to "Unknown"' % return_server.name,
- six.text_type(ex.exc.message))
+ str(ex.exc.message))
self.assertEqual((server.SUSPEND, server.FAILED), server.state)
def _test_server_status_resume(self, name, state=('SUSPEND', 'COMPLETE')):
@@ -2553,7 +2634,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual('Error: resources.srv_susp_norid: '
'Cannot resume srv_susp_norid, '
'resource_id not set',
- six.text_type(ex))
+ str(ex))
self.assertEqual((server.RESUME, server.FAILED), server.state)
def test_server_status_resume_not_found(self):
@@ -2571,7 +2652,7 @@ class ServersTest(common.HeatTestCase):
scheduler.TaskRunner(server.resume))
self.assertEqual('NotFound: resources.srv_res_nf: '
'Failed to find server 1234',
- six.text_type(ex))
+ str(ex))
self.assertEqual((server.RESUME, server.FAILED), server.state)
def test_server_status_build_spawning(self):
@@ -2667,7 +2748,7 @@ class ServersTest(common.HeatTestCase):
resolver.side_effect = neutron.exceptions.NeutronClientNoUniqueMatch()
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.create))
- self.assertIn('use an ID to be more specific.', six.text_type(ex))
+ self.assertIn('use an ID to be more specific.', str(ex))
def test_server_without_ip_address(self):
return_server = self.fc.servers.list()[3]
@@ -2677,6 +2758,14 @@ class ServersTest(common.HeatTestCase):
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
return_value=None)
+ self.patchobject(neutronclient.Client, 'list_ports',
+ return_value={'ports': [{'id': 'p_id',
+ 'name': 'p_name',
+ 'fixed_ips': [],
+ 'network_id': 'n_id'}]})
+ self.patchobject(neutronclient.Client, 'list_networks',
+ return_value={'networks': [{'id': 'n_id',
+ 'name': 'empty_net'}]})
self.patchobject(self.fc.servers, 'get', return_value=return_server)
self.patchobject(return_server, 'interface_list', return_value=[])
mock_detach = self.patchobject(return_server, 'interface_detach')
@@ -2767,7 +2856,7 @@ class ServersTest(common.HeatTestCase):
self.stub_VolumeConstraint_validate()
exc = self.assertRaises(exception.StackValidationFailed,
server.validate)
- self.assertIn("Value '10a' is not an integer", six.text_type(exc))
+ self.assertIn("Value '10a' is not an integer", str(exc))
@mock.patch.object(nova.NovaClientPlugin, 'client')
def test_validate_conflict_block_device_mapping_props(self, mock_create):
@@ -2800,7 +2889,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
msg = ("Either volume_id or snapshot_id must be specified "
"for device mapping vdb")
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
@mock.patch.object(nova.NovaClientPlugin, 'client')
def test_validate_block_device_mapping_with_empty_ref(self, mock_create):
@@ -2838,7 +2927,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
msg = ('Neither image nor bootable volume is specified '
'for instance %s' % server.name)
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
@mock.patch.object(nova.NovaClientPlugin, 'client')
def test_validate_invalid_image_status(self, mock_create):
@@ -2856,7 +2945,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertEqual(
'Image status is required to be active not sdfsdf.',
- six.text_type(error))
+ str(error))
@mock.patch.object(nova.NovaClientPlugin, 'client')
def test_validate_insufficient_ram_flavor(self, mock_create):
@@ -2877,7 +2966,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual(
'Image F18-x86_64-gold requires 100 minimum ram. Flavor m1.large '
'has only 4.',
- six.text_type(error))
+ str(error))
@mock.patch.object(nova.NovaClientPlugin, 'client')
def test_validate_image_flavor_not_found(self, mock_create):
@@ -2915,7 +3004,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual(
'Image F18-x86_64-gold requires 100 GB minimum disk space. '
'Flavor m1.large has only 4 GB.',
- six.text_type(error))
+ str(error))
def test_build_block_device_mapping_v2(self):
self.assertIsNone(servers.Server._build_block_device_mapping_v2([]))
@@ -2991,7 +3080,7 @@ class ServersTest(common.HeatTestCase):
server = servers.Server('server', resource_defns['server'], stack)
exc = self.assertRaises(exception.StackValidationFailed,
server.validate)
- self.assertIn(msg, six.text_type(exc))
+ self.assertIn(msg, str(exc))
@mock.patch.object(nova.NovaClientPlugin, 'client')
def test_validate_with_both_blk_dev_map_and_blk_dev_map_v2(self,
@@ -3013,7 +3102,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
msg = ('Cannot define the following properties at the same time: '
'block_device_mapping, block_device_mapping_v2.')
- self.assertEqual(msg, six.text_type(exc))
+ self.assertEqual(msg, str(exc))
def _test_validate_bdm_v2(self, stack_name, bdm_v2, with_image=True,
error_msg=None, raise_exc=None):
@@ -3033,7 +3122,7 @@ class ServersTest(common.HeatTestCase):
self.stub_VolumeConstraint_validate()
if raise_exc:
ex = self.assertRaises(raise_exc, server.validate)
- self.assertIn(error_msg, six.text_type(ex))
+ self.assertIn(error_msg, str(ex))
else:
self.assertIsNone(server.validate())
@@ -3114,7 +3203,7 @@ class ServersTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertIn('Instance metadata must not contain greater than 3 '
- 'entries', six.text_type(ex))
+ 'entries', str(ex))
def test_validate_metadata_okay(self):
stack_name = 'srv_val_metadata'
@@ -3156,13 +3245,15 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertEqual('Cannot use "tags" property - nova does not support '
'required api microversion.',
- six.text_type(exc))
+ str(exc))
def test_server_validate_too_many_personality(self):
stack_name = 'srv_val'
(tmpl, stack) = self._setup_test_stack(stack_name)
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
+ self.patchobject(nova.NovaClientPlugin, 'is_version_supported',
+ return_value=False)
tmpl.t['Resources']['WebServer']['Properties'][
'personality'] = {"/fake/path1": "fake contents1",
"/fake/path2": "fake_contents2",
@@ -3180,13 +3271,41 @@ class ServersTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
server.validate)
self.assertEqual("The personality property may not contain "
- "greater than 5 entries.", six.text_type(exc))
+ "greater than 5 entries.", str(exc))
+
+ def test_server_validate_personality_unsupported(self):
+ stack_name = 'srv_val'
+ (tmpl, stack) = self._setup_test_stack(stack_name)
+ self.patchobject(nova.NovaClientPlugin, 'client',
+ return_value=self.fc)
+ self.patchobject(nova.NovaClientPlugin, 'is_version_supported',
+ return_value=True)
+ tmpl.t['Resources']['WebServer']['Properties'][
+ 'personality'] = {"/fake/path1": "fake contents1",
+ "/fake/path2": "fake_contents2",
+ "/fake/path3": "fake_contents3",
+ "/fake/path4": "fake_contents4",
+ "/fake/path5": "fake_contents5"}
+ resource_defns = tmpl.resource_definitions(stack)
+ server = servers.Server('server_create_image_err',
+ resource_defns['WebServer'], stack)
+
+ self.patchobject(self.fc.limits, 'get', return_value=self.limits)
+ self.patchobject(glance.GlanceClientPlugin, 'get_image',
+ return_value=self.mock_image)
+ exc = self.assertRaises(exception.StackValidationFailed,
+ server.validate)
+ self.assertEqual("Cannot use the personality parameter as nova "
+ "no longer supports it. Use user_data instead.",
+ str(exc))
def test_server_validate_personality_okay(self):
stack_name = 'srv_val'
(tmpl, stack) = self._setup_test_stack(stack_name)
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
+ self.patchobject(nova.NovaClientPlugin, 'is_version_supported',
+ return_value=False)
tmpl.t['Resources']['WebServer']['Properties'][
'personality'] = {"/fake/path1": "fake contents1",
"/fake/path2": "fake_contents2",
@@ -3207,6 +3326,8 @@ class ServersTest(common.HeatTestCase):
(tmpl, stack) = self._setup_test_stack(stack_name)
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
+ self.patchobject(nova.NovaClientPlugin, 'is_version_supported',
+ return_value=False)
tmpl.t['Resources']['WebServer']['Properties'][
'personality'] = {"/fake/path1": "a" * 10240}
resource_defns = tmpl.resource_definitions(stack)
@@ -3223,6 +3344,8 @@ class ServersTest(common.HeatTestCase):
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
+ self.patchobject(nova.NovaClientPlugin, 'is_version_supported',
+ return_value=False)
tmpl.t['Resources']['WebServer']['Properties'][
'personality'] = {"/fake/path1": "a" * 10241}
resource_defns = tmpl.resource_definitions(stack)
@@ -3236,7 +3359,7 @@ class ServersTest(common.HeatTestCase):
server.validate)
self.assertEqual('The contents of personality file "/fake/path1" '
'is larger than the maximum allowed personality '
- 'file size (10240 bytes).', six.text_type(exc))
+ 'file size (10240 bytes).', str(exc))
def test_server_validate_personality_get_attr_return_none(self):
stack_name = 'srv_val'
@@ -3244,6 +3367,8 @@ class ServersTest(common.HeatTestCase):
stack_name, server_with_sw_config_personality)
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
+ self.patchobject(nova.NovaClientPlugin, 'is_version_supported',
+ return_value=False)
resource_defns = tmpl.resource_definitions(stack)
server = servers.Server('server_create_image_err',
resource_defns['server'], stack)
@@ -3272,7 +3397,7 @@ class ServersTest(common.HeatTestCase):
ws.resource_id = server.id
self.patchobject(self.fc.servers, 'get', return_value=server)
console_urls = ws._resolve_any_attribute('console_urls')
- self.assertIsInstance(console_urls, collections.Mapping)
+ self.assertIsInstance(console_urls, collections.abc.Mapping)
supported_consoles = ('novnc', 'xvpvnc', 'spice-html5', 'rdp-html5',
'serial', 'webmks')
self.assertEqual(set(supported_consoles),
@@ -4168,7 +4293,7 @@ class ServersTest(common.HeatTestCase):
self.assertEqual("StackValidationFailed: resources.my_server: "
"Property error: Properties.image: Error validating "
"value '1': No image matching Update Image.",
- six.text_type(err))
+ str(err))
def test_server_snapshot(self):
return_server = self.fc.servers.list()[1]
@@ -4191,16 +4316,46 @@ class ServersTest(common.HeatTestCase):
def test_server_check_snapshot_complete_fail(self):
self._test_server_check_snapshot_complete()
- def _test_server_check_snapshot_complete(self, image_status='ERROR'):
+ def test_server_check_snapshot_complete_with_not_complete_task_state(self):
+ for task_state in {'image_uploading', 'image_snapshot_pending',
+ 'image_snapshot', 'image_pending_upload'}:
+ self._test_check_snapshot_complete_with_task_state(
+ task_state=task_state)
+
+ def test_server_check_snapshot_complete_with_active_task_state(self):
+ self._test_check_snapshot_complete_with_task_state()
+
+ def _test_check_snapshot_complete_with_task_state(self,
+ task_state='active'):
return_server = self.fc.servers.list()[1]
return_server.id = '1234'
server = self._create_test_server(return_server,
'test_server_snapshot')
- image_in_error = mock.Mock()
- image_in_error.status = image_status
+ image = mock.MagicMock(status='active')
+ self.patchobject(glance.GlanceClientPlugin, 'get_image',
+ return_value=image)
+ server_with_task_state = mock.Mock()
+ setattr(server_with_task_state, 'OS-EXT-STS:task_state', task_state)
+ mock_get = self.patchobject(
+ nova.NovaClientPlugin, 'get_server',
+ return_value=server_with_task_state)
+
+ if task_state not in {'image_uploading', 'image_snapshot_pending',
+ 'image_snapshot', 'image_pending_upload'}:
+            self.assertTrue(server.check_snapshot_complete('fake_image_id'))
+        else:
+            self.assertFalse(server.check_snapshot_complete('fake_image_id'))
+ mock_get.assert_called_once_with(server.resource_id)
+ def _test_server_check_snapshot_complete(self, image_status='ERROR'):
+ return_server = self.fc.servers.list()[1]
+ return_server.id = '1234'
+ server = self._create_test_server(return_server,
+ 'test_server_snapshot')
+ image_in_error = mock.MagicMock(status=image_status)
self.patchobject(glance.GlanceClientPlugin, 'get_image',
return_value=image_in_error)
+
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(server.snapshot))
@@ -4240,6 +4395,8 @@ class ServersTest(common.HeatTestCase):
'personality'] = {"/fake/path1": "a" * 10}
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
+ self.patchobject(nova.NovaClientPlugin, 'is_version_supported',
+ return_value=False)
resource_defns = tmpl.resource_definitions(stack)
server = servers.Server('server_create_image_err',
resource_defns['WebServer'], stack)
@@ -4257,6 +4414,8 @@ class ServersTest(common.HeatTestCase):
'personality'] = {"/fake/path1": "a" * 10}
self.patchobject(nova.NovaClientPlugin, 'client',
return_value=self.fc)
+ self.patchobject(nova.NovaClientPlugin, 'is_version_supported',
+ return_value=False)
resource_defns = tmpl.resource_definitions(stack)
server = servers.Server('server_create_image_err',
resource_defns['WebServer'], stack)
@@ -4557,7 +4716,7 @@ class ServerInternalPortTest(ServersTest):
ex = self.assertRaises(exception.StackValidationFailed,
server._build_nics, networks)
self.assertEqual('Specified subnet 1234 does not belongs to '
- 'network 4321.', six.text_type(ex))
+ 'network 4321.', str(ex))
def test_build_nics_create_internal_port_all_props_without_extras(self):
tmpl = """
@@ -4917,7 +5076,6 @@ class ServerInternalPortTest(ServersTest):
server.client = mock.Mock()
server.client().servers.get.return_value = Fake()
server.client_plugin = mock.Mock()
- server.client_plugin().has_extension.return_value = True
server._data = {"internal_ports": '[{"id": "1122"}]',
"external_ports": '[{"id": "3344"},{"id": "5566"}]'}
@@ -4959,7 +5117,7 @@ class ServerInternalPortTest(ServersTest):
server.prepare_for_replace)
self.assertIn('Failed to detach interface (1122) from server '
'(ser-11)',
- six.text_type(exc))
+ str(exc))
def test_prepare_ports_for_replace(self):
t, stack, server = self._return_template_stack_and_rsrc_defn(
@@ -5134,7 +5292,7 @@ class ServerInternalPortTest(ServersTest):
server.restore_prev_rsrc)
self.assertIn('Failed to attach interface (3344) to server '
'(old_server)',
- six.text_type(exc))
+ str(exc))
@mock.patch.object(server_network_mixin.ServerNetworkMixin,
'store_external_ports')
@@ -5190,28 +5348,3 @@ class ServerInternalPortTest(ServersTest):
mock.call('prev_rsrc', 1122),
mock.call('prev_rsrc', 3344),
mock.call('prev_rsrc', 5566)])
-
- def test_store_external_ports_os_interface_not_installed(self):
- t, stack, server = self._return_template_stack_and_rsrc_defn(
- 'test', tmpl_server_with_network_id)
-
- class Fake(object):
- def interface_list(self):
- return [iface('1122'),
- iface('1122'),
- iface('2233'),
- iface('3344')]
-
- server.client = mock.Mock()
- server.client().servers.get.return_value = Fake()
- server.client_plugin = mock.Mock()
- server.client_plugin().has_extension.return_value = False
-
- server._data = {"internal_ports": '[{"id": "1122"}]',
- "external_ports": '[{"id": "3344"},{"id": "5566"}]'}
-
- iface = collections.namedtuple('iface', ['port_id'])
- update_data = self.patchobject(server, '_data_update_ports')
-
- server.store_external_ports()
- self.assertEqual(0, update_data.call_count)
diff --git a/heat/tests/openstack/nova/test_server_group.py b/heat/tests/openstack/nova/test_server_group.py
index a8129fa9e..0a1e682e0 100644
--- a/heat/tests/openstack/nova/test_server_group.py
+++ b/heat/tests/openstack/nova/test_server_group.py
@@ -12,11 +12,9 @@
# under the License.
import json
-
-import mock
+from unittest import mock
from heat.common import template_format
-from heat.engine.clients.os import nova
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
@@ -44,8 +42,6 @@ class FakeGroup(object):
class NovaServerGroupTest(common.HeatTestCase):
def setUp(self):
super(NovaServerGroupTest, self).setUp()
- self.patchobject(nova.NovaClientPlugin, 'has_extension',
- return_value=True)
def _init_template(self, sg_template):
template = template_format.parse(json.dumps(sg_template))
diff --git a/heat/tests/openstack/octavia/inline_templates.py b/heat/tests/openstack/octavia/inline_templates.py
index 29ed8f887..c6453bd1d 100644
--- a/heat/tests/openstack/octavia/inline_templates.py
+++ b/heat/tests/openstack/octavia/inline_templates.py
@@ -25,6 +25,7 @@ resources:
provider: octavia
tenant_id: 1234
admin_state_up: True
+ flavor: f123
'''
LISTENER_TEMPLATE = '''
@@ -47,6 +48,9 @@ resources:
- ref2
connection_limit: -1
tenant_id: 1234
+ allowed_cidrs:
+ - 10.10.0.0/16
+ - 192.168.0.0/16
'''
POOL_TEMPLATE = '''
@@ -132,3 +136,30 @@ resources:
value: test_value
invert: False
'''
+
+FLAVORPROFILE_TEMPLATE = '''
+heat_template_version: 2016-10-14
+description: Template to test FlavorProfile Octavia resource
+resources:
+ flavor_profile:
+ type: OS::Octavia::FlavorProfile
+ properties:
+ name: test_flavor_profile
+ provider_name: test_provider
+ flavor_data: |
+ {"flavor_data_key": "flavor_data_value"}
+'''
+
+
+FLAVOR_TEMPLATE = '''
+heat_template_version: 2016-10-14
+description: Template to test Flavor Octavia resource
+resources:
+ flavor:
+ type: OS::Octavia::Flavor
+ properties:
+ flavor_profile: test_flavor_profile_id
+ name: test_name
+ description: test_description
+ enabled: True
+'''
diff --git a/heat/tests/openstack/octavia/test_flavor.py b/heat/tests/openstack/octavia/test_flavor.py
new file mode 100644
index 000000000..3d5b9537d
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_flavor.py
@@ -0,0 +1,95 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from heat.common import template_format
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class FlavorTest(common.HeatTestCase):
+
+ def _create_stack(self, tmpl=inline_templates.FLAVOR_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.flavor = self.stack['flavor']
+
+ self.octavia_client = mock.MagicMock()
+ self.flavor.client = mock.MagicMock()
+ self.flavor.client.return_value = self.octavia_client
+
+ self.flavor.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+ self.patchobject(self.flavor, 'physical_resource_name',
+ return_value='resource_name')
+
+ def test_create(self):
+ self._create_stack()
+ self.octavia_client.flavor_show.side_effect = [
+ {'flavor': {'id': 'f123'}}
+ ]
+ expected = {
+ 'flavor': {
+ 'name': 'test_name',
+ 'description': 'test_description',
+ 'flavor_profile_id': 'test_flavor_profile_id',
+ 'enabled': True,
+ }
+ }
+
+ self.flavor.handle_create()
+
+ self.octavia_client.flavor_create.assert_called_with(
+ json=expected)
+
+ def test_update(self):
+ self._create_stack()
+ self.flavor.resource_id_set('f123')
+ prop_diff = {
+ 'name': 'test_name2',
+ 'description': 'test_description2',
+ 'flavor_profile_id': 'test_flavor_profile_id2',
+ 'enabled': False,
+ }
+
+ self.flavor.handle_update(None, None, prop_diff)
+
+ self.octavia_client.flavor_set.assert_called_once_with(
+ 'f123', json={'flavor': prop_diff})
+
+ self.octavia_client.flavor_set.reset_mock()
+
+        # Updating a flavor with None as the name should use
+        # physical_resource_name() as the new name
+ prop_diff = {
+ 'name': None,
+ 'description': 'test_description3',
+ 'flavor_profile_id': 'test_flavor_profile_id3',
+ 'enabled': True,
+ }
+
+ self.flavor.handle_update(None, None, prop_diff)
+
+ self.assertEqual(prop_diff['name'], 'resource_name')
+ self.octavia_client.flavor_set.assert_called_once_with(
+ 'f123', json={'flavor': prop_diff})
+
+ def test_delete(self):
+ self._create_stack()
+ self.flavor.resource_id_set('f123')
+
+ self.flavor.handle_delete()
+
+ self.octavia_client.flavor_delete.assert_called_with('f123')
diff --git a/heat/tests/openstack/octavia/test_flavor_profile.py b/heat/tests/openstack/octavia/test_flavor_profile.py
new file mode 100644
index 000000000..4c0646a33
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_flavor_profile.py
@@ -0,0 +1,92 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from heat.common import template_format
+from heat.tests import common
+from heat.tests.openstack.octavia import inline_templates
+from heat.tests import utils
+
+
+class FlavorProfileTest(common.HeatTestCase):
+
+ def _create_stack(self, tmpl=inline_templates.FLAVORPROFILE_TEMPLATE):
+ self.t = template_format.parse(tmpl)
+ self.stack = utils.parse_stack(self.t)
+ self.flavor_profile = self.stack['flavor_profile']
+
+ self.octavia_client = mock.MagicMock()
+ self.flavor_profile.client = mock.MagicMock()
+ self.flavor_profile.client.return_value = self.octavia_client
+
+ self.flavor_profile.client_plugin().client = mock.MagicMock(
+ return_value=self.octavia_client)
+ self.patchobject(self.flavor_profile, 'physical_resource_name',
+ return_value='resource_name')
+
+ def test_create(self):
+ self._create_stack()
+ self.octavia_client.flavorprofile_show.side_effect = [
+ {'flavorprofile': {'id': 'fp123'}}
+ ]
+ expected = {
+ 'flavorprofile': {
+ 'name': 'test_flavor_profile',
+ 'provider_name': 'test_provider',
+ 'flavor_data': '{"flavor_data_key": "flavor_data_value"}\n'
+ }
+ }
+
+ self.flavor_profile.handle_create()
+
+ self.octavia_client.flavorprofile_create.assert_called_with(
+ json=expected)
+
+ def test_update(self):
+ self._create_stack()
+ self.flavor_profile.resource_id_set('f123')
+ prop_diff = {
+ 'name': 'test_flavor_profile2',
+ 'provider_name': 'test_provider2',
+ 'flavor_data': '{"flavor_data_key2": "flavor_data_value2"}\n'
+ }
+
+ self.flavor_profile.handle_update(None, None, prop_diff)
+
+ self.octavia_client.flavorprofile_set.assert_called_once_with(
+ 'f123', json={'flavorprofile': prop_diff})
+
+ self.octavia_client.flavorprofile_set.reset_mock()
+
+        # Updating a flavor profile with None as the name should use
+        # physical_resource_name() as the new name
+ prop_diff = {
+ 'name': None,
+ 'provider_name': 'test_provider3',
+ 'flavor_data': '{"flavor_data_key3": "flavor_data_value3"}\n'
+ }
+
+ self.flavor_profile.handle_update(None, None, prop_diff)
+
+ self.assertEqual(prop_diff['name'], 'resource_name')
+ self.octavia_client.flavorprofile_set.assert_called_once_with(
+ 'f123', json={'flavorprofile': prop_diff})
+
+ def test_delete(self):
+ self._create_stack()
+ self.flavor_profile.resource_id_set('f123')
+
+ self.flavor_profile.handle_delete()
+
+ self.octavia_client.flavorprofile_delete.assert_called_with('f123')
diff --git a/heat/tests/openstack/octavia/test_health_monitor.py b/heat/tests/openstack/octavia/test_health_monitor.py
index 880473881..bbc4c3e18 100644
--- a/heat/tests/openstack/octavia/test_health_monitor.py
+++ b/heat/tests/openstack/octavia/test_health_monitor.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from osc_lib import exceptions
diff --git a/heat/tests/openstack/octavia/test_l7policy.py b/heat/tests/openstack/octavia/test_l7policy.py
index c66eff043..0628a94e5 100644
--- a/heat/tests/openstack/octavia/test_l7policy.py
+++ b/heat/tests/openstack/octavia/test_l7policy.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import yaml
from osc_lib import exceptions
diff --git a/heat/tests/openstack/octavia/test_l7rule.py b/heat/tests/openstack/octavia/test_l7rule.py
index 2d46d6fca..e5d1eb6b4 100644
--- a/heat/tests/openstack/octavia/test_l7rule.py
+++ b/heat/tests/openstack/octavia/test_l7rule.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import yaml
from osc_lib import exceptions
diff --git a/heat/tests/openstack/octavia/test_listener.py b/heat/tests/openstack/octavia/test_listener.py
index e4ddc85c7..bfafe29b1 100644
--- a/heat/tests/openstack/octavia/test_listener.py
+++ b/heat/tests/openstack/octavia/test_listener.py
@@ -12,7 +12,7 @@
# under the License.
-import mock
+from unittest import mock
import yaml
from osc_lib import exceptions
@@ -75,6 +75,7 @@ class ListenerTest(common.HeatTestCase):
'sni_container_refs': ['ref1', 'ref2'],
'connection_limit': -1,
'tenant_id': '1234',
+ 'allowed_cidrs': ['10.10.0.0/16', '192.168.0.0/16']
}
}
diff --git a/heat/tests/openstack/octavia/test_loadbalancer.py b/heat/tests/openstack/octavia/test_loadbalancer.py
index ad5b9c8b0..a93dda3f4 100644
--- a/heat/tests/openstack/octavia/test_loadbalancer.py
+++ b/heat/tests/openstack/octavia/test_loadbalancer.py
@@ -11,13 +11,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from neutronclient.neutron import v2_0 as neutronV20
from osc_lib import exceptions
from heat.common import exception
from heat.common import template_format
+from heat.engine.clients.os.octavia import OctaviaClientPlugin
from heat.engine.resources.openstack.octavia import loadbalancer
from heat.tests import common
from heat.tests.openstack.octavia import inline_templates
@@ -41,6 +42,8 @@ class LoadBalancerTest(common.HeatTestCase):
self.patchobject(neutronV20, 'find_resourceid_by_name_or_id',
return_value='123')
+ self.patchobject(OctaviaClientPlugin, 'get_flavor',
+ return_value='f123')
self.lb.client_plugin().client = mock.MagicMock(
return_value=self.octavia_client)
@@ -58,6 +61,7 @@ class LoadBalancerTest(common.HeatTestCase):
'provider': 'octavia',
'project_id': '1234',
'admin_state_up': True,
+ 'flavor_id': 'f123',
}
}
diff --git a/heat/tests/openstack/octavia/test_pool.py b/heat/tests/openstack/octavia/test_pool.py
index f4e74930f..1192d53d7 100644
--- a/heat/tests/openstack/octavia/test_pool.py
+++ b/heat/tests/openstack/octavia/test_pool.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import yaml
from osc_lib import exceptions
@@ -87,7 +87,8 @@ class PoolTest(common.HeatTestCase):
'listener_id': '123',
'loadbalancer_id': 'my_lb',
'protocol': 'HTTP',
- 'admin_state_up': True
+ 'admin_state_up': True,
+ 'tls_enabled': False,
}
}
diff --git a/heat/tests/openstack/octavia/test_pool_member.py b/heat/tests/openstack/octavia/test_pool_member.py
index a75b3e808..07ce9f075 100644
--- a/heat/tests/openstack/octavia/test_pool_member.py
+++ b/heat/tests/openstack/octavia/test_pool_member.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from neutronclient.neutron import v2_0 as neutronV20
from osc_lib import exceptions
diff --git a/heat/tests/openstack/octavia/test_quota.py b/heat/tests/openstack/octavia/test_quota.py
new file mode 100644
index 000000000..390ff3083
--- /dev/null
+++ b/heat/tests/openstack/octavia/test_quota.py
@@ -0,0 +1,141 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+import six
+
+from heat.common import exception
+from heat.common import template_format
+from heat.engine.clients.os import keystone as k_plugin
+from heat.engine import rsrc_defn
+from heat.engine import stack as parser
+from heat.engine import template
+from heat.tests import common
+from heat.tests import utils
+
+quota_template = '''
+heat_template_version: newton
+
+description: Sample octavia quota heat template
+
+resources:
+ my_quota:
+ type: OS::Octavia::Quota
+ properties:
+ project: demo
+ healthmonitor: 5
+ listener: 5
+ loadbalancer: 5
+ pool: 5
+ member: 5
+'''
+
+valid_properties = [
+ 'healthmonitor', 'listener', 'loadbalancer', 'pool', 'member'
+]
+
+
+class OctaviaQuotaTest(common.HeatTestCase):
+    def setUp(self):
+        super(OctaviaQuotaTest, self).setUp()
+
+ self.ctx = utils.dummy_context()
+ self.patchobject(k_plugin.KeystoneClientPlugin, 'get_project_id',
+ return_value='some_project_id')
+ tpl = template_format.parse(quota_template)
+ self.stack = parser.Stack(
+            self.ctx, 'octavia_quota_test_stack',
+ template.Template(tpl)
+ )
+
+ self.my_quota = self.stack['my_quota']
+        octavia = mock.MagicMock()
+        self.octaviaclient = mock.MagicMock()
+        self.my_quota.client = octavia
+        octavia.return_value = self.octaviaclient
+        self.quotas = self.octaviaclient.quotas
+ self.quota_set = mock.MagicMock()
+ self.quotas.update.return_value = self.quota_set
+ self.quotas.delete.return_value = self.quota_set
+
+ def _test_validate(self, resource, error_msg):
+ exc = self.assertRaises(exception.StackValidationFailed,
+ resource.validate)
+ self.assertIn(error_msg, six.text_type(exc))
+
+ def _test_invalid_property(self, prop_name):
+ my_quota = self.stack['my_quota']
+ props = self.stack.t.t['resources']['my_quota']['properties'].copy()
+ props[prop_name] = -2
+ my_quota.t = my_quota.t.freeze(properties=props)
+ my_quota.reparse()
+ error_msg = ('Property error: resources.my_quota.properties.%s:'
+ ' -2 is out of range (min: -1, max: None)' % prop_name)
+ self._test_validate(my_quota, error_msg)
+
+ def test_invalid_properties(self):
+ for prop in valid_properties:
+ self._test_invalid_property(prop)
+
+ def test_miss_all_quotas(self):
+ my_quota = self.stack['my_quota']
+ props = self.stack.t.t['resources']['my_quota']['properties'].copy()
+ for key in valid_properties:
+ if key in props:
+ del props[key]
+ my_quota.t = my_quota.t.freeze(properties=props)
+ my_quota.reparse()
+ msg = ('At least one of the following properties must be specified: '
+ 'healthmonitor, listener, loadbalancer, member, pool.')
+ self.assertRaisesRegex(exception.PropertyUnspecifiedError, msg,
+ my_quota.validate)
+
+ def test_quota_handle_create(self):
+ self.my_quota.physical_resource_name = mock.MagicMock(
+ return_value='some_resource_id')
+ self.my_quota.reparse()
+ self.my_quota.handle_create()
+ self.quotas.update.assert_called_once_with(
+ 'some_project_id',
+ healthmonitor=5,
+ listener=5,
+ loadbalancer=5,
+ pool=5,
+ member=5
+ )
+ self.assertEqual('some_resource_id', self.my_quota.resource_id)
+
+ def test_quota_handle_update(self):
+ tmpl_diff = mock.MagicMock()
+ prop_diff = mock.MagicMock()
+ props = {'project': 'some_project_id', 'pool': 1, 'member': 2,
+ 'listener': 3, 'loadbalancer': 4, 'healthmonitor': 2}
+ json_snippet = rsrc_defn.ResourceDefinition(
+ self.my_quota.name,
+ 'OS::Octavia::Quota',
+ properties=props)
+ self.my_quota.reparse()
+ self.my_quota.handle_update(json_snippet, tmpl_diff, prop_diff)
+ self.quotas.update.assert_called_once_with(
+ 'some_project_id',
+ pool=1,
+ member=2,
+ listener=3,
+ loadbalancer=4,
+ healthmonitor=2
+ )
+
+ def test_quota_handle_delete(self):
+ self.my_quota.reparse()
+ self.my_quota.handle_delete()
+ self.quotas.delete.assert_called_once_with('some_project_id')
diff --git a/heat/tests/openstack/sahara/test_cluster.py b/heat/tests/openstack/sahara/test_cluster.py
index f12d46f25..d137984e9 100644
--- a/heat/tests/openstack/sahara/test_cluster.py
+++ b/heat/tests/openstack/sahara/test_cluster.py
@@ -13,9 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common import template_format
@@ -147,7 +147,7 @@ class SaharaClusterTest(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure, create_task)
expected = ('ResourceInError: resources.super-cluster: '
'Went to status Error due to "Unknown"')
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
def test_cluster_check_delete_complete_error(self):
cluster = self._create_cluster(self.t)
@@ -158,7 +158,7 @@ class SaharaClusterTest(common.HeatTestCase):
delete_task = scheduler.TaskRunner(cluster.delete)
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = "APIException: resources.super-cluster: None"
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
self.assertEqual(2, self.cl_mgr.get.call_count)
@@ -172,7 +172,7 @@ class SaharaClusterTest(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = ('ResourceInError: resources.super-cluster: '
'Went to status Error due to "Unknown"')
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
self.assertEqual(2, self.cl_mgr.get.call_count)
@@ -198,7 +198,7 @@ class SaharaClusterTest(common.HeatTestCase):
self.assertIn("default_image_id must be provided: "
"Referenced cluster template some_cluster_template_id "
"has no default_image_id defined.",
- six.text_type(ex.message))
+ str(ex.message))
def test_cluster_validate_no_network_on_neutron_fails(self):
self.t['resources']['super-cluster']['properties'].pop(
@@ -208,7 +208,7 @@ class SaharaClusterTest(common.HeatTestCase):
cluster.validate)
error_msg = ('Property error: resources.super-cluster.properties: '
'Property neutron_management_network not assigned')
- self.assertEqual(error_msg, six.text_type(ex))
+ self.assertEqual(error_msg, str(ex))
def test_deprecated_properties_correctly_translates(self):
tmpl = '''
diff --git a/heat/tests/openstack/sahara/test_data_source.py b/heat/tests/openstack/sahara/test_data_source.py
index 4da071b42..3417ee706 100644
--- a/heat/tests/openstack/sahara/test_data_source.py
+++ b/heat/tests/openstack/sahara/test_data_source.py
@@ -12,8 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -109,4 +108,4 @@ class SaharaDataSourceTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed, ds.validate)
error_msg = ('Property error: resources.data-source.properties.'
'credentials: Property user not assigned')
- self.assertEqual(error_msg, six.text_type(ex))
+ self.assertEqual(error_msg, str(ex))
diff --git a/heat/tests/openstack/sahara/test_image.py b/heat/tests/openstack/sahara/test_image.py
index 5da7a17eb..a7b00d896 100644
--- a/heat/tests/openstack/sahara/test_image.py
+++ b/heat/tests/openstack/sahara/test_image.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
from heat.common import template_format
from heat.engine.clients.os import glance
diff --git a/heat/tests/openstack/sahara/test_job.py b/heat/tests/openstack/sahara/test_job.py
index 0566b8f45..4d8234b18 100644
--- a/heat/tests/openstack/sahara/test_job.py
+++ b/heat/tests/openstack/sahara/test_job.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
from heat.common import template_format
from heat.engine.clients.os import sahara
diff --git a/heat/tests/openstack/sahara/test_job_binary.py b/heat/tests/openstack/sahara/test_job_binary.py
index 97fa903bd..ed9ba8142 100644
--- a/heat/tests/openstack/sahara/test_job_binary.py
+++ b/heat/tests/openstack/sahara/test_job_binary.py
@@ -12,8 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -122,7 +121,7 @@ class SaharaJobBinaryTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed, jb.validate)
error_msg = ('resources.job-binary.properties: internal-db://38273f82 '
'is not a valid job location.')
- self.assertEqual(error_msg, six.text_type(ex))
+ self.assertEqual(error_msg, str(ex))
def test_validate_password_without_user(self):
props = self.stack.t.t['resources']['job-binary']['properties'].copy()
@@ -132,4 +131,4 @@ class SaharaJobBinaryTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed, jb.validate)
error_msg = ('Property error: resources.job-binary.properties.'
'credentials: Property user not assigned')
- self.assertEqual(error_msg, six.text_type(ex))
+ self.assertEqual(error_msg, str(ex))
diff --git a/heat/tests/openstack/sahara/test_templates.py b/heat/tests/openstack/sahara/test_templates.py
index 952ffa37a..774e52fb7 100644
--- a/heat/tests/openstack/sahara/test_templates.py
+++ b/heat/tests/openstack/sahara/test_templates.py
@@ -13,8 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -182,10 +181,10 @@ class SaharaNodeGroupTemplateTest(common.HeatTestCase):
]
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Too many',
- six.text_type(ex))
+ str(ex))
ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
self.assertEqual('Not found',
- six.text_type(ex))
+ str(ex))
def test_validate_flavor_constraint_return_false(self):
self.t['resources']['node-group']['properties'].pop('floating_ip_pool')
@@ -197,7 +196,7 @@ class SaharaNodeGroupTemplateTest(common.HeatTestCase):
self.assertEqual(u"Property error: "
u"resources.node-group.properties.flavor: "
u"Error validating value 'm1.large'",
- six.text_type(ex))
+ str(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(node_group_template_without_name)
@@ -228,11 +227,11 @@ class SaharaNodeGroupTemplateTest(common.HeatTestCase):
self.assertIn("resources.node-group.properties: Plugin vanilla "
"doesn't support the following node processes: "
"jobtracker. Allowed processes are: ",
- six.text_type(ex))
- self.assertIn("namenode", six.text_type(ex))
- self.assertIn("datanode", six.text_type(ex))
- self.assertIn("secondarynamenode", six.text_type(ex))
- self.assertIn("oozie", six.text_type(ex))
+ str(ex))
+ self.assertIn("namenode", str(ex))
+ self.assertIn("datanode", str(ex))
+ self.assertIn("secondarynamenode", str(ex))
+ self.assertIn("oozie", str(ex))
def test_update(self):
ngt = self._create_ngt(self.t)
@@ -374,7 +373,7 @@ class SaharaClusterTemplateTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
ct.validate)
self.assertEqual("neutron_management_network must be provided",
- six.text_type(ex))
+ str(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(cluster_template_without_name)
diff --git a/heat/tests/openstack/senlin/test_cluster.py b/heat/tests/openstack/senlin/test_cluster.py
index 88656df44..32b39fb54 100644
--- a/heat/tests/openstack/senlin/test_cluster.py
+++ b/heat/tests/openstack/senlin/test_cluster.py
@@ -14,9 +14,9 @@
# limitations under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common import template_format
@@ -122,6 +122,8 @@ class SenlinClusterTest(common.HeatTestCase):
self.assertEqual((cluster.CREATE, cluster.COMPLETE),
cluster.state)
self.assertEqual(self.fake_cl.id, cluster.resource_id)
+ self.assertEqual(1, self.senlin_mock.get_action.call_count)
+ self.assertEqual(1, self.senlin_mock.get_cluster.call_count)
return cluster
def test_cluster_create_success(self):
@@ -149,18 +151,18 @@ class SenlinClusterTest(common.HeatTestCase):
cfg.CONF.set_override('action_retry_limit', 0)
cluster = self._init_cluster(self.t)
self.senlin_mock.create_cluster.return_value = self.fake_cl
- mock_action = mock.MagicMock()
- mock_action.status = 'FAILED'
- mock_action.status_reason = 'oops'
+ mock_cluster = mock.MagicMock()
+ mock_cluster.status = 'ERROR'
+ mock_cluster.status_reason = 'oops'
self.senlin_mock.get_policy.return_value = mock.Mock(
id='fake_policy_id'
)
- self.senlin_mock.get_action.return_value = mock_action
+ self.senlin_mock.get_cluster.return_value = mock_cluster
create_task = scheduler.TaskRunner(cluster.create)
ex = self.assertRaises(exception.ResourceFailure, create_task)
expected = ('ResourceInError: resources.senlin-cluster: '
- 'Went to status FAILED due to "oops"')
- self.assertEqual(expected, six.text_type(ex))
+ 'Went to status ERROR due to "oops"')
+ self.assertEqual(expected, str(ex))
def test_cluster_delete_success(self):
cluster = self._create_cluster(self.t)
@@ -177,7 +179,7 @@ class SenlinClusterTest(common.HeatTestCase):
delete_task = scheduler.TaskRunner(cluster.delete)
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = 'Error: resources.senlin-cluster: oops'
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
def test_cluster_update_profile(self):
cluster = self._create_cluster(self.t)
@@ -205,7 +207,7 @@ class SenlinClusterTest(common.HeatTestCase):
}
self.senlin_mock.update_cluster.assert_called_once_with(
cluster=self.fake_cl, **cluster_update_kwargs)
- self.assertEqual(3, self.senlin_mock.get_action.call_count)
+ self.assertEqual(2, self.senlin_mock.get_action.call_count)
def test_cluster_update_desire_capacity(self):
cluster = self._create_cluster(self.t)
@@ -226,7 +228,7 @@ class SenlinClusterTest(common.HeatTestCase):
}
self.senlin_mock.cluster_resize.assert_called_once_with(
cluster=cluster.resource_id, **cluster_resize_kwargs)
- self.assertEqual(3, self.senlin_mock.get_action.call_count)
+ self.assertEqual(2, self.senlin_mock.get_action.call_count)
def test_cluster_update_policy_add_remove(self):
cluster = self._create_cluster(self.t)
@@ -259,7 +261,7 @@ class SenlinClusterTest(common.HeatTestCase):
self.senlin_mock.cluster_detach_policy.assert_called_once_with(
**detach_policy_kwargs)
self.assertEqual(0, self.senlin_mock.cluster_update_policy.call_count)
- self.assertEqual(4, self.senlin_mock.get_action.call_count)
+ self.assertEqual(3, self.senlin_mock.get_action.call_count)
def test_cluster_update_policy_exists(self):
cluster = self._create_cluster(self.t)
@@ -300,7 +302,7 @@ class SenlinClusterTest(common.HeatTestCase):
scheduler.TaskRunner(cluster.update, update_snippet))
self.assertEqual('ResourceInError: resources.senlin-cluster: '
'Went to status FAILED due to "Unknown"',
- six.text_type(exc))
+ str(exc))
def test_cluster_get_attr_collect(self):
cluster = self._create_cluster(self.t)
@@ -370,7 +372,7 @@ class TestSenlinClusterValidation(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
stack['senlin-cluster'].validate)
self.assertEqual('min_size can not be greater than max_size',
- six.text_type(ex))
+ str(ex))
def test_invalid_desired_capacity(self):
self.t['resources']['senlin-cluster']['properties']['min_size'] = 1
@@ -382,5 +384,5 @@ class TestSenlinClusterValidation(common.HeatTestCase):
stack['senlin-cluster'].validate)
self.assertEqual(
'desired_capacity must be between min_size and max_size',
- six.text_type(ex)
+ str(ex)
)
diff --git a/heat/tests/openstack/senlin/test_node.py b/heat/tests/openstack/senlin/test_node.py
index 3966fb265..108d737ea 100644
--- a/heat/tests/openstack/senlin/test_node.py
+++ b/heat/tests/openstack/senlin/test_node.py
@@ -13,9 +13,9 @@
# limitations under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common import template_format
@@ -122,7 +122,7 @@ class SenlinNodeTest(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure, create_task)
expected = ('ResourceInError: resources.senlin-node: '
'Went to status FAILED due to "oops"')
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
def test_node_delete_success(self):
node = self._create_node()
@@ -139,7 +139,7 @@ class SenlinNodeTest(common.HeatTestCase):
delete_task = scheduler.TaskRunner(node.delete)
ex = self.assertRaises(exception.ResourceFailure, delete_task)
expected = 'Error: resources.senlin-node: oops'
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
def test_node_update_profile(self):
node = self._create_node()
@@ -208,7 +208,7 @@ class SenlinNodeTest(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure, update_task)
expected = ('ResourceInError: resources.senlin-node: Went to '
'status FAILED due to "oops"')
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
self.assertEqual((node.UPDATE, node.FAILED), node.state)
self.assertEqual(2, self.senlin_mock.get_action.call_count)
diff --git a/heat/tests/openstack/senlin/test_policy.py b/heat/tests/openstack/senlin/test_policy.py
index b8795550b..5763cc561 100644
--- a/heat/tests/openstack/senlin/test_policy.py
+++ b/heat/tests/openstack/senlin/test_policy.py
@@ -13,8 +13,9 @@
# limitations under the License.
import copy
-import mock
+from unittest import mock
+from openstack.clustering.v1._proxy import Proxy
from openstack import exceptions
from oslo_config import cfg
@@ -71,7 +72,7 @@ class SenlinPolicyTest(common.HeatTestCase):
return_value=True)
self.patchobject(senlin.PolicyTypeConstraint, 'validate',
return_value=True)
- self.senlin_mock = mock.MagicMock()
+ self.senlin_mock = mock.MagicMock(spec=Proxy)
self.senlin_mock.get_cluster.return_value = mock.Mock(
id='c1_id')
self.patchobject(policy.Policy, 'client',
@@ -89,7 +90,7 @@ class SenlinPolicyTest(common.HeatTestCase):
def _create_policy(self, template):
policy = self._init_policy(template)
self.senlin_mock.create_policy.return_value = self.fake_p
- self.senlin_mock.cluster_attach_policy.return_value = {
+ self.senlin_mock.attach_policy_to_cluster.return_value = {
'action': 'fake_action'}
self.senlin_mock.get_action.return_value = mock.Mock(
status='SUCCEEDED')
@@ -97,7 +98,7 @@ class SenlinPolicyTest(common.HeatTestCase):
self.assertEqual((policy.CREATE, policy.COMPLETE),
policy.state)
self.assertEqual(self.fake_p.id, policy.resource_id)
- self.senlin_mock.cluster_attach_policy.assert_called_once_with(
+ self.senlin_mock.attach_policy_to_cluster.assert_called_once_with(
'c1_id', policy.resource_id, enabled=True)
self.senlin_mock.get_action.assert_called_once_with('fake_action')
return policy
@@ -115,7 +116,7 @@ class SenlinPolicyTest(common.HeatTestCase):
cfg.CONF.set_override('action_retry_limit', 0)
policy = self._init_policy(self.t)
self.senlin_mock.create_policy.return_value = self.fake_p
- self.senlin_mock.cluster_attach_policy.return_value = {
+ self.senlin_mock.attach_policy_to_cluster.return_value = {
'action': 'fake_action'}
self.senlin_mock.get_action.return_value = mock.Mock(
status='FAILED', status_reason='oops',
@@ -130,14 +131,14 @@ class SenlinPolicyTest(common.HeatTestCase):
self.assertEqual(err_msg, policy.status_reason)
def test_policy_delete_not_found(self):
- self.senlin_mock.cluster_detach_policy.return_value = {
+ self.senlin_mock.detach_policy_from_cluster.return_value = {
'action': 'fake_action'}
policy = self._create_policy(self.t)
self.senlin_mock.get_policy.side_effect = [
exceptions.ResourceNotFound('SenlinPolicy'),
]
scheduler.TaskRunner(policy.delete)()
- self.senlin_mock.cluster_detach_policy.assert_called_once_with(
+ self.senlin_mock.detach_policy_from_cluster.assert_called_once_with(
'c1_id', policy.resource_id)
self.senlin_mock.delete_policy.assert_called_once_with(
policy.resource_id)
@@ -147,11 +148,11 @@ class SenlinPolicyTest(common.HeatTestCase):
self.senlin_mock.get_policy.side_effect = [
exceptions.ResourceNotFound('SenlinPolicy'),
]
- self.senlin_mock.cluster_detach_policy.side_effect = [
+ self.senlin_mock.detach_policy_from_cluster.side_effect = [
exceptions.HttpException(http_status=400),
]
scheduler.TaskRunner(policy.delete)()
- self.senlin_mock.cluster_detach_policy.assert_called_once_with(
+ self.senlin_mock.detach_policy_from_cluster.assert_called_once_with(
'c1_id', policy.resource_id)
self.senlin_mock.delete_policy.assert_called_once_with(
policy.resource_id)
@@ -170,18 +171,18 @@ class SenlinPolicyTest(common.HeatTestCase):
props['name'] = 'new_name'
rsrc_defns = template.Template(new_t).resource_definitions(self.stack)
new_cluster = rsrc_defns['senlin-policy']
- self.senlin_mock.cluster_attach_policy.return_value = {
+ self.senlin_mock.attach_policy_to_cluster.return_value = {
'action': 'fake_action1'}
- self.senlin_mock.cluster_detach_policy.return_value = {
+ self.senlin_mock.detach_policy_from_cluster.return_value = {
'action': 'fake_action2'}
self.senlin_mock.get_policy.return_value = self.fake_p
scheduler.TaskRunner(policy.update, new_cluster)()
self.assertEqual((policy.UPDATE, policy.COMPLETE), policy.state)
self.senlin_mock.update_policy.assert_called_once_with(
self.fake_p, name='new_name')
- self.senlin_mock.cluster_detach_policy.assert_called_once_with(
+ self.senlin_mock.detach_policy_from_cluster.assert_called_once_with(
'c1_id', policy.resource_id)
- self.senlin_mock.cluster_attach_policy.assert_called_with(
+ self.senlin_mock.attach_policy_to_cluster.assert_called_with(
'c2_id', policy.resource_id, enabled=True)
def test_policy_resolve_attribute(self):
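
Besides the import change, the Senlin policy test now builds its client mock with spec=Proxy, which is why the assertions move to the current openstacksdk proxy names (attach_policy_to_cluster / detach_policy_from_cluster) in place of the old cluster_attach_policy / cluster_detach_policy calls. A sketch of why a spec'd mock forces that, using a hypothetical FakeProxy in place of the real Proxy class:

from unittest import mock


class FakeProxy(object):
    """Stand-in for openstack.clustering.v1._proxy.Proxy (illustrative)."""

    def attach_policy_to_cluster(self, cluster, policy, **kwargs):
        raise NotImplementedError


proxy = mock.MagicMock(spec=FakeProxy)
proxy.attach_policy_to_cluster.return_value = {'action': 'fake_action'}
assert proxy.attach_policy_to_cluster('c1', 'p1') == {'action': 'fake_action'}

# A spec'd mock rejects attributes the real class no longer has, so a call
# to a retired alias fails fast instead of silently returning another
# MagicMock.
try:
    proxy.cluster_attach_policy('c1', 'p1')
except AttributeError:
    pass
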
diff --git a/heat/tests/openstack/senlin/test_profile.py b/heat/tests/openstack/senlin/test_profile.py
index 2a0001a9d..c3eb6b869 100644
--- a/heat/tests/openstack/senlin/test_profile.py
+++ b/heat/tests/openstack/senlin/test_profile.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
from heat.common import template_format
from heat.engine.clients.os import senlin
diff --git a/heat/tests/openstack/senlin/test_receiver.py b/heat/tests/openstack/senlin/test_receiver.py
index 138de4c6b..5685424b2 100644
--- a/heat/tests/openstack/senlin/test_receiver.py
+++ b/heat/tests/openstack/senlin/test_receiver.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
from openstack import exceptions
diff --git a/heat/tests/openstack/swift/test_container.py b/heat/tests/openstack/swift/test_container.py
index a91baba03..82f05b13a 100644
--- a/heat/tests/openstack/swift/test_container.py
+++ b/heat/tests/openstack/swift/test_container.py
@@ -11,9 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
-import mock
-import six
import swiftclient.client as sc
from heat.common import exception
@@ -304,7 +303,7 @@ class SwiftTest(common.HeatTestCase):
container.state)
self.assertIn('ResourceActionNotSupported: resources.test_resource: '
'Deleting non-empty container',
- six.text_type(ex))
+ str(ex))
mock_put.assert_called_once_with(container_name, {})
mock_get.assert_called_once_with(container_name)
@@ -453,7 +452,7 @@ class SwiftTest(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceFailure, runner)
# Verify
- self.assertIn('boom', six.text_type(ex))
+ self.assertIn('boom', str(ex))
self.assertEqual((container.CHECK, container.FAILED),
container.state)
diff --git a/heat/tests/openstack/trove/test_cluster.py b/heat/tests/openstack/trove/test_cluster.py
index 1480bab28..e2e32c1ed 100644
--- a/heat/tests/openstack/trove/test_cluster.py
+++ b/heat/tests/openstack/trove/test_cluster.py
@@ -12,8 +12,8 @@
# under the License.
import copy
-import mock
-import six
+from unittest import mock
+
from troveclient import exceptions as troveexc
from heat.common import exception
@@ -210,7 +210,7 @@ class TroveClusterTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed, tc.validate)
error_msg = ('Datastore version 2.6.2 for datastore type mongodb is '
'not valid. Allowed versions are 2.6.1.')
- self.assertEqual(error_msg, six.text_type(ex))
+ self.assertEqual(error_msg, str(ex))
def test_validate_invalid_flavor(self):
self.troveclient.flavors.get.side_effect = troveexc.NotFound()
@@ -223,4 +223,4 @@ class TroveClusterTest(common.HeatTestCase):
error_msg = ("Property error: "
"resources.cluster.properties.instances[0].flavor: "
"Error validating value 'm1.small': Not Found (HTTP 404)")
- self.assertEqual(error_msg, six.text_type(ex))
+ self.assertEqual(error_msg, str(ex))
diff --git a/heat/tests/openstack/trove/test_instance.py b/heat/tests/openstack/trove/test_instance.py
index 6653c4af6..bc4d024d2 100644
--- a/heat/tests/openstack/trove/test_instance.py
+++ b/heat/tests/openstack/trove/test_instance.py
@@ -11,11 +11,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import uuid
-import mock
from oslo_config import cfg
-import six
from troveclient import exceptions as troveexc
from troveclient.v1 import users
@@ -180,7 +179,7 @@ class InstanceTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceInError,
osdb_res.check_create_complete,
mock_input)
- self.assertIn(error_string, six.text_type(exc))
+ self.assertIn(error_string, str(exc))
mock_input = mock.Mock()
mock_input.status = 'FAILED'
@@ -193,7 +192,7 @@ class InstanceTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceInError,
osdb_res.check_create_complete,
mock_input)
- self.assertIn(error_string, six.text_type(exc))
+ self.assertIn(error_string, str(exc))
# test if error string is not defined
@@ -205,7 +204,7 @@ class InstanceTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceInError,
osdb_res.check_create_complete,
mock_input)
- self.assertIn(error_string, six.text_type(exc))
+ self.assertIn(error_string, str(exc))
def _create_failed_bad_status(self, status, error_message):
t = template_format.parse(db_template)
@@ -216,7 +215,7 @@ class InstanceTest(common.HeatTestCase):
ex = self.assertRaises(exception.ResourceInError,
instance.check_create_complete,
self.fake_instance.id)
- self.assertIn(error_message, six.text_type(ex))
+ self.assertIn(error_message, str(ex))
def test_create_failed_status_error(self):
self._create_failed_bad_status(
@@ -294,7 +293,7 @@ class InstanceTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
- self.assertIn('FOOBAR', six.text_type(exc))
+ self.assertIn('FOOBAR', str(exc))
self.assertEqual((res.CHECK, res.FAILED), res.state)
# return previous status
self.fake_instance.status = 'ACTIVE'
@@ -366,7 +365,7 @@ class InstanceTest(common.HeatTestCase):
instance.validate)
self.assertEqual("Database ['invaliddb'] specified for user does not "
"exist in databases for resource MySqlCloudDB.",
- six.text_type(ex))
+ str(ex))
def test_instance_validation_db_name_hyphens(self):
t = template_format.parse(db_template)
@@ -400,7 +399,7 @@ class InstanceTest(common.HeatTestCase):
instance.validate)
self.assertEqual('Databases property is required if users property '
'is provided for resource MySqlCloudDB.',
- six.text_type(ex))
+ str(ex))
def test_instance_validation_user_no_db(self):
t = template_format.parse(db_template)
@@ -415,7 +414,7 @@ class InstanceTest(common.HeatTestCase):
self.assertEqual('Property error: '
'Resources.MySqlCloudDB.Properties.'
'users[0].databases: length (0) is out of range '
- '(min: 1, max: None)', six.text_type(ex))
+ '(min: 1, max: None)', str(ex))
def test_instance_validation_no_datastore_yes_version(self):
t = template_format.parse(db_template)
@@ -424,7 +423,7 @@ class InstanceTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
instance.validate)
exp_msg = "Not allowed - datastore_version without datastore_type."
- self.assertEqual(exp_msg, six.text_type(ex))
+ self.assertEqual(exp_msg, str(ex))
def test_instance_validation_no_ds_version(self):
t = template_format.parse(db_template)
@@ -449,7 +448,7 @@ class InstanceTest(common.HeatTestCase):
expected_msg = ("Datastore version SomeVersion for datastore type "
"mysql is not valid. "
"Allowed versions are MariaDB-5.5.")
- self.assertEqual(expected_msg, six.text_type(ex))
+ self.assertEqual(expected_msg, str(ex))
def test_instance_validation_implicit_version(self):
t = template_format.parse(db_template)
@@ -475,7 +474,7 @@ class InstanceTest(common.HeatTestCase):
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Either network or port must be provided.',
- six.text_type(ex))
+ str(ex))
def test_instance_validation_no_net_no_port_fail(self):
t = template_format.parse(db_template)
@@ -489,7 +488,7 @@ class InstanceTest(common.HeatTestCase):
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Either network or port must be provided.',
- six.text_type(ex))
+ str(ex))
def test_instance_validation_nic_port_on_novanet_fails(self):
t = template_format.parse(db_template)
@@ -503,7 +502,7 @@ class InstanceTest(common.HeatTestCase):
ex = self.assertRaises(
exception.StackValidationFailed, instance.validate)
self.assertEqual('Can not use port property on Nova-network.',
- six.text_type(ex))
+ str(ex))
def test_instance_create_with_port(self):
t = template_format.parse(db_template_with_nics)
@@ -717,7 +716,7 @@ class InstanceUpdateTests(common.HeatTestCase):
trove.check_update_complete,
{"foo": "bar"})
msg = "The last operation for the database instance failed"
- self.assertIn(msg, six.text_type(exc))
+ self.assertIn(msg, str(exc))
def test_check_client_exceptions(self, mock_client, mock_plugin):
mock_instance = mock.Mock(status="ACTIVE")
diff --git a/heat/tests/openstack/vitrage/__init__.py b/heat/tests/openstack/vitrage/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/heat/tests/openstack/vitrage/__init__.py
diff --git a/heat/tests/openstack/vitrage/test_vitrage_template.py b/heat/tests/openstack/vitrage/test_vitrage_template.py
new file mode 100644
index 000000000..7e83fbb4f
--- /dev/null
+++ b/heat/tests/openstack/vitrage/test_vitrage_template.py
@@ -0,0 +1,183 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from heat.common import exception
+from heat.common import template_format
+from heat.engine.clients.os import mistral as mistral_client
+from heat.engine.resources.openstack.vitrage.vitrage_template import \
+ VitrageTemplate
+from heat.engine import scheduler
+from heat.tests import common
+from heat.tests import utils
+
+
+vitrage_template = '''
+heat_template_version: rocky
+resources:
+ execute_healing:
+ type: OS::Vitrage::Template
+ description: Execute Mistral healing workflow if instance is down
+ properties:
+ template_file: execute_healing_on_instance_down.yaml
+ template_params:
+ instance_alarm_name: Instance down
+ instance_id: 1233e48c-62ee-470e-8d4a-adff30211b5d
+ workflow_name: autoheal
+ heat_stack_id: 12cc6d3e-f801-4422-b2a0-43cedacb4eb5
+
+'''
+
+
+class TestVitrageTemplate(common.HeatTestCase):
+ def setUp(self):
+ super(TestVitrageTemplate, self).setUp()
+ self.ctx = utils.dummy_context()
+ template = template_format.parse(vitrage_template)
+ self.stack = utils.parse_stack(template, stack_name='test_stack')
+
+ resource_defs = self.stack.t.resource_definitions(self.stack)
+ self.resource_def = resource_defs['execute_healing']
+
+ self.vitrage = mock.Mock()
+ self.patchobject(VitrageTemplate, 'client', return_value=self.vitrage)
+
+ self.patches = []
+ self.patches.append(mock.patch.object(mistral_client,
+ 'mistral_base'))
+ self.patches.append(mock.patch.object(
+ mistral_client.MistralClientPlugin, '_create'))
+ for patch in self.patches:
+ patch.start()
+
+ self.mistral_client = \
+ mistral_client.MistralClientPlugin(context=self.ctx)
+
+ def tearDown(self):
+ super(TestVitrageTemplate, self).tearDown()
+ for patch in self.patches:
+ patch.stop()
+
+ def test_create(self):
+ template = self._create_resource(
+ 'execute_healing', self.resource_def, self.stack)
+ expected_state = (template.CREATE, template.COMPLETE)
+
+ # Verify the creation succeeded
+ self.assertEqual(expected_state, template.state)
+ self.assertEqual('2fddb683-e32c-4a9b-b8c8-df59af1f5a1a',
+ template.get_reference_id())
+
+ def test_validate(self):
+ self.vitrage.template.validate.return_value = {
+ "results": [
+ {
+ "status": "validation OK",
+ "file path": "/tmp/tmpNUEgE3",
+ "message": "Template validation is OK",
+ "status code": 0,
+ "description": "Template validation"
+ }
+ ]
+ }
+
+ # No result for a valid template
+ template = \
+ VitrageTemplate('execute_healing', self.resource_def, self.stack)
+ scheduler.TaskRunner(template.validate)()
+ self.vitrage.template.validate.assert_called_once()
+
+ def test_validate_vitrage_validate_wrong_format(self):
+ """wrong result format for vitrage template validate"""
+ template = \
+ VitrageTemplate('execute_healing', self.resource_def, self.stack)
+
+ # empty return value
+ self.vitrage.template.validate.return_value = {}
+ self.assertRaises(exception.StackValidationFailed,
+ scheduler.TaskRunner(template.validate))
+
+ # empty 'results'
+ self.vitrage.template.validate.return_value = {
+ "results": []
+ }
+ self.assertRaises(exception.StackValidationFailed,
+ scheduler.TaskRunner(template.validate))
+
+ # too many 'results'
+ self.vitrage.template.validate.return_value = {
+ "results": [
+ {
+ "status": "validation OK",
+ "file path": "/tmp/tmpNUEgE3",
+ "message": "Template validation is OK",
+ "status code": 0,
+ "description": "Template validation"
+ },
+ {
+ "status": "validation OK",
+ "file path": "/tmp/tmpNUEgE3",
+ "message": "Template validation is OK",
+ "status code": 0,
+ "description": "Template validation"
+ },
+ ]
+ }
+ self.assertRaises(exception.StackValidationFailed,
+ scheduler.TaskRunner(template.validate))
+
+ # no 'status code'
+ self.vitrage.template.validate.return_value = {
+ "results": [
+ {
+ "status": "validation OK",
+ "file path": "/tmp/tmpNUEgE3",
+ "message": "Template validation is OK",
+ "description": "Template validation"
+ }
+ ]
+ }
+ self.assertRaises(exception.StackValidationFailed,
+ scheduler.TaskRunner(template.validate))
+
+ def test_validate_vitrage_validation_failed(self):
+ template = \
+ VitrageTemplate('execute_healing', self.resource_def, self.stack)
+
+ self.vitrage.template.validate.return_value = {
+ "results": [
+ {
+ "status": "validation failed",
+ "file path": "/tmp/tmpNUEgE3",
+ "status code": 163,
+ "message": "Failed to resolve parameter",
+ "description": "Template content validation"
+ }
+ ]
+ }
+ self.assertRaises(exception.StackValidationFailed,
+ scheduler.TaskRunner(template.validate))
+
+ def _create_resource(self, name, snippet, stack):
+ template = VitrageTemplate(name, snippet, stack)
+ self.vitrage.template.add.return_value = [{
+ 'status': 'LOADING',
+ 'uuid': '2fddb683-e32c-4a9b-b8c8-df59af1f5a1a',
+ 'status details': 'Template validation is OK',
+ 'date': '2019-02-20 16:36:17.240976',
+ 'type': 'standard',
+ 'name': 'Stack40-execute_healing-4ri7d3vlwp5w'
+ }]
+ scheduler.TaskRunner(template.create)()
+ return template
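
The new Vitrage test drives OS::Vitrage::Template validation through a mocked client and expects exactly one entry under 'results' whose 'status code' is 0; an empty reply, an empty or over-long results list, a missing code or a non-zero code all end in StackValidationFailed. A hedged sketch of that acceptance rule, using an illustrative helper rather than Heat's actual implementation:

def vitrage_template_is_valid(reply):
    # Illustrative helper mirroring the mocked cases above: anything other
    # than a single result whose 'status code' is 0 counts as a failure.
    results = reply.get('results') if isinstance(reply, dict) else None
    if not results or len(results) != 1:
        return False
    return results[0].get('status code') == 0


assert vitrage_template_is_valid(
    {'results': [{'status': 'validation OK', 'status code': 0}]})
assert not vitrage_template_is_valid({})
assert not vitrage_template_is_valid({'results': []})
assert not vitrage_template_is_valid(
    {'results': [{'status': 'validation failed', 'status code': 163}]})
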
diff --git a/heat/tests/openstack/zaqar/test_queue.py b/heat/tests/openstack/zaqar/test_queue.py
index 72bd09b88..c12d1abbd 100644
--- a/heat/tests/openstack/zaqar/test_queue.py
+++ b/heat/tests/openstack/zaqar/test_queue.py
@@ -11,10 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
-
-from six.moves.urllib import parse as urlparse
+from unittest import mock
+from urllib import parse as urlparse
from heat.common import template_format
from heat.engine.clients import client_plugin
@@ -214,7 +212,7 @@ class ZaqarMessageQueueTest(common.HeatTestCase):
scheduler.TaskRunner(queue.update,
new_queue))
msg = 'The Resource MyQueue2 requires replacement.'
- self.assertEqual(msg, six.text_type(err))
+ self.assertEqual(msg, str(err))
def test_show_resource(self):
t = template_format.parse(wp_template)
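
The Zaqar queue test also replaces six.moves.urllib with the standard urllib package, whose parse submodule provides the same functions. Quick sketch (the URL is made up):

# six.moves.urllib.parse and urllib.parse expose the same API on Python 3.
from urllib import parse as urlparse

url = urlparse.urlparse('https://zaqar.example.com:8888/v2/queues/MyQueue')
assert url.scheme == 'https'
assert url.path.endswith('/queues/MyQueue')
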
diff --git a/heat/tests/openstack/zaqar/test_subscription.py b/heat/tests/openstack/zaqar/test_subscription.py
index f6840b430..436db0cab 100644
--- a/heat/tests/openstack/zaqar/test_subscription.py
+++ b/heat/tests/openstack/zaqar/test_subscription.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import template_format
@@ -84,7 +83,7 @@ class FakeSubscription(object):
def update(self, prop_diff):
allowed_keys = {'subscriber', 'ttl', 'options'}
- for key in six.iterkeys(prop_diff):
+ for key in prop_diff.keys():
if key not in allowed_keys:
raise KeyError(key)
@@ -118,7 +117,7 @@ class ZaqarSubscriptionTest(common.HeatTestCase):
self.stack.validate)
self.assertEqual('The subscriber type of must be one of: http, https, '
'mailto, trust+http, trust+https.',
- six.text_type(exc))
+ str(exc))
def test_create(self):
t = template_format.parse(subscr_template)
@@ -233,7 +232,7 @@ class ZaqarSubscriptionTest(common.HeatTestCase):
scheduler.TaskRunner(subscr.update,
new_subscr))
msg = 'The Resource MySubscription requires replacement.'
- self.assertEqual(msg, six.text_type(err))
+ self.assertEqual(msg, str(err))
def test_show_resource(self):
t = template_format.parse(subscr_template)
@@ -420,7 +419,7 @@ class ZaqarMistralTriggerTest(common.HeatTestCase):
scheduler.TaskRunner(subscr.update,
new_subscr))
msg = 'The Resource subscription requires replacement.'
- self.assertEqual(msg, six.text_type(err))
+ self.assertEqual(msg, str(err))
self.fc.subscription.assert_called_with(
subscr.properties['queue_name'],
options=self.options,
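
FakeSubscription.update above gets the usual six.iterkeys cleanup: iterating the dict (or its .keys() view) yields the same keys on Python 3. Sketch with the same allowed_keys set:

prop_diff = {'ttl': 3600, 'options': {}}
allowed_keys = {'subscriber', 'ttl', 'options'}

# old: for key in six.iterkeys(prop_diff):
for key in prop_diff.keys():    # iterating prop_diff directly also works
    if key not in allowed_keys:
        raise KeyError(key)
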
diff --git a/heat/tests/openstack/zun/test_container.py b/heat/tests/openstack/zun/test_container.py
index 358833455..f69f995f1 100644
--- a/heat/tests/openstack/zun/test_container.py
+++ b/heat/tests/openstack/zun/test_container.py
@@ -12,8 +12,7 @@
# under the License.
import copy
-import mock
-import six
+from unittest import mock
from oslo_config import cfg
from zunclient import exceptions as zc_exc
@@ -48,6 +47,7 @@ resources:
image_pull_policy: always
restart_policy: on-failure:2
interactive: false
+ tty: false
image_driver: docker
hints:
hintkey: hintval
@@ -108,6 +108,7 @@ class ZunContainerTest(common.HeatTestCase):
self.fake_restart_policy = {'MaximumRetryCount': '2',
'Name': 'on-failure'}
self.fake_interactive = False
+ self.fake_tty = False
self.fake_image_driver = 'docker'
self.fake_hints = {'hintkey': 'hintval'}
self.fake_hostname = 'myhost'
@@ -194,6 +195,7 @@ class ZunContainerTest(common.HeatTestCase):
value.image_pull_policy = self.fake_image_policy
value.restart_policy = self.fake_restart_policy
value.interactive = self.fake_interactive
+ value.tty = self.fake_tty
value.image_driver = self.fake_image_driver
value.hints = self.fake_hints
value.hostname = self.fake_hostname
@@ -249,6 +251,9 @@ class ZunContainerTest(common.HeatTestCase):
self.fake_interactive,
c.properties.get(container.Container.INTERACTIVE))
self.assertEqual(
+ self.fake_tty,
+ c.properties.get(container.Container.TTY))
+ self.assertEqual(
self.fake_image_driver,
c.properties.get(container.Container.IMAGE_DRIVER))
self.assertEqual(
@@ -283,6 +288,7 @@ class ZunContainerTest(common.HeatTestCase):
image_pull_policy=self.fake_image_policy,
restart_policy=self.fake_restart_policy,
interactive=self.fake_interactive,
+ tty=self.fake_tty,
image_driver=self.fake_image_driver,
hints=self.fake_hints,
hostname=self.fake_hostname,
@@ -299,7 +305,7 @@ class ZunContainerTest(common.HeatTestCase):
exception.ResourceFailure,
scheduler.TaskRunner(c.create))
self.assertEqual((c.CREATE, c.FAILED), c.state)
- self.assertIn("Error in creating container ", six.text_type(exc))
+ self.assertIn("Error in creating container ", str(exc))
def test_container_create_unknown_status(self):
c = self._create_resource('container', self.rsrc_defn, self.stack,
@@ -308,7 +314,7 @@ class ZunContainerTest(common.HeatTestCase):
exception.ResourceFailure,
scheduler.TaskRunner(c.create))
self.assertEqual((c.CREATE, c.FAILED), c.state)
- self.assertIn("Unknown status Container", six.text_type(exc))
+ self.assertIn("Unknown status Container", str(exc))
def test_container_update(self):
c = self._create_resource('container', self.rsrc_defn, self.stack)
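
The Zun container test adds coverage for the new tty property next to interactive: the fixture value is False and the create call is asserted with tty=self.fake_tty. A generic sketch of asserting such an added keyword on a mocked call; the client object and the create_container name are a plain MagicMock stand-in, not the real zunclient API:

from unittest import mock

client = mock.MagicMock()   # illustrative stand-in, not zunclient
client.create_container(name='test_container', interactive=False, tty=False)
client.create_container.assert_called_once_with(
    name='test_container', interactive=False, tty=False)
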
diff --git a/heat/tests/policy/check_admin.json b/heat/tests/policy/check_admin.json
deleted file mode 100644
index 96a15c83c..000000000
--- a/heat/tests/policy/check_admin.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "context_is_admin": "role:admin"
-}
diff --git a/heat/tests/policy/deny_stack_user.json b/heat/tests/policy/deny_stack_user.json
deleted file mode 100644
index c20d2673f..000000000
--- a/heat/tests/policy/deny_stack_user.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "deny_stack_user": "not role:heat_stack_user",
- "cloudformation:ListStacks": "rule:deny_stack_user",
- "cloudformation:CreateStack": "rule:deny_stack_user",
- "cloudformation:DescribeStacks": "rule:deny_stack_user",
- "cloudformation:DeleteStack": "rule:deny_stack_user",
- "cloudformation:UpdateStack": "rule:deny_stack_user",
- "cloudformation:DescribeStackEvents": "rule:deny_stack_user",
- "cloudformation:ValidateTemplate": "rule:deny_stack_user",
- "cloudformation:GetTemplate": "rule:deny_stack_user",
- "cloudformation:EstimateTemplateCost": "rule:deny_stack_user",
- "cloudformation:DescribeStackResource": "",
- "cloudformation:DescribeStackResources": "rule:deny_stack_user",
- "cloudformation:ListStackResources": "rule:deny_stack_user",
-}
diff --git a/heat/tests/policy/notallowed.json b/heat/tests/policy/notallowed.json
deleted file mode 100644
index 5346307e3..000000000
--- a/heat/tests/policy/notallowed.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "cloudformation:ListStacks": "!",
- "cloudformation:CreateStack": "!",
- "cloudformation:DescribeStacks": "!",
- "cloudformation:DeleteStack": "!",
- "cloudformation:UpdateStack": "!",
- "cloudformation:DescribeStackEvents": "!",
- "cloudformation:ValidateTemplate": "!",
- "cloudformation:GetTemplate": "!",
- "cloudformation:EstimateTemplateCost": "!",
- "cloudformation:DescribeStackResource": "!",
- "cloudformation:DescribeStackResources": "!",
- "cloudformation:ListStackResources": "!"
-}
diff --git a/heat/tests/policy/resources.json b/heat/tests/policy/resources.json
index 163fdb66e..dc56a4778 100644
--- a/heat/tests/policy/resources.json
+++ b/heat/tests/policy/resources.json
@@ -1,7 +1,3 @@
{
- "context_is_admin": "role:admin",
-
"resource_types:OS::Cinder::Quota": "!",
- "resource_types:OS::Keystone::*": "rule:context_is_admin"
-
}
diff --git a/heat/tests/policy/test_acl_personas.yaml b/heat/tests/policy/test_acl_personas.yaml
new file mode 100644
index 000000000..1fe5ce40a
--- /dev/null
+++ b/heat/tests/policy/test_acl_personas.yaml
@@ -0,0 +1,241 @@
+actions_most_restricted:
+ scope: "actions"
+ actions:
+ - "snapshot"
+ - "suspend"
+ - "resume"
+ - "cancel_update"
+ - "cancel_without_rollback"
+ allowed:
+ - "system_admin"
+ - "project_member"
+ denied:
+ - "stack_user"
+
+actions_restricted:
+ scope: "actions"
+ actions:
+ - "check"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ denied:
+ - "stack_user"
+
+cloud_formation_most_restricted:
+ scope: "cloudformation"
+ actions:
+ - "ListStacks"
+ - "CreateStack"
+ - "DescribeStacks"
+ - "DeleteStack"
+ - "UpdateStack"
+ - "DescribeStackEvents"
+ - "ValidateTemplate"
+ - "GetTemplate"
+ - "EstimateTemplateCost"
+ - "DescribeStackResources"
+ allowed:
+ - "system_admin"
+ - "project_member"
+ denied:
+ - "stack_user"
+
+cloud_formation_restricted:
+ scope: "cloudformation"
+ actions:
+ - "DescribeStackResource"
+ allowed:
+ - "system_admin"
+ - "project_member"
+
+build_info_acl:
+ scope: "build_info"
+ actions:
+ - "build_info"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ denied:
+ - "stack_user"
+
+events_acl:
+ scope: "events"
+ actions:
+ - "index"
+ - "show"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ denied:
+ - "stack_user"
+
+resource_least_restricted:
+ scope: "resource"
+ actions:
+ - "metadata"
+ - "signal"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ - "stack_user"
+
+resource_restricted:
+ scope: "resource"
+ actions:
+ - "index"
+ - "show"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ denied:
+ - "stack_user"
+
+resource_most_restricted:
+ scope: "resource"
+ actions:
+ - "mark_unhealthy"
+ allowed:
+ - "system_admin"
+ - "project_member"
+ denied:
+ - "stack_user"
+
+service_acl:
+ scope: "service"
+ actions:
+ - "index"
+ allowed:
+ - "system_reader"
+
+software_configs_least_restricted:
+ scope: "software_configs"
+ actions:
+ - "global_index"
+ allowed:
+ - "system_reader"
+
+software_configs_most_restricted:
+ scope: "software_configs"
+ actions:
+ - "create"
+ - "delete"
+ allowed:
+ - "system_admin"
+ - "project_member"
+ denied:
+ - "stack_user"
+
+software_configs_restricted:
+ scope: "software_configs"
+ actions:
+ - "index"
+ - "create"
+ - "show"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ denied:
+ - "stack_user"
+
+software_deployments_most_restricted:
+ scope: "software_deployments"
+ actions:
+ - "create"
+ - "update"
+ - "delete"
+ allowed:
+ - "system_admin"
+ - "project_member"
+ denied:
+ - "stack_user"
+
+software_deployments_restricted:
+ scope: "software_deployments"
+ actions:
+ - "index"
+ - "show"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ denied:
+ - "stack_user"
+
+
+software_deployments_least_restricted:
+ scope: "software_deployments"
+ actions:
+ - "metadata"
+ allowed:
+ - "stack_user"
+
+stacks_most_restricted:
+ scope: "stacks"
+ actions:
+ - "abandon"
+ - "create"
+ - "delete"
+ - "export"
+ - "generate_template"
+ - "update"
+ - "update_patch"
+ - "preview_update"
+ - "preview_update_patch"
+ - "validate_template"
+ - "snapshot"
+ - "delete_snapshot"
+ - "restore_snapshot"
+ allowed:
+ - "system_admin"
+ - "project_member"
+ denied:
+ - "stack_user"
+
+stacks_restricted:
+ scope: "stacks"
+ actions:
+ - "detail"
+ - "index"
+ - "list_resource_types"
+ - "list_template_versions"
+ - "list_template_functions"
+ - "preview"
+ - "resource_schema"
+ - "show"
+ - "template"
+ - "environment"
+ - "files"
+ - "show_snapshot"
+ - "list_snapshots"
+ - "list_outputs"
+ - "show_output"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ denied:
+ - "stack_user"
+
+stacks_restricted_index:
+ scope: "stacks"
+ actions:
+ - "global_index"
+ allowed:
+ - "system_admin"
+
+stacks_open:
+ scope: "stacks"
+ actions:
+ - "lookup"
+ allowed:
+ - "system_reader"
+ - "project_reader"
+ - "stack_user"
+
+create_stacks:
+ scope: "stacks"
+ actions:
+ - "create"
+ allowed:
+ - "system_admin"
+ - "project_admin"
+ - "project_member"
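
Each entry in the new test_acl_personas.yaml names a policy scope, the actions under test, and the personas expected to be allowed or denied; the rewritten policy test later in this diff unpacks every entry into a test case. A hedged sketch of walking one such entry, with PyYAML assumed available (it already is for Heat's tests) and the stacks_open entry inlined instead of read from the file:

import yaml

entry = yaml.safe_load("""
stacks_open:
  scope: "stacks"
  actions:
    - "lookup"
  allowed:
    - "system_reader"
    - "project_reader"
    - "stack_user"
""")['stacks_open']

for action in entry['actions']:
    for persona in entry.get('allowed', []):
        print(entry['scope'], action, persona, 'expected: allowed')
    for persona in entry.get('denied', []):
        print(entry['scope'], action, persona, 'expected: denied')
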
diff --git a/heat/tests/policy/test_deprecated_access.yaml b/heat/tests/policy/test_deprecated_access.yaml
new file mode 100644
index 000000000..26af91c8b
--- /dev/null
+++ b/heat/tests/policy/test_deprecated_access.yaml
@@ -0,0 +1,22 @@
+cloud_formation_restricted:
+ scope: "cloudformation"
+ actions:
+ - "DescribeStackResource"
+ allowed:
+ - "stack_user"
+ - "anyone"
+
+stacks_open:
+ scope: "stacks"
+ actions:
+ - "lookup"
+ allowed:
+ - "anyone"
+
+create_stacks:
+ scope: "stacks"
+ actions:
+ - "create"
+ allowed:
+ - "system_reader"
+ - "project_reader"
diff --git a/heat/tests/test_attributes.py b/heat/tests/test_attributes.py
index 5393ec198..6ce5cafa1 100644
--- a/heat/tests/test_attributes.py
+++ b/heat/tests/test_attributes.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.engine import attributes
from heat.engine import resources
@@ -34,9 +33,9 @@ class AttributeSchemaTest(common.HeatTestCase):
def test_all_resource_schemata(self):
for resource_type in resources.global_env().get_types():
- for schema in six.itervalues(getattr(resource_type,
- 'attributes_schema',
- {})):
+ for schema in getattr(resource_type,
+ 'attributes_schema',
+ {}).values():
attributes.Schema.from_attribute(schema)
def test_from_attribute_new_schema_format(self):
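
The attributes test swaps six.itervalues for a plain .values() call on the (possibly missing) attributes_schema mapping. Sketch with a made-up FakeResource:

class FakeResource(object):
    attributes_schema = {'first_address': 'IP address', 'show': 'Detail'}


# old: for schema in six.itervalues(getattr(rt, 'attributes_schema', {})):
for schema in getattr(FakeResource, 'attributes_schema', {}).values():
    assert isinstance(schema, str)
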
diff --git a/heat/tests/test_auth_password.py b/heat/tests/test_auth_password.py
index c4c8f213e..ee06d8b1e 100644
--- a/heat/tests/test_auth_password.py
+++ b/heat/tests/test_auth_password.py
@@ -14,10 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from unittest import mock
+
from keystoneauth1 import exceptions as keystone_exc
-import mock
from oslo_config import cfg
-import six
import webob
from heat.common import auth_password
@@ -79,9 +79,10 @@ class FakeApp(object):
def __call__(self, env, start_response):
"""Assert that expected environment is present when finally called."""
for k, v in self.expected_env.items():
- assert env[k] == v, '%s != %s' % (env[k], v)
+ if env[k] != v:
+ raise AssertionError('%s != %s' % (env[k], v))
resp = webob.Response()
- resp.body = six.b('SUCCESS')
+ resp.body = 'SUCCESS'.encode('latin-1')
return resp(env, start_response)
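
Two small Python 3 cleanups land in the auth_password FakeApp: the bare assert becomes an explicit AssertionError so the check still runs under python -O, and the response body is produced as bytes without six.b. A sketch of both, with made-up header values:

expected_env = {'HTTP_X_AUTH_TOKEN': 'abcd1234'}
env = dict(expected_env)

for k, v in expected_env.items():
    # old: assert env[k] == v, '%s != %s' % (env[k], v)
    if env[k] != v:
        raise AssertionError('%s != %s' % (env[k], v))

# old: resp.body = six.b('SUCCESS'); WSGI/webob bodies must be bytes
body = 'SUCCESS'.encode('latin-1')
assert body == b'SUCCESS'
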
diff --git a/heat/tests/test_auth_url.py b/heat/tests/test_auth_url.py
index 0f9e4aabf..585d5b7e8 100644
--- a/heat/tests/test_auth_url.py
+++ b/heat/tests/test_auth_url.py
@@ -14,8 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
-import six
+from unittest import mock
+
import webob
from webob import exc
@@ -29,7 +29,7 @@ class FakeApp(object):
def __call__(self, environ, start_response):
"""Assert that headers are correctly set up when finally called."""
resp = webob.Response()
- resp.body = six.b('SUCCESS')
+ resp.body = 'SUCCESS'.encode('latin-1')
return resp(environ, start_response)
diff --git a/heat/tests/test_common_auth_plugin.py b/heat/tests/test_common_auth_plugin.py
index ee8cac7e9..ac289b3f1 100644
--- a/heat/tests/test_common_auth_plugin.py
+++ b/heat/tests/test_common_auth_plugin.py
@@ -12,10 +12,10 @@
# under the License.
import json
+from unittest import mock
+
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import session
-import mock
-import six
from heat.common import auth_plugin
from heat.common import config
@@ -80,7 +80,7 @@ class TestAuthPlugin(common.HeatTestCase):
ValueError, auth_plugin.parse_auth_credential_to_dict, credential)
self.assertEqual("Missing key in auth information, the correct "
"format contains [\'auth_type\', \'auth\'].",
- six.text_type(error))
+ str(error))
def test_parse_auth_credential_to_dict_with_json_error(self):
credential = (
@@ -93,4 +93,4 @@ class TestAuthPlugin(common.HeatTestCase):
ValueError, auth_plugin.parse_auth_credential_to_dict, credential)
error_msg = ('Failed to parse credential, please check your Stack '
'Credential format.')
- self.assertEqual(error_msg, six.text_type(error))
+ self.assertEqual(error_msg, str(error))
diff --git a/heat/tests/test_common_context.py b/heat/tests/test_common_context.py
index 405eaf869..01604c665 100644
--- a/heat/tests/test_common_context.py
+++ b/heat/tests/test_common_context.py
@@ -13,9 +13,9 @@
# under the License.
import os
+from unittest import mock
from keystoneauth1 import loading as ks_loading
-import mock
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_middleware import request_id
diff --git a/heat/tests/test_common_policy.py b/heat/tests/test_common_policy.py
index ee311a809..eb2753c4a 100644
--- a/heat/tests/test_common_policy.py
+++ b/heat/tests/test_common_policy.py
@@ -16,6 +16,8 @@
import os.path
+import ddt
+
from oslo_config import fixture as config_fixture
from oslo_policy import policy as base_policy
@@ -27,17 +29,8 @@ from heat.tests import utils
policy_path = os.path.dirname(os.path.realpath(__file__)) + "/policy/"
+@ddt.ddt
class TestPolicyEnforcer(common.HeatTestCase):
- cfn_actions = ("ListStacks", "CreateStack", "DescribeStacks",
- "DeleteStack", "UpdateStack", "DescribeStackEvents",
- "ValidateTemplate", "GetTemplate",
- "EstimateTemplateCost", "DescribeStackResource",
- "DescribeStackResources")
-
- cw_actions = ("DeleteAlarms", "DescribeAlarmHistory", "DescribeAlarms",
- "DescribeAlarmsForMetric", "DisableAlarmActions",
- "EnableAlarmActions", "GetMetricStatistics", "ListMetrics",
- "PutMetricAlarm", "PutMetricData", "SetAlarmState")
def setUp(self):
super(TestPolicyEnforcer, self).setUp(mock_resource_policy=False)
@@ -47,44 +40,80 @@ class TestPolicyEnforcer(common.HeatTestCase):
def get_policy_file(self, filename):
return policy_path + filename
- def test_policy_cfn_default(self):
- enforcer = policy.Enforcer(scope='cloudformation')
-
- ctx = utils.dummy_context(roles=[])
- for action in self.cfn_actions:
- # Everything should be allowed
- enforcer.enforce(ctx, action, is_registered_policy=True)
-
- def test_policy_cfn_notallowed(self):
- enforcer = policy.Enforcer(
- scope='cloudformation',
- policy_file=self.get_policy_file('notallowed.json'))
-
- ctx = utils.dummy_context(roles=[])
- for action in self.cfn_actions:
- # Everything should raise the default exception.Forbidden
- self.assertRaises(exception.Forbidden, enforcer.enforce, ctx,
- action, {}, is_registered_policy=True)
-
- def test_policy_cfn_deny_stack_user(self):
- enforcer = policy.Enforcer(scope='cloudformation')
-
- ctx = utils.dummy_context(roles=['heat_stack_user'])
- for action in self.cfn_actions:
- # Everything apart from DescribeStackResource should be Forbidden
- if action == "DescribeStackResource":
- enforcer.enforce(ctx, action, is_registered_policy=True)
- else:
- self.assertRaises(exception.Forbidden, enforcer.enforce, ctx,
- action, {}, is_registered_policy=True)
-
- def test_policy_cfn_allow_non_stack_user(self):
- enforcer = policy.Enforcer(scope='cloudformation')
-
- ctx = utils.dummy_context(roles=['not_a_stack_user'])
- for action in self.cfn_actions:
- # Everything should be allowed
- enforcer.enforce(ctx, action, is_registered_policy=True)
+ def _get_context(self, persona):
+ if persona == "system_admin":
+ ctx = utils.dummy_system_admin_context()
+ elif persona == "system_reader":
+ ctx = utils.dummy_system_reader_context()
+ elif persona == "project_admin":
+ ctx = utils.dummy_context(roles=['admin', 'member', 'reader'])
+ elif persona == "project_member":
+ ctx = utils.dummy_context(roles=['member', 'reader'])
+ elif persona == "project_reader":
+ ctx = utils.dummy_context(roles=['reader'])
+ elif persona == "stack_user":
+ ctx = utils.dummy_context(roles=['heat_stack_user'])
+ elif persona == "anyone":
+ ctx = utils.dummy_context(roles=['foobar'])
+ else:
+ self.fail("Persona [{}] not found".format(persona))
+ return ctx
+
+ def _test_legacy_rbac_policies(self, **kwargs):
+ scope = kwargs.get("scope")
+ actions = kwargs.get("actions")
+ allowed_personas = kwargs.get("allowed", [])
+ denied_personas = kwargs.get("denied", [])
+ self._test_policy_allowed(scope, actions, allowed_personas)
+ self._test_policy_notallowed(scope, actions, denied_personas)
+
+ @ddt.file_data('policy/test_acl_personas.yaml')
+ @ddt.unpack
+ def test_legacy_rbac_policies(self, **kwargs):
+ self._test_legacy_rbac_policies(**kwargs)
+
+ @ddt.file_data('policy/test_deprecated_access.yaml')
+ @ddt.unpack
+ def test_deprecated_policies(self, **kwargs):
+ self._test_legacy_rbac_policies(**kwargs)
+
+ @ddt.file_data('policy/test_acl_personas.yaml')
+ @ddt.unpack
+ def test_secure_rbac_policies(self, **kwargs):
+ self.fixture.config(group='oslo_policy', enforce_scope=True)
+ self.fixture.config(group='oslo_policy', enforce_new_defaults=True)
+ scope = kwargs.get("scope")
+ actions = kwargs.get("actions")
+ allowed_personas = kwargs.get("allowed", [])
+ denied_personas = kwargs.get("denied", [])
+ self._test_policy_allowed(scope, actions, allowed_personas)
+ self._test_policy_notallowed(scope, actions, denied_personas)
+
+ def _test_policy_allowed(self, scope, actions, personas):
+ enforcer = policy.Enforcer(scope=scope)
+ for persona in personas:
+ ctx = self._get_context(persona)
+ for action in actions:
+ # Everything should be allowed
+ enforcer.enforce(
+ ctx,
+ action,
+ target={"project_id": "test_tenant_id"},
+ is_registered_policy=True
+ )
+
+ def _test_policy_notallowed(self, scope, actions, personas):
+ enforcer = policy.Enforcer(scope=scope)
+ for persona in personas:
+ ctx = self._get_context(persona)
+ for action in actions:
+ # Everything should raise the default exception.Forbidden
+ self.assertRaises(
+ exception.Forbidden,
+ enforcer.enforce, ctx,
+ action,
+ target={"project_id": "test_tenant_id"},
+ is_registered_policy=True)
def test_set_rules_overwrite_true(self):
enforcer = policy.Enforcer()
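
The policy enforcer test drops its hard-coded CloudFormation action tuples in favour of ddt data files: @ddt.file_data loads each YAML entry from the persona files above and @ddt.unpack turns its keys into keyword arguments, so one method covers every scope/action/persona combination. A self-contained sketch of that mechanism, with the data inlined instead of loaded from Heat's YAML:

import unittest

import ddt  # the test dependency this change starts using


@ddt.ddt
class PersonaTest(unittest.TestCase):

    @ddt.data({'scope': 'stacks', 'actions': ['lookup'],
               'allowed': ['project_reader'], 'denied': []})
    @ddt.unpack
    def test_acl(self, scope, actions, allowed, denied=None):
        # With @ddt.file_data('policy/test_acl_personas.yaml') every
        # top-level YAML entry arrives here the same way, one generated
        # test per entry.
        self.assertEqual('stacks', scope)
        self.assertIn('lookup', actions)
        self.assertIn('project_reader', allowed)


if __name__ == '__main__':
    unittest.main()
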
diff --git a/heat/tests/test_common_serializers.py b/heat/tests/test_common_serializers.py
index 75605a41f..6fb436307 100644
--- a/heat/tests/test_common_serializers.py
+++ b/heat/tests/test_common_serializers.py
@@ -19,7 +19,6 @@ import datetime
from lxml import etree
from oslo_serialization import jsonutils as json
-import six
import webob
from heat.common import serializers
@@ -111,9 +110,9 @@ class XMLResponseSerializerTest(common.HeatTestCase):
])])
]))
])
- expected = six.b('<aresponse><is_public>True</is_public>'
- '<name><member><name1>test</name1></member></name>'
- '</aresponse>')
+ expected = '<aresponse><is_public>True</is_public>' \
+ '<name><member><name1>test</name1></member></name>' \
+ '</aresponse>'.encode('latin-1')
actual = serializers.XMLResponseSerializer().to_xml(fixture)
actual_xml_tree = etree.XML(actual)
actual_xml_dict = self._recursive_dict(actual_xml_tree)
@@ -132,9 +131,11 @@ class XMLResponseSerializerTest(common.HeatTestCase):
('Metadata', {"name2": "test2"}),
]))
])
- expected = six.b('<aresponse><is_public>True</is_public>'
- '<TemplateBody>{"name1": "test"}</TemplateBody>'
- '<Metadata>{"name2": "test2"}</Metadata></aresponse>')
+ expected = '<aresponse>' \
+ '<is_public>True</is_public>' \
+ '<TemplateBody>{"name1": "test"}</TemplateBody>' \
+ '<Metadata>{"name2": "test2"}</Metadata>' \
+ '</aresponse>'.encode('latin-1')
actual = serializers.XMLResponseSerializer().to_xml(fixture)
actual_xml_tree = etree.XML(actual)
actual_xml_dict = self._recursive_dict(actual_xml_tree)
diff --git a/heat/tests/test_common_service_utils.py b/heat/tests/test_common_service_utils.py
index 6bad2a83d..0745e8f85 100644
--- a/heat/tests/test_common_service_utils.py
+++ b/heat/tests/test_common_service_utils.py
@@ -51,23 +51,23 @@ class TestServiceUtils(common.HeatTestCase):
self.assertEqual(service_dict['status'], 'up')
- # check again within first report_interval time (60)
+ # check again within first report_interval time
service_dict = service_utils.format_service(service)
self.assertEqual(service_dict['status'], 'up')
- # check update not happen within report_interval time (60+)
+ # check update not happen within 2*report_interval time
service.created_at = (timeutils.utcnow() -
- datetime.timedelta(0, 70))
+ datetime.timedelta(0, 130))
service_dict = service_utils.format_service(service)
self.assertEqual(service_dict['status'], 'down')
- # check update happened after report_interval time (60+)
+ # check update happened after 2* report_interval time
service.updated_at = (timeutils.utcnow() -
- datetime.timedelta(0, 70))
+ datetime.timedelta(0, 130))
service_dict = service_utils.format_service(service)
self.assertEqual(service_dict['status'], 'down')
- # check update happened within report_interval time (60)
+ # check update happened within report_interval time
service.updated_at = (timeutils.utcnow() -
datetime.timedelta(0, 50))
service_dict = service_utils.format_service(service)
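
The service_utils test now only marks a service down once roughly two report intervals have passed, hence the fixture timestamps moving from 70 to 130 seconds in the past against the default 60-second interval. An illustrative liveness rule consistent with those values (the helper is not Heat's actual code):

import datetime


def service_is_up(last_seen, now, report_interval=60, grace_factor=2):
    # Illustrative rule matching the updated fixture: a service still
    # counts as up while its last heartbeat is newer than
    # grace_factor * report_interval seconds (120 s with the defaults).
    age = now - last_seen
    return age <= datetime.timedelta(seconds=grace_factor * report_interval)


now = datetime.datetime(2024, 1, 1, 12, 0, 0)
assert service_is_up(now - datetime.timedelta(seconds=50), now)        # 'up'
assert not service_is_up(now - datetime.timedelta(seconds=130), now)   # 'down'
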
diff --git a/heat/tests/test_constraints.py b/heat/tests/test_constraints.py
index c2f87a68d..1c62ca5ef 100644
--- a/heat/tests/test_constraints.py
+++ b/heat/tests/test_constraints.py
@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from heat.common import exception
from heat.engine import constraints
@@ -81,16 +80,16 @@ class SchemaTest(common.HeatTestCase):
self.assertRaises(ValueError, r.validate, 6)
def test_length_validate(self):
- l = constraints.Length(min=5, max=5, description='a range')
- l.validate('abcde')
+ cl = constraints.Length(min=5, max=5, description='a range')
+ cl.validate('abcde')
def test_length_min_fail(self):
- l = constraints.Length(min=5, description='a range')
- self.assertRaises(ValueError, l.validate, 'abcd')
+ cl = constraints.Length(min=5, description='a range')
+ self.assertRaises(ValueError, cl.validate, 'abcd')
def test_length_max_fail(self):
- l = constraints.Length(max=5, description='a range')
- self.assertRaises(ValueError, l.validate, 'abcdef')
+ cl = constraints.Length(max=5, description='a range')
+ self.assertRaises(ValueError, cl.validate, 'abcdef')
def test_modulo_validate(self):
r = constraints.Modulo(step=2, offset=1, description='a modulo')
@@ -125,7 +124,7 @@ class SchemaTest(common.HeatTestCase):
r = constraints.Modulo(step=2, offset=1)
err = self.assertRaises(ValueError, r.validate, 4)
self.assertIn('4 is not a multiple of 2 with an offset of 1',
- six.text_type(err))
+ str(err))
self.assertRaises(ValueError, r.validate, 0)
self.assertRaises(ValueError, r.validate, 2)
@@ -140,26 +139,26 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.InvalidSchemaError,
constraints.Modulo, step=111, offset=111)
self.assertIn('offset must be smaller (by absolute value) than step',
- six.text_type(err))
+ str(err))
err = self.assertRaises(exception.InvalidSchemaError,
constraints.Modulo, step=111, offset=112)
self.assertIn('offset must be smaller (by absolute value) than step',
- six.text_type(err))
+ str(err))
err = self.assertRaises(exception.InvalidSchemaError,
constraints.Modulo, step=0, offset=1)
- self.assertIn('step cannot be 0', six.text_type(err))
+ self.assertIn('step cannot be 0', str(err))
err = self.assertRaises(exception.InvalidSchemaError,
constraints.Modulo, step=-2, offset=1)
self.assertIn('step and offset must be both positive or both negative',
- six.text_type(err))
+ str(err))
err = self.assertRaises(exception.InvalidSchemaError,
constraints.Modulo, step=2, offset=-1)
self.assertIn('step and offset must be both positive or both negative',
- six.text_type(err))
+ str(err))
def test_schema_all(self):
d = {
@@ -196,8 +195,8 @@ class SchemaTest(common.HeatTestCase):
s = constraints.Schema(constraints.Schema.STRING, 'A string',
default='wibble',
constraints=[constraints.Length(4, 8)])
- l = constraints.Schema(constraints.Schema.LIST, 'A list', schema=s)
- self.assertEqual(d, dict(l))
+ ls = constraints.Schema(constraints.Schema.LIST, 'A list', schema=s)
+ self.assertEqual(d, dict(ls))
def test_schema_map_schema(self):
d = {
@@ -252,8 +251,8 @@ class SchemaTest(common.HeatTestCase):
constraints=[constraints.Length(4, 8)])
m = constraints.Schema(constraints.Schema.MAP, 'A map',
schema={'Foo': s})
- l = constraints.Schema(constraints.Schema.LIST, 'A list', schema=m)
- self.assertEqual(d, dict(l))
+ ls = constraints.Schema(constraints.Schema.LIST, 'A list', schema=m)
+ self.assertEqual(d, dict(ls))
def test_invalid_type(self):
self.assertRaises(exception.InvalidSchemaError, constraints.Schema,
@@ -271,7 +270,7 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.InvalidSchemaError,
schema.validate)
self.assertIn('Range constraint invalid for String',
- six.text_type(err))
+ str(err))
def test_length_invalid_type(self):
schema = constraints.Schema('Integer',
@@ -279,7 +278,7 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.InvalidSchemaError,
schema.validate)
self.assertIn('Length constraint invalid for Integer',
- six.text_type(err))
+ str(err))
def test_modulo_invalid_type(self):
schema = constraints.Schema('String',
@@ -287,7 +286,7 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.InvalidSchemaError,
schema.validate)
self.assertIn('Modulo constraint invalid for String',
- six.text_type(err))
+ str(err))
def test_allowed_pattern_invalid_type(self):
schema = constraints.Schema(
@@ -297,7 +296,7 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.InvalidSchemaError,
schema.validate)
self.assertIn('AllowedPattern constraint invalid for Integer',
- six.text_type(err))
+ str(err))
def test_range_vals_invalid_type(self):
self.assertRaises(exception.InvalidSchemaError,
@@ -329,7 +328,7 @@ class SchemaTest(common.HeatTestCase):
constraints=[constraints.Range(max=4)])
err = self.assertRaises(exception.InvalidSchemaError, s.validate)
self.assertIn('Range constraint invalid for String',
- six.text_type(err))
+ str(err))
def test_schema_nested_validate_good(self):
nested = constraints.Schema(constraints.Schema.STRING, 'A string',
@@ -347,7 +346,7 @@ class SchemaTest(common.HeatTestCase):
schema={'Foo': nested})
err = self.assertRaises(exception.InvalidSchemaError, s.validate)
self.assertIn('Range constraint invalid for String',
- six.text_type(err))
+ str(err))
def test_allowed_values_numeric_int(self):
"""Test AllowedValues constraint for numeric integer values.
@@ -367,12 +366,12 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
schema.validate_constraints, 3)
self.assertEqual('3 is not an allowed value [1, 2, 4]',
- six.text_type(err))
+ str(err))
self.assertIsNone(schema.validate_constraints('1'))
err = self.assertRaises(exception.StackValidationFailed,
schema.validate_constraints, '3')
self.assertEqual('"3" is not an allowed value [1, 2, 4]',
- six.text_type(err))
+ str(err))
# Allowed values defined as integer strings
schema = constraints.Schema(
@@ -384,12 +383,12 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
schema.validate_constraints, 3)
self.assertEqual('3 is not an allowed value ["1", "2", "4"]',
- six.text_type(err))
+ str(err))
self.assertIsNone(schema.validate_constraints('1'))
err = self.assertRaises(exception.StackValidationFailed,
schema.validate_constraints, '3')
self.assertEqual('"3" is not an allowed value ["1", "2", "4"]',
- six.text_type(err))
+ str(err))
def test_allowed_values_numeric_float(self):
"""Test AllowedValues constraint for numeric floating point values.
@@ -409,12 +408,12 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
schema.validate_constraints, 3.3)
self.assertEqual('3.3 is not an allowed value [1.1, 2.2, 4.4]',
- six.text_type(err))
+ str(err))
self.assertIsNone(schema.validate_constraints('1.1'))
err = self.assertRaises(exception.StackValidationFailed,
schema.validate_constraints, '3.3')
self.assertEqual('"3.3" is not an allowed value [1.1, 2.2, 4.4]',
- six.text_type(err))
+ str(err))
# Allowed values defined as strings
schema = constraints.Schema(
@@ -426,12 +425,12 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
schema.validate_constraints, 3.3)
self.assertEqual('3.3 is not an allowed value ["1.1", "2.2", "4.4"]',
- six.text_type(err))
+ str(err))
self.assertIsNone(schema.validate_constraints('1.1'))
err = self.assertRaises(exception.StackValidationFailed,
schema.validate_constraints, '3.3')
self.assertEqual('"3.3" is not an allowed value ["1.1", "2.2", "4.4"]',
- six.text_type(err))
+ str(err))
def test_to_schema_type_int(self):
"""Test Schema.to_schema_type method for type Integer."""
@@ -444,14 +443,14 @@ class SchemaTest(common.HeatTestCase):
# test invalid numeric values, i.e. floating point numbers
err = self.assertRaises(ValueError, schema.to_schema_type, 1.5)
self.assertEqual('Value "1.5" is invalid for data type "Integer".',
- six.text_type(err))
+ str(err))
err = self.assertRaises(ValueError, schema.to_schema_type, '1.5')
self.assertEqual('Value "1.5" is invalid for data type "Integer".',
- six.text_type(err))
+ str(err))
# test invalid string values
err = self.assertRaises(ValueError, schema.to_schema_type, 'foo')
self.assertEqual('Value "foo" is invalid for data type "Integer".',
- six.text_type(err))
+ str(err))
def test_to_schema_type_num(self):
"""Test Schema.to_schema_type method for type Number."""
@@ -467,21 +466,21 @@ class SchemaTest(common.HeatTestCase):
self.assertEqual(1.5, res)
err = self.assertRaises(ValueError, schema.to_schema_type, 'foo')
self.assertEqual('Value "foo" is invalid for data type "Number".',
- six.text_type(err))
+ str(err))
def test_to_schema_type_string(self):
"""Test Schema.to_schema_type method for type String."""
schema = constraints.Schema('String')
res = schema.to_schema_type('one')
- self.assertIsInstance(res, six.string_types)
+ self.assertIsInstance(res, str)
res = schema.to_schema_type('1')
- self.assertIsInstance(res, six.string_types)
+ self.assertIsInstance(res, str)
res = schema.to_schema_type(1)
- self.assertIsInstance(res, six.string_types)
+ self.assertIsInstance(res, str)
res = schema.to_schema_type(True)
- self.assertIsInstance(res, six.string_types)
+ self.assertIsInstance(res, str)
res = schema.to_schema_type(None)
- self.assertIsInstance(res, six.string_types)
+ self.assertIsInstance(res, str)
def test_to_schema_type_boolean(self):
"""Test Schema.to_schema_type method for type Boolean."""
@@ -501,7 +500,7 @@ class SchemaTest(common.HeatTestCase):
err = self.assertRaises(ValueError, schema.to_schema_type, 'foo')
self.assertEqual('Value "foo" is invalid for data type "Boolean".',
- six.text_type(err))
+ str(err))
def test_to_schema_type_map(self):
"""Test Schema.to_schema_type method for type Map."""
@@ -533,11 +532,11 @@ class CustomConstraintTest(common.HeatTestCase):
constraint = constraints.CustomConstraint("zero", environment=self.env)
self.assertEqual("Value must be of type zero",
- six.text_type(constraint))
+ str(constraint))
self.assertIsNone(constraint.validate(0))
error = self.assertRaises(ValueError, constraint.validate, 1)
self.assertEqual('"1" does not validate zero',
- six.text_type(error))
+ str(error))
def test_custom_error(self):
class ZeroConstraint(object):
@@ -552,7 +551,7 @@ class CustomConstraintTest(common.HeatTestCase):
constraint = constraints.CustomConstraint("zero", environment=self.env)
error = self.assertRaises(ValueError, constraint.validate, 1)
- self.assertEqual("1 is not 0", six.text_type(error))
+ self.assertEqual("1 is not 0", str(error))
def test_custom_message(self):
class ZeroConstraint(object):
@@ -564,13 +563,13 @@ class CustomConstraintTest(common.HeatTestCase):
self.env.register_constraint("zero", ZeroConstraint)
constraint = constraints.CustomConstraint("zero", environment=self.env)
- self.assertEqual("Only zero!", six.text_type(constraint))
+ self.assertEqual("Only zero!", str(constraint))
def test_unknown_constraint(self):
constraint = constraints.CustomConstraint("zero", environment=self.env)
error = self.assertRaises(ValueError, constraint.validate, 1)
self.assertEqual('"1" does not validate zero (constraint not found)',
- six.text_type(error))
+ str(error))
def test_constraints(self):
class ZeroConstraint(object):
diff --git a/heat/tests/test_convg_stack.py b/heat/tests/test_convg_stack.py
index a642b51cc..0db119781 100644
--- a/heat/tests/test_convg_stack.py
+++ b/heat/tests/test_convg_stack.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from heat.common import template_format
@@ -194,7 +195,7 @@ class StackConvergenceCreateUpdateDeleteTest(common.HeatTestCase):
[[4, False], [3, False]],
[[4, False], [4, True]]]),
sorted(stack_db.current_deps['edges']))
- '''
+ r'''
To visualize:
G(7, True) H(6, True)
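
The docstring holding this dependency-graph ASCII art is made a raw string, presumably because its backslash edges would otherwise be flagged as invalid escape sequences on current Python. Tiny illustration with a made-up graph:

# Backslash "edges" in a normal string literal trigger invalid-escape
# warnings; marking the literal raw keeps the art verbatim.
doc = r'''
    A(1, True)   B(2, True)
         \          /
          C(3, True)
'''
assert '\\' in doc
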
diff --git a/heat/tests/test_crypt.py b/heat/tests/test_crypt.py
index b1192b3d3..242e08b59 100644
--- a/heat/tests/test_crypt.py
+++ b/heat/tests/test_crypt.py
@@ -12,7 +12,6 @@
# under the License.
from oslo_config import cfg
-import six
from heat.common import config
from heat.common import crypt
@@ -35,7 +34,7 @@ class CryptTest(common.HeatTestCase):
config.startup_sanity_check)
exp_msg = ('heat.conf misconfigured, auth_encryption_key '
'must be 32 characters')
- self.assertIn(exp_msg, six.text_type(err))
+ self.assertIn(exp_msg, str(err))
def _test_encrypt_decrypt_dict(self, encryption_key=None):
data = {'p1': u'happy',
@@ -73,4 +72,4 @@ class CryptTest(common.HeatTestCase):
'767c3ed056cbaa3b9dfedb8c6f825bf1')
self.assertEqual('Can not decrypt data with the auth_encryption_key '
'in heat config.',
- six.text_type(ex))
+ str(ex))
diff --git a/heat/tests/test_engine_api_utils.py b/heat/tests/test_engine_api_utils.py
index f21687da1..8703fcdda 100644
--- a/heat/tests/test_engine_api_utils.py
+++ b/heat/tests/test_engine_api_utils.py
@@ -13,11 +13,10 @@
import datetime as dt
import json
+from unittest import mock
import uuid
-import mock
from oslo_utils import timeutils
-import six
from heat.common import exception
from heat.common import template_format
@@ -398,7 +397,7 @@ class FormatTest(common.HeatTestCase):
'outputs': [],
'template_description': 'No description',
'timeout_mins': None,
- 'tags': None,
+ 'tags': [],
'parameters': {
'AWS::Region': 'ap-southeast-1',
'AWS::StackId': aws_id,
@@ -1187,7 +1186,7 @@ class TestExtractArgs(common.HeatTestCase):
def test_timeout_extract_negative(self):
p = {'timeout_mins': '-100'}
error = self.assertRaises(ValueError, api.extract_args, p)
- self.assertIn('Invalid timeout value', six.text_type(error))
+ self.assertIn('Invalid timeout value', str(error))
def test_timeout_extract_not_present(self):
args = api.extract_args({})
@@ -1201,7 +1200,7 @@ class TestExtractArgs(common.HeatTestCase):
def test_invalid_adopt_stack_data(self):
params = {'adopt_stack_data': json.dumps("foo")}
exc = self.assertRaises(ValueError, api.extract_args, params)
- self.assertIn('Invalid adopt data', six.text_type(exc))
+ self.assertIn('Invalid adopt data', str(exc))
def test_adopt_stack_data_extract_not_present(self):
args = api.extract_args({})
@@ -1249,12 +1248,12 @@ class TestExtractArgs(common.HeatTestCase):
def test_tags_extract_not_map(self):
p = {'tags': {"foo": "bar"}}
exc = self.assertRaises(ValueError, api.extract_args, p)
- self.assertIn('Invalid tags, not a list: ', six.text_type(exc))
+ self.assertIn('Invalid tags, not a list: ', str(exc))
def test_tags_extract_not_string(self):
p = {'tags': ["tag1", 2]}
exc = self.assertRaises(ValueError, api.extract_args, p)
- self.assertIn('Invalid tag, "2" is not a string', six.text_type(exc))
+ self.assertIn('Invalid tag, "2" is not a string', str(exc))
def test_tags_extract_over_limit(self):
p = {'tags': ["tag1", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
@@ -1262,13 +1261,13 @@ class TestExtractArgs(common.HeatTestCase):
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tag, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" is longer '
- 'than 80 characters', six.text_type(exc))
+ 'than 80 characters', str(exc))
def test_tags_extract_comma(self):
p = {'tags': ["tag1", 'tag2,']}
exc = self.assertRaises(ValueError, api.extract_args, p)
self.assertIn('Invalid tag, "tag2," contains a comma',
- six.text_type(exc))
+ str(exc))
class TranslateFilterTest(common.HeatTestCase):
diff --git a/heat/tests/test_engine_service.py b/heat/tests/test_engine_service.py
index 4c25f2b9f..875d44dcd 100644
--- a/heat/tests/test_engine_service.py
+++ b/heat/tests/test_engine_service.py
@@ -11,13 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import uuid
-import mock
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils as json
-import six
from heat.common import context
from heat.common import environment_util as env_util
@@ -290,7 +289,7 @@ class StackConvergenceServiceCreateUpdateTest(common.HeatTestCase):
self.assertIsInstance(result, dict)
self.assertTrue(result['stack_id'])
parser.Stack.load.assert_called_once_with(
- self.ctx, stack=mock.ANY)
+ self.ctx, stack=mock.ANY, check_refresh_cred=True)
templatem.Template.assert_called_once_with(template, files=None)
environment.Environment.assert_called_once_with(params)
@@ -852,7 +851,7 @@ class StackServiceTest(common.HeatTestCase):
ret = self.assertRaises(exception.InvalidTemplateVersions,
self.eng.list_template_versions, self.ctx)
self.assertIn('A template version alias c.something was added',
- six.text_type(ret))
+ str(ret))
@mock.patch('heat.engine.template._get_template_extension_manager')
def test_list_template_functions(self, templ_mock):
@@ -920,7 +919,7 @@ class StackServiceTest(common.HeatTestCase):
self.ctx,
version)
msg = "Template with version %s not found" % version
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_stack_list_outputs(self):
t = template_format.parse(tools.wp_template)
@@ -1041,7 +1040,7 @@ class StackServiceTest(common.HeatTestCase):
self.ctx, mock.ANY, 'bunny')
self.assertEqual(exception.NotFound, ex.exc_info[0])
self.assertEqual('Specified output key bunny not found.',
- six.text_type(ex.exc_info[1]))
+ str(ex.exc_info[1]))
def test_stack_show_output_error(self):
t = template_format.parse(tools.wp_template)
@@ -1202,7 +1201,7 @@ class StackServiceTest(common.HeatTestCase):
msg = (u'"Type" is not a valid keyword '
'inside a resource definition')
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_validate_new_stack_checks_incorrect_sections(self):
template = {'heat_template_version': '2013-05-23',
@@ -1214,7 +1213,7 @@ class StackServiceTest(common.HeatTestCase):
self.ctx, 'test_existing_stack',
parsed_template)
msg = u'The template section is invalid: unknown_section'
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_validate_new_stack_checks_resource_limit(self):
cfg.CONF.set_override('max_resources_per_stack', 5)
@@ -1237,7 +1236,7 @@ class StackServiceTest(common.HeatTestCase):
tmpl.validate.side_effect = AssertionError(expected_message)
exc = self.assertRaises(AssertionError, self.eng._validate_new_stack,
self.ctx, 'stack_name', tmpl)
- self.assertEqual(expected_message, six.text_type(exc))
+ self.assertEqual(expected_message, str(exc))
@mock.patch('heat.engine.service.ThreadGroupManager',
return_value=mock.Mock())
diff --git a/heat/tests/test_environment.py b/heat/tests/test_environment.py
index fc96afb86..6de00dc9f 100644
--- a/heat/tests/test_environment.py
+++ b/heat/tests/test_environment.py
@@ -13,11 +13,10 @@
import os.path
import sys
+from unittest import mock
import fixtures
-import mock
from oslo_config import cfg
-import six
from heat.common import environment_format
from heat.common import exception
@@ -209,7 +208,7 @@ def constraint_mapping():
env = environment.Environment({})
error = self.assertRaises(ValueError,
resources._load_global_environment, env)
- self.assertEqual("oops", six.text_type(error))
+ self.assertEqual("oops", str(error))
def test_constraints_registry_stevedore(self):
env = environment.Environment({})
@@ -788,7 +787,7 @@ class ResourceRegistryTest(common.HeatTestCase):
'\'post-delete\')')
ex = self.assertRaises(exception.InvalidBreakPointHook,
registry.load, {'resources': resources})
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_list_type_validation_invalid_support_status(self):
registry = environment.ResourceRegistry(None, {})
@@ -797,7 +796,7 @@ class ResourceRegistryTest(common.HeatTestCase):
registry.get_types,
support_status='junk')
msg = ('Invalid support status and should be one of %s' %
- six.text_type(support.SUPPORT_STATUSES))
+ str(support.SUPPORT_STATUSES))
self.assertIn(msg, ex.message)
diff --git a/heat/tests/test_environment_format.py b/heat/tests/test_environment_format.py
index facc5d3a3..55e1213e8 100644
--- a/heat/tests/test_environment_format.py
+++ b/heat/tests/test_environment_format.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import yaml
from heat.common import environment_format
diff --git a/heat/tests/test_event.py b/heat/tests/test_event.py
index d35328bb4..8d9a2521b 100644
--- a/heat/tests/test_event.py
+++ b/heat/tests/test_event.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
import uuid
@@ -282,7 +283,7 @@ class EventEncryptedTest(EventCommon):
self.resource.name, self.resource.type())
e.store()
- # verify the resource_properties_data db data is encrypted
+ # verify the resource_properties_data DB data is encrypted
e_obj = event_object.Event.get_all_by_stack(self.resource.context,
self.stack.id)[0]
rpd_id = e_obj['rsrc_prop_data_id']
diff --git a/heat/tests/test_exception.py b/heat/tests/test_exception.py
index 593c29f86..eb2eb8ace 100644
--- a/heat/tests/test_exception.py
+++ b/heat/tests/test_exception.py
@@ -15,9 +15,9 @@
# under the License.
+from unittest import mock
+
import fixtures
-import mock
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -39,7 +39,7 @@ class TestHeatException(common.HeatTestCase):
def test_format_string_error_message(self):
message = "This format %(message)s should work"
err = exception.Error(message)
- self.assertEqual(message, six.text_type(err))
+ self.assertEqual(message, str(err))
class TestStackValidationFailed(common.HeatTestCase):
@@ -135,7 +135,7 @@ class TestStackValidationFailed(common.HeatTestCase):
try:
raise exception.StackValidationFailed(**self.kwargs)
except exception.StackValidationFailed as ex:
- self.assertIn(self.expected, six.text_type(ex))
+ self.assertIn(self.expected, str(ex))
self.assertIn(self.called_error, ex.error)
self.assertEqual(self.called_path, ex.path)
self.assertEqual(self.called_msg, ex.error_message)
diff --git a/heat/tests/test_fault_middleware.py b/heat/tests/test_fault_middleware.py
index 37b0316af..2a49efd62 100644
--- a/heat/tests/test_fault_middleware.py
+++ b/heat/tests/test_fault_middleware.py
@@ -17,7 +17,6 @@ import re
from oslo_config import cfg
from oslo_log import log
from oslo_messaging._drivers import common as rpc_common
-import six
import webob
import heat.api.middleware.fault as fault
@@ -123,7 +122,7 @@ class FaultMiddlewareTest(common.HeatTestCase):
serialized, ["heat.common.exception"])
wrapper = fault.FaultWrapper(None)
msg = wrapper._error(remote_error)
- expected_message, expected_traceback = six.text_type(
+ expected_message, expected_traceback = str(
remote_error).split('\n', 1)
expected = {'code': 404,
'error': {'message': expected_message,
@@ -134,8 +133,7 @@ class FaultMiddlewareTest(common.HeatTestCase):
self.assertEqual(expected, msg)
def remote_exception_helper(self, name, error):
- if six.PY3:
- error.args = ()
+ error.args = ()
exc_info = (type(error), error, None)
serialized = rpc_common.serialize_remote_exception(exc_info)
@@ -230,7 +228,7 @@ class FaultMiddlewareTest(common.HeatTestCase):
wrapper = fault.FaultWrapper(None)
msg = wrapper._error(remote_error)
- expected_message, expected_traceback = six.text_type(
+ expected_message, expected_traceback = str(
remote_error).split('\n', 1)
expected = {'code': 404,
'error': {'message': expected_message,
diff --git a/heat/tests/test_function.py b/heat/tests/test_function.py
index 2073ede4b..4edc31877 100644
--- a/heat/tests/test_function.py
+++ b/heat/tests/test_function.py
@@ -14,7 +14,6 @@
import copy
import uuid
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -42,6 +41,11 @@ class TestFunction(function.Function):
return 'wibble'
+class NullFunction(function.Function):
+ def result(self):
+ return Ellipsis
+
+
class TestFunctionKeyError(function.Function):
def result(self):
raise TypeError
@@ -77,7 +81,7 @@ class FunctionTest(common.HeatTestCase):
func1 = TestFunction(None, 'foo', ['bar', 'baz'])
expected = '%s %s' % ("<heat.tests.test_function.TestFunction",
"{foo: ['bar', 'baz']} -> 'wibble'>")
- self.assertEqual(expected, six.text_type(func1))
+ self.assertEqual(expected, str(func1))
def test_function_stack_reference_none(self):
func1 = TestFunction(None, 'foo', ['bar', 'baz'])
@@ -87,7 +91,7 @@ class FunctionTest(common.HeatTestCase):
func1 = TestFunctionKeyError(None, 'foo', ['bar', 'baz'])
expected = '%s %s' % ("<heat.tests.test_function.TestFunctionKeyError",
"{foo: ['bar', 'baz']} -> ???>")
- self.assertEqual(expected, six.text_type(func1))
+ self.assertEqual(expected, str(func1))
def test_function_eq_exception_key_error(self):
func1 = TestFunctionKeyError(None, 'foo', ['bar', 'baz'])
@@ -106,7 +110,7 @@ class FunctionTest(common.HeatTestCase):
expected = '%s %s' % (
"<heat.tests.test_function.TestFunctionValueError",
"{foo: ['bar', 'baz']} -> ???>")
- self.assertEqual(expected, six.text_type(func1))
+ self.assertEqual(expected, str(func1))
def test_function_eq_exception_value_error(self):
func1 = TestFunctionValueError(None, 'foo', ['bar', 'baz'])
@@ -126,7 +130,7 @@ class FunctionTest(common.HeatTestCase):
"<heat.tests.test_function.TestFunctionResult",
"{foo: ['bar', 'baz']}",
"{'foo': ['bar', 'baz']}>")
- self.assertEqual(expected, six.text_type(func1))
+ self.assertEqual(expected, str(func1))
def test_copy(self):
func = TestFunction(None, 'foo', ['bar', 'baz'])
@@ -170,6 +174,28 @@ class ResolveTest(common.HeatTestCase):
result)
self.assertIsNot(result, snippet)
+ def test_resolve_func_with_null(self):
+ func = NullFunction(None, 'foo', ['bar', 'baz'])
+
+ self.assertIsNone(function.resolve(func))
+ self.assertIs(Ellipsis, function.resolve(func, nullable=True))
+
+ def test_resolve_dict_with_null(self):
+ func = NullFunction(None, 'foo', ['bar', 'baz'])
+ snippet = {'foo': 'bar', 'baz': func, 'blarg': 'wibble'}
+
+ result = function.resolve(snippet)
+
+ self.assertEqual({'foo': 'bar', 'blarg': 'wibble'}, result)
+
+ def test_resolve_list_with_null(self):
+ func = NullFunction(None, 'foo', ['bar', 'baz'])
+ snippet = ['foo', func, 'bar']
+
+ result = function.resolve(snippet)
+
+ self.assertEqual(['foo', 'bar'], result)
+
class ValidateTest(common.HeatTestCase):
def setUp(self):
@@ -278,7 +304,7 @@ class ValidateGetAttTest(common.HeatTestCase):
ex = self.assertRaises(exception.InvalidTemplateReference,
func.validate)
self.assertEqual('The specified reference "test_rsrc" (in unknown) '
- 'is incorrect.', six.text_type(ex))
+ 'is incorrect.', str(ex))
def test_resource_no_attribute_with_default_fn_get_att(self):
res_defn = rsrc_defn.ResourceDefinition('test_rsrc',
@@ -294,7 +320,7 @@ class ValidateGetAttTest(common.HeatTestCase):
ex = self.assertRaises(exception.InvalidTemplateAttribute,
func.validate)
self.assertEqual('The Referenced Attribute (test_rsrc Bar) '
- 'is incorrect.', six.text_type(ex))
+ 'is incorrect.', str(ex))
def test_resource_no_attribute_with_overwritten_fn_get_att(self):
res_defn = rsrc_defn.ResourceDefinition('test_rsrc',
@@ -315,4 +341,4 @@ class ValidateGetAttTest(common.HeatTestCase):
self.stack.defn, 'Fn::GetAtt', [self.rsrc.name])
self.assertEqual('Arguments to "Fn::GetAtt" must be '
'of the form [resource_name, attribute]',
- six.text_type(ex))
+ str(ex))
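(The NullFunction tests added above exercise function.resolve() dropping map and list entries whose function yields no value. A hypothetical, self-contained sketch of that behaviour follows; the sentinel and helper name are illustrative assumptions, not Heat's implementation.)

_NULL = Ellipsis  # assumed "no value" marker, mirroring NullFunction.result() above

def resolve_dropping_nulls(snippet):
    # Recursively resolve a snippet, omitting entries that resolve to the sentinel.
    if isinstance(snippet, dict):
        resolved = {k: resolve_dropping_nulls(v) for k, v in snippet.items()}
        return {k: v for k, v in resolved.items() if v is not _NULL}
    if isinstance(snippet, list):
        return [v for v in (resolve_dropping_nulls(i) for i in snippet)
                if v is not _NULL]
    return snippet

assert resolve_dropping_nulls({'foo': 'bar', 'baz': _NULL, 'blarg': 'wibble'}) == \
    {'foo': 'bar', 'blarg': 'wibble'}
assert resolve_dropping_nulls(['foo', _NULL, 'bar']) == ['foo', 'bar']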
diff --git a/heat/tests/test_grouputils.py b/heat/tests/test_grouputils.py
index 5c26e60be..32ac5de7c 100644
--- a/heat/tests/test_grouputils.py
+++ b/heat/tests/test_grouputils.py
@@ -11,8 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import six
+from unittest import mock
from heat.common import grouputils
from heat.common import identifier
@@ -50,7 +49,7 @@ class GroupUtilsTest(common.HeatTestCase):
group.nested.return_value = stack
# member list (sorted)
- members = [r for r in six.itervalues(stack)]
+ members = [r for r in stack.values()]
expected = sorted(members, key=lambda r: (r.created_time, r.name))
actual = grouputils.get_members(group)
self.assertEqual(expected, actual)
diff --git a/heat/tests/test_hacking.py b/heat/tests/test_hacking.py
index 105e8d380..5863b5799 100644
--- a/heat/tests/test_hacking.py
+++ b/heat/tests/test_hacking.py
@@ -25,7 +25,7 @@ class HackingTestCase(common.HeatTestCase):
"obj.items()"))))
self.assertEqual(0, len(list(checks.check_python3_no_iteritems(
- "six.iteritems(obj)"))))
+ "obj.items()"))))
def test_dict_iterkeys(self):
self.assertEqual(1, len(list(checks.check_python3_no_iterkeys(
@@ -35,7 +35,7 @@ class HackingTestCase(common.HeatTestCase):
"obj.keys()"))))
self.assertEqual(0, len(list(checks.check_python3_no_iterkeys(
- "six.iterkeys(obj)"))))
+ "obj.keys()"))))
def test_dict_itervalues(self):
self.assertEqual(1, len(list(checks.check_python3_no_itervalues(
@@ -45,4 +45,4 @@ class HackingTestCase(common.HeatTestCase):
"obj.values()"))))
self.assertEqual(0, len(list(checks.check_python3_no_itervalues(
- "six.itervalues(obj)"))))
+ "obj.values()"))))
diff --git a/heat/tests/test_hot.py b/heat/tests/test_hot.py
index 753123921..a0ec62ae8 100644
--- a/heat/tests/test_hot.py
+++ b/heat/tests/test_hot.py
@@ -12,8 +12,7 @@
# under the License.
import copy
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.common import identifier
@@ -72,6 +71,10 @@ hot_pike_tpl_empty = template_format.parse('''
heat_template_version: 2017-09-01
''')
+hot_wallaby_tpl_empty = template_format.parse('''
+heat_template_version: 2021-04-16
+''')
+
hot_tpl_empty_sections = template_format.parse('''
heat_template_version: 2013-05-23
parameters:
@@ -244,7 +247,7 @@ class HOTemplateTest(common.HeatTestCase):
error = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('Each resource must contain a type key.',
- six.text_type(error))
+ str(error))
def test_translate_resources_bad_type(self):
"""Test translation of resources including invalid keyword."""
@@ -269,7 +272,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"Type" is not a valid keyword '
'inside a resource definition',
- six.text_type(err))
+ str(err))
def test_translate_resources_bad_properties(self):
"""Test translation of resources including invalid keyword."""
@@ -294,7 +297,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"Properties" is not a valid keyword '
'inside a resource definition',
- six.text_type(err))
+ str(err))
def test_translate_resources_resources_without_name(self):
hot_tpl = template_format.parse('''
@@ -312,8 +315,8 @@ class HOTemplateTest(common.HeatTestCase):
error = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"resources" must contain a map of resource maps. '
- 'Found a [%s] instead' % six.text_type,
- six.text_type(error))
+ 'Found a [%s] instead' % str,
+ str(error))
def test_translate_resources_bad_metadata(self):
"""Test translation of resources including invalid keyword."""
@@ -339,7 +342,7 @@ class HOTemplateTest(common.HeatTestCase):
self.assertEqual('"Metadata" is not a valid keyword '
'inside a resource definition',
- six.text_type(err))
+ str(err))
def test_translate_resources_bad_depends_on(self):
"""Test translation of resources including invalid keyword."""
@@ -364,7 +367,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"DependsOn" is not a valid keyword '
'inside a resource definition',
- six.text_type(err))
+ str(err))
def test_translate_resources_bad_deletion_policy(self):
"""Test translation of resources including invalid keyword."""
@@ -389,7 +392,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"DeletionPolicy" is not a valid keyword '
'inside a resource definition',
- six.text_type(err))
+ str(err))
def test_translate_resources_bad_update_policy(self):
"""Test translation of resources including invalid keyword."""
@@ -414,7 +417,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl.__getitem__, tmpl.RESOURCES)
self.assertEqual('"UpdatePolicy" is not a valid keyword '
'inside a resource definition',
- six.text_type(err))
+ str(err))
def test_get_outputs_good(self):
"""Test get outputs."""
@@ -445,7 +448,7 @@ class HOTemplateTest(common.HeatTestCase):
error = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.OUTPUTS)
self.assertEqual('Each output must contain a value key.',
- six.text_type(error))
+ str(error))
def test_get_outputs_bad_without_name(self):
"""Test get outputs without name."""
@@ -461,8 +464,8 @@ class HOTemplateTest(common.HeatTestCase):
error = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.OUTPUTS)
self.assertEqual('"outputs" must contain a map of output maps. '
- 'Found a [%s] instead' % six.text_type,
- six.text_type(error))
+ 'Found a [%s] instead' % str,
+ str(error))
def test_get_outputs_bad_description(self):
"""Test get outputs with bad description name."""
@@ -478,7 +481,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.OUTPUTS)
- self.assertIn('Description', six.text_type(err))
+ self.assertIn('Description', str(err))
def test_get_outputs_bad_value(self):
"""Test get outputs with bad value name."""
@@ -494,7 +497,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl = template.Template(hot_tpl)
err = self.assertRaises(exception.StackValidationFailed,
tmpl.__getitem__, tmpl.OUTPUTS)
- self.assertIn('Value', six.text_type(err))
+ self.assertIn('Value', str(err))
def test_resource_group_list_join(self):
"""Test list_join on a ResourceGroup's inner attributes
@@ -596,7 +599,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl = template.Template(hot_tpl_empty)
ex = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
self.assertIn('"str_replace" params must be strings or numbers, '
- 'param jsonvar1 is not valid', six.text_type(ex))
+ 'param jsonvar1 is not valid', str(ex))
def test_liberty_str_replace_map_param(self):
"""Test str_replace function with non-string map param."""
@@ -617,7 +620,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl = template.Template(hot_tpl_empty)
ex = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
self.assertIn('"str_replace" params must be strings or numbers, '
- 'param listvar1 is not valid', six.text_type(ex))
+ 'param listvar1 is not valid', str(ex))
def test_liberty_str_replace_list_param(self):
"""Test str_replace function with non-string param."""
@@ -751,7 +754,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl = template.Template(hot_ocata_tpl_empty)
ex = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertEqual('The following params were not found in the '
- 'template: var3', six.text_type(ex))
+ 'template: var3', str(ex))
snippet = {'str_replace_strict':
{'template': 'Template var1 string var2',
@@ -760,7 +763,7 @@ class HOTemplateTest(common.HeatTestCase):
ex = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertEqual('The following params were not found in the '
- 'template: var0', six.text_type(ex))
+ 'template: var0', str(ex))
# str_replace_vstrict has same behaviour
snippet = {'str_replace_vstrict':
@@ -772,7 +775,7 @@ class HOTemplateTest(common.HeatTestCase):
tmpl = template.Template(hot_pike_tpl_empty)
ex = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertEqual('The following params were not found in the '
- 'template: longvarname,var0,var', six.text_type(ex))
+ 'template: longvarname,var0,var', str(ex))
def test_str_replace_strict_empty_param_ok(self):
"""Test str_replace_strict function with empty params."""
@@ -799,7 +802,7 @@ class HOTemplateTest(common.HeatTestCase):
snippet['str_replace_vstrict']['params']['var2'] = val
ex = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('str_replace_vstrict has an undefined or empty '
- 'value for param var2', six.text_type(ex))
+ 'value for param var2', str(ex))
def test_str_replace_invalid_param_keys(self):
"""Test str_replace function parameter keys.
@@ -822,7 +825,7 @@ class HOTemplateTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
self.assertIn('"str_replace" syntax should be str_replace:\\n',
- six.text_type(ex))
+ str(ex))
def test_str_replace_strict_invalid_param_keys(self):
"""Test str_replace function parameter keys.
@@ -843,7 +846,7 @@ class HOTemplateTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
self.assertIn('"str_replace_strict" syntax should be '
- 'str_replace_strict:\\n', six.text_type(ex))
+ 'str_replace_strict:\\n', str(ex))
def test_str_replace_invalid_param_types(self):
"""Test str_replace function parameter values.
@@ -865,7 +868,7 @@ class HOTemplateTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
self.assertIn('str_replace: "str_replace" parameters must be a'
- ' mapping', six.text_type(ex))
+ ' mapping', str(ex))
def test_str_replace_invalid_param_type_init(self):
"""Test str_replace function parameter values.
@@ -879,7 +882,7 @@ class HOTemplateTest(common.HeatTestCase):
TypeError,
cfn_functions.Replace,
None, 'Fn::Replace', args)
- self.assertIn('parameters must be a mapping', six.text_type(ex))
+ self.assertIn('parameters must be a mapping', str(ex))
def test_str_replace_ref_get_param(self):
"""Test str_replace referencing parameters."""
@@ -938,7 +941,7 @@ class HOTemplateTest(common.HeatTestCase):
snippet, tmpl, stack)
self.assertEqual(
'Argument to "get_file" must be a string',
- six.text_type(notStrErr))
+ str(notStrErr))
def test_get_file_missing_files(self):
"""Test get_file function with no matching key in files section."""
@@ -955,7 +958,7 @@ class HOTemplateTest(common.HeatTestCase):
self.assertEqual(
('No content found in the "files" section for '
'get_file path: file:///tmp/foo.yaml'),
- six.text_type(missingErr))
+ str(missingErr))
def test_get_file_nested_does_not_resolve(self):
"""Test get_file function does not resolve nested calls."""
@@ -999,7 +1002,7 @@ class HOTemplateTest(common.HeatTestCase):
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, k_tmpl)
self.assertEqual("Items to join must be strings not {'foo': 'json'}",
- six.text_type(exc))
+ str(exc))
def test_join_object_type_fail(self):
not_serializable = object
@@ -1007,10 +1010,10 @@ class HOTemplateTest(common.HeatTestCase):
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, l_tmpl)
self.assertIn('Items to join must be string, map or list not',
- six.text_type(exc))
+ str(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, k_tmpl)
- self.assertIn("Items to join must be strings", six.text_type(exc))
+ self.assertIn("Items to join must be strings", str(exc))
def test_join_json_fail(self):
not_serializable = object
@@ -1018,9 +1021,9 @@ class HOTemplateTest(common.HeatTestCase):
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, l_tmpl)
self.assertIn('Items to join must be string, map or list',
- six.text_type(exc))
+ str(exc))
self.assertIn("failed json serialization",
- six.text_type(exc))
+ str(exc))
def test_join_invalid(self):
snippet = {'list_join': 'bad'}
@@ -1028,25 +1031,25 @@ class HOTemplateTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
self.assertIn('list_join: Incorrect arguments to "list_join"',
- six.text_type(exc))
+ str(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
self.assertIn('list_join: Incorrect arguments to "list_join"',
- six.text_type(exc1))
+ str(exc1))
def test_join_int_invalid(self):
snippet = {'list_join': 5}
l_tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
- self.assertIn('list_join: Incorrect arguments', six.text_type(exc))
+ self.assertIn('list_join: Incorrect arguments', str(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
- self.assertIn('list_join: Incorrect arguments', six.text_type(exc1))
+ self.assertIn('list_join: Incorrect arguments', str(exc1))
def test_join_invalid_value(self):
snippet = {'list_join': [',']}
@@ -1054,19 +1057,19 @@ class HOTemplateTest(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, l_tmpl)
self.assertIn('list_join: Incorrect arguments to "list_join"',
- six.text_type(exc))
+ str(exc))
k_tmpl = template.Template(hot_kilo_tpl_empty)
exc1 = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, k_tmpl)
self.assertIn('list_join: Incorrect arguments to "list_join"',
- six.text_type(exc1))
+ str(exc1))
def test_join_invalid_multiple(self):
snippet = {'list_join': [',', 'bad', ['foo']]}
tmpl = template.Template(hot_liberty_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
- self.assertIn('must operate on a list', six.text_type(exc))
+ self.assertIn('must operate on a list', str(exc))
def test_merge(self):
snippet = {'map_merge': [{'f1': 'b1', 'f2': 'b2'}, {'f1': 'b2'}]}
@@ -1086,7 +1089,7 @@ class HOTemplateTest(common.HeatTestCase):
snippet = {'map_merge': [{'f1': 'b1', 'f2': 'b2'}, ['f1', 'b2']]}
tmpl = template.Template(hot_mitaka_tpl_empty)
exc = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
- self.assertIn('Incorrect arguments', six.text_type(exc))
+ self.assertIn('Incorrect arguments', str(exc))
def test_merge_containing_repeat(self):
snippet = {'map_merge': {'repeat': {'template': {'ROLE': 'ROLE'},
@@ -1385,12 +1388,12 @@ class HOTemplateTest(common.HeatTestCase):
error_msg = ('equals: Arguments to "equals" must be '
'of the form: [value_1, value_2]')
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
snippet = {'equals': "invalid condition"}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_equals_with_non_supported_function(self):
@@ -1400,7 +1403,7 @@ class HOTemplateTest(common.HeatTestCase):
{'get_attr': [None, 'att2']}]}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
- self.assertIn('"get_attr" is invalid', six.text_type(exc))
+ self.assertIn('"get_attr" is invalid', str(exc))
def test_if(self):
snippet = {'if': ['create_prod', 'value_if_true', 'value_if_false']}
@@ -1507,13 +1510,42 @@ resources:
self.assertEqual('', self.stack['AResource'].properties['Foo'])
def test_if_invalid_args(self):
- snippet = {'if': ['create_prod', 'one_value']}
+ snippets = [
+ {'if': ['create_prod', 'one_value']},
+ {'if': ['create_prod', 'one_value', 'two_values', 'three_values']},
+ ]
tmpl = template.Template(hot_newton_tpl_empty)
- exc = self.assertRaises(exception.StackValidationFailed,
- self.resolve, snippet, tmpl)
- self.assertIn('Arguments to "if" must be of the form: '
- '[condition_name, value_if_true, value_if_false]',
- six.text_type(exc))
+ for snippet in snippets:
+ exc = self.assertRaises(exception.StackValidationFailed,
+ self.resolve, snippet, tmpl)
+ self.assertIn('Arguments to "if" must be of the form: '
+ '[condition_name, value_if_true, value_if_false]',
+ str(exc))
+
+ def test_if_nullable_invalid_args(self):
+ snippets = [
+ {'if': ['create_prod']},
+ {'if': ['create_prod', 'one_value', 'two_values', 'three_values']},
+ ]
+ tmpl = template.Template(hot_wallaby_tpl_empty)
+ for snippet in snippets:
+ exc = self.assertRaises(exception.StackValidationFailed,
+ self.resolve, snippet, tmpl)
+ self.assertIn('Arguments to "if" must be of the form: '
+ '[condition_name, value_if_true, value_if_false]',
+ str(exc))
+
+ def test_if_nullable(self):
+ snippet = {
+ 'single': {'if': [False, 'value_if_true']},
+ 'nested_true': {'if': [True, {'if': [False, 'foo']}, 'bar']},
+ 'nested_false': {'if': [False, 'baz', {'if': [False, 'quux']}]},
+ 'control': {'if': [False, True, None]},
+ }
+
+ tmpl = template.Template(hot_wallaby_tpl_empty)
+ resolved = self.resolve(snippet, tmpl, None)
+ self.assertEqual({'control': None}, resolved)
def test_if_condition_name_non_existing(self):
snippet = {'if': ['cd_not_existing', 'value_true', 'value_false']}
@@ -1525,8 +1557,8 @@ resources:
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl, stack)
self.assertIn('Invalid condition "cd_not_existing"',
- six.text_type(exc))
- self.assertIn('if:', six.text_type(exc))
+ str(exc))
+ self.assertIn('if:', str(exc))
def _test_repeat(self, templ=hot_kilo_tpl_empty):
"""Test repeat function."""
@@ -1701,7 +1733,7 @@ resources:
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
self.assertIn('"permutations" should be boolean type '
- 'for repeat function', six.text_type(exc))
+ 'for repeat function', str(exc))
def test_repeat_bad_args(self):
"""Tests reporting error by repeat function.
@@ -1757,7 +1789,7 @@ resources:
]
for snippet in invalid_snippets:
exc = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
- self.assertIn('must be a list of strings', six.text_type(exc))
+ self.assertIn('must be a list of strings', str(exc))
def test_digest_incorrect_number_arguments(self):
tmpl = template.Template(hot_kilo_tpl_empty)
@@ -1771,14 +1803,14 @@ resources:
for snippet in invalid_snippets:
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('usage: ["<algorithm>", "<value>"]',
- six.text_type(exc))
+ str(exc))
def test_digest_invalid_algorithm(self):
tmpl = template.Template(hot_kilo_tpl_empty)
snippet = {'digest': ['invalid_algorithm', 'foobar']}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
- self.assertIn('Algorithm must be one of', six.text_type(exc))
+ self.assertIn('Algorithm must be one of', str(exc))
def test_str_split(self):
tmpl = template.Template(hot_liberty_tpl_empty)
@@ -1802,28 +1834,28 @@ resources:
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',', 'bar,baz', 'bad']}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
- self.assertIn('Incorrect index to \"str_split\"', six.text_type(exc))
+ self.assertIn('Incorrect index to \"str_split\"', str(exc))
def test_str_split_index_out_of_range(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',', 'bar,baz', '2']}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
expected = 'Incorrect index to \"str_split\" should be between 0 and 1'
- self.assertEqual(expected, six.text_type(exc))
+ self.assertEqual(expected, str(exc))
def test_str_split_bad_novalue(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': [',']}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('Incorrect arguments to \"str_split\"',
- six.text_type(exc))
+ str(exc))
def test_str_split_bad_empty(self):
tmpl = template.Template(hot_liberty_tpl_empty)
snippet = {'str_split': []}
exc = self.assertRaises(ValueError, self.resolve, snippet, tmpl)
self.assertIn('Incorrect arguments to \"str_split\"',
- six.text_type(exc))
+ str(exc))
def test_str_split_none_string_to_split(self):
tmpl = template.Template(hot_liberty_tpl_empty)
@@ -1857,11 +1889,11 @@ resources:
# Hot template test
keyError = self.assertRaises(KeyError, tmpl.__getitem__, 'parameters')
- self.assertIn(err_str, six.text_type(keyError))
+ self.assertIn(err_str, str(keyError))
# CFN template test
keyError = self.assertRaises(KeyError, tmpl.__getitem__, 'Parameters')
- self.assertIn(err_str, six.text_type(keyError))
+ self.assertIn(err_str, str(keyError))
def test_parameters_section_not_iterable(self):
"""Check parameters section is not returned using the template as iter.
@@ -1964,7 +1996,7 @@ resources:
self.resolve,
snippet,
stack.t, stack)
- self.assertIn(next(iter(snippet)), six.text_type(error))
+ self.assertIn(next(iter(snippet)), str(error))
def test_resource_facade_missing_deletion_policy(self):
snippet = {'resource_facade': 'deletion_policy'}
@@ -2042,7 +2074,7 @@ resources:
empty = template.Template(copy.deepcopy(hot_tpl_empty))
stack = parser.Stack(utils.dummy_context(), 'test_stack', source)
- for defn in six.itervalues(source.outputs(stack)):
+ for defn in source.outputs(stack).values():
empty.add_output(defn)
self.assertEqual(hot_tpl['outputs'], empty.t['outputs'])
@@ -2377,7 +2409,7 @@ resources:
tmpl = template.Template(hot_pike_tpl_empty)
msg = 'Incorrect arguments'
exc = self.assertRaises(TypeError, self.resolve, snippet, tmpl)
- self.assertIn(msg, six.text_type(exc))
+ self.assertIn(msg, str(exc))
def test_list_concat_with_dict_arg(self):
snippet = {'list_concat': [{'k1': 'v2'}, ['v3', 'v4']]}
@@ -2409,7 +2441,7 @@ resources:
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
msg = 'Incorrect arguments to '
- self.assertIn(msg, six.text_type(exc))
+ self.assertIn(msg, str(exc))
def test_contains_with_invalid_args_number(self):
snippet = {'contains': ['v1', ['v1', 'v2'], 'redundant']}
@@ -2417,7 +2449,7 @@ resources:
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, tmpl)
msg = 'must be of the form: [value1, [value1, value2]]'
- self.assertIn(msg, six.text_type(exc))
+ self.assertIn(msg, str(exc))
def test_contains_with_invalid_sequence(self):
snippet = {'contains': ['v1', {'key': 'value'}]}
@@ -2425,7 +2457,7 @@ resources:
exc = self.assertRaises(TypeError,
self.resolve, snippet, tmpl)
msg = 'should be a sequence'
- self.assertIn(msg, six.text_type(exc))
+ self.assertIn(msg, str(exc))
class HotStackTest(common.HeatTestCase):
@@ -2855,7 +2887,7 @@ class StackGetAttrValidationTest(common.HeatTestCase):
try:
stack.validate()
except exception.StackValidationFailed as exc:
- self.fail("Validation should have passed: %s" % six.text_type(exc))
+ self.fail("Validation should have passed: %s" % str(exc))
self.assertEqual([],
stack.resources['resource2'].properties['a_list'])
self.assertEqual({},
@@ -2872,7 +2904,7 @@ class StackGetAttrValidationTest(common.HeatTestCase):
try:
stack.validate()
except exception.StackValidationFailed as exc:
- self.fail("Validation should have passed: %s" % six.text_type(exc))
+ self.fail("Validation should have passed: %s" % str(exc))
self.assertEqual([],
stack.resources['resource2'].properties['a_list'])
self.assertEqual({},
@@ -3023,19 +3055,19 @@ class HOTParamValidatorTest(common.HeatTestCase):
value = 'wp'
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertIn(len_desc, six.text_type(err))
+ self.assertIn(len_desc, str(err))
value = 'abcdefghijklmnopq'
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertIn(len_desc, six.text_type(err))
+ self.assertIn(len_desc, str(err))
value = 'abcdefgh1'
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertIn(pattern_desc1, six.text_type(err))
+ self.assertIn(pattern_desc1, str(err))
value = 'Abcdefghi'
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertIn(pattern_desc2, six.text_type(err))
+ self.assertIn(pattern_desc2, str(err))
value = 'abcdefghi'
self.assertTrue(v(value))
@@ -3073,22 +3105,22 @@ class HOTParamValidatorTest(common.HeatTestCase):
value = 'wp'
err = self.assertRaises(exception.StackValidationFailed,
run_parameters, value)
- self.assertIn(len_desc, six.text_type(err))
+ self.assertIn(len_desc, str(err))
value = 'abcdefghijklmnopq'
err = self.assertRaises(exception.StackValidationFailed,
run_parameters, value)
- self.assertIn(len_desc, six.text_type(err))
+ self.assertIn(len_desc, str(err))
value = 'abcdefgh1'
err = self.assertRaises(exception.StackValidationFailed,
run_parameters, value)
- self.assertIn(pattern_desc1, six.text_type(err))
+ self.assertIn(pattern_desc1, str(err))
value = 'Abcdefghi'
err = self.assertRaises(exception.StackValidationFailed,
run_parameters, value)
- self.assertIn(pattern_desc2, six.text_type(err))
+ self.assertIn(pattern_desc2, str(err))
value = 'abcdefghi'
self.assertTrue(run_parameters(value))
@@ -3118,11 +3150,11 @@ class HOTParamValidatorTest(common.HeatTestCase):
value = 29999
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertIn(range_desc, six.text_type(err))
+ self.assertIn(range_desc, str(err))
value = 50001
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertIn(range_desc, six.text_type(err))
+ self.assertIn(range_desc, str(err))
value = 30000
self.assertTrue(v(value))
@@ -3161,11 +3193,11 @@ class HOTParamValidatorTest(common.HeatTestCase):
value = "1"
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertEqual(desc, six.text_type(err))
+ self.assertEqual(desc, str(err))
value = "2"
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertEqual(desc, six.text_type(err))
+ self.assertEqual(desc, str(err))
value = "0"
self.assertTrue(v(value))
@@ -3198,7 +3230,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
param['db_port'])
err = self.assertRaises(exception.InvalidSchemaError,
schema.validate)
- self.assertIn(range_desc, six.text_type(err))
+ self.assertIn(range_desc, str(err))
def test_validate_schema_wrong_key(self):
hot_tpl = template_format.parse('''
@@ -3211,7 +3243,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
exception.InvalidSchemaError, cfn_param.CfnParameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual("Invalid key 'foo' for parameter (param1)",
- six.text_type(error))
+ str(error))
def test_validate_schema_no_type(self):
hot_tpl = template_format.parse('''
@@ -3224,7 +3256,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
exception.InvalidSchemaError, cfn_param.CfnParameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual("Missing parameter type for parameter: param1",
- six.text_type(error))
+ str(error))
def test_validate_schema_unknown_type(self):
hot_tpl = template_format.parse('''
@@ -3237,7 +3269,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
exception.InvalidSchemaError, cfn_param.CfnParameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
- "Invalid type (Unicode)", six.text_type(error))
+ "Invalid type (Unicode)", str(error))
def test_validate_schema_constraints(self):
hot_tpl = template_format.parse('''
@@ -3254,7 +3286,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid key 'allowed_valus' for parameter constraints",
- six.text_type(error))
+ str(error))
def test_validate_schema_constraints_not_list(self):
hot_tpl = template_format.parse('''
@@ -3270,7 +3302,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid parameter constraints for parameter param1, "
- "expected a list", six.text_type(error))
+ "expected a list", str(error))
def test_validate_schema_constraints_not_mapping(self):
hot_tpl = template_format.parse('''
@@ -3286,7 +3318,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid parameter constraints, expected a mapping",
- six.text_type(error))
+ str(error))
def test_validate_schema_empty_constraints(self):
hot_tpl = template_format.parse('''
@@ -3301,7 +3333,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
error = self.assertRaises(
exception.InvalidSchemaError, cfn_param.CfnParameters,
"stack_testit", template.Template(hot_tpl))
- self.assertEqual("No constraint expressed", six.text_type(error))
+ self.assertEqual("No constraint expressed", str(error))
def test_validate_schema_constraints_range_wrong_format(self):
hot_tpl = template_format.parse('''
@@ -3318,7 +3350,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid range constraint, expected a mapping",
- six.text_type(error))
+ str(error))
def test_validate_schema_constraints_range_invalid_key(self):
hot_tpl = template_format.parse('''
@@ -3334,7 +3366,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
exception.InvalidSchemaError, cfn_param.CfnParameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
- "Invalid key 'foo' for range constraint", six.text_type(error))
+ "Invalid key 'foo' for range constraint", str(error))
def test_validate_schema_constraints_length_wrong_format(self):
hot_tpl = template_format.parse('''
@@ -3351,7 +3383,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
"Invalid length constraint, expected a mapping",
- six.text_type(error))
+ str(error))
def test_validate_schema_constraints_length_invalid_key(self):
hot_tpl = template_format.parse('''
@@ -3367,7 +3399,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
exception.InvalidSchemaError, cfn_param.CfnParameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
- "Invalid key 'foo' for length constraint", six.text_type(error))
+ "Invalid key 'foo' for length constraint", str(error))
def test_validate_schema_constraints_wrong_allowed_pattern(self):
hot_tpl = template_format.parse('''
@@ -3383,7 +3415,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
exception.InvalidSchemaError, cfn_param.CfnParameters,
"stack_testit", template.Template(hot_tpl))
self.assertEqual(
- "AllowedPattern must be a string", six.text_type(error))
+ "AllowedPattern must be a string", str(error))
def test_modulo_constraint(self):
modulo_desc = 'Value must be an odd number'
@@ -3409,11 +3441,11 @@ class HOTParamValidatorTest(common.HeatTestCase):
value = 2
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertIn(modulo_desc, six.text_type(err))
+ self.assertIn(modulo_desc, str(err))
value = 100
err = self.assertRaises(exception.StackValidationFailed, v, value)
- self.assertIn(modulo_desc, six.text_type(err))
+ self.assertIn(modulo_desc, str(err))
value = 1
self.assertTrue(v(value))
@@ -3442,7 +3474,7 @@ class HOTParamValidatorTest(common.HeatTestCase):
schema = hot_param.HOTParamSchema20170224.from_dict(
modulo_name, param[modulo_name])
err = self.assertRaises(exception.InvalidSchemaError, schema.validate)
- self.assertIn(modulo_desc, six.text_type(err))
+ self.assertIn(modulo_desc, str(err))
class TestGetAttAllAttributes(common.HeatTestCase):
@@ -3566,7 +3598,7 @@ class TestGetAttAllAttributes(common.HeatTestCase):
if self.raises is not None:
ex = self.assertRaises(self.raises,
self.resolve, self.snippet, tmpl, stack)
- self.assertEqual(self.expected, six.text_type(ex))
+ self.assertEqual(self.expected, str(ex))
else:
self.assertEqual(self.expected,
self.resolve(self.snippet, tmpl, stack))
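(The new test_if_nullable case above pins down the 2021-04-16 (Wallaby) behaviour: a two-argument "if" whose condition is false produces no value at all, so the surrounding map entry is dropped and only {'control': None} survives. A rough standalone sketch of that semantics for the flat case; the MISSING sentinel and function name are assumptions for illustration, not Heat's API.)

MISSING = object()  # illustrative "no value" marker

def resolve_if(args):
    # args: [condition, value_if_true] or [condition, value_if_true, value_if_false]
    cond, *values = args
    if cond:
        return values[0]
    return values[1] if len(values) > 1 else MISSING

assert resolve_if([False, 'value_if_true']) is MISSING   # entry would be dropped
assert resolve_if([False, True, None]) is None           # 'control' keeps its explicit None
assert resolve_if([True, 'foo']) == 'foo'                # true branch still returned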
diff --git a/heat/tests/test_lifecycle_plugin_utils.py b/heat/tests/test_lifecycle_plugin_utils.py
index 352922b4a..9d8f42bf4 100644
--- a/heat/tests/test_lifecycle_plugin_utils.py
+++ b/heat/tests/test_lifecycle_plugin_utils.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import lifecycle_plugin_utils
from heat.engine import lifecycle_plugin
diff --git a/heat/tests/test_loguserdata.py b/heat/tests/test_loguserdata.py
index 85802ebdb..7bb17002a 100644
--- a/heat/tests/test_loguserdata.py
+++ b/heat/tests/test_loguserdata.py
@@ -14,8 +14,8 @@
import errno
import os
import subprocess
+from unittest import mock
-import mock
from heat.cloudinit import loguserdata
from heat.tests import common
diff --git a/heat/tests/test_metadata_refresh.py b/heat/tests/test_metadata_refresh.py
index b638f3d36..4b8d43a00 100644
--- a/heat/tests/test_metadata_refresh.py
+++ b/heat/tests/test_metadata_refresh.py
@@ -10,12 +10,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from heat.common import identifier
from heat.common import template_format
from heat.engine.clients.os import glance
+from heat.engine.clients.os import heat_plugin
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine.resources.aws.cfn.wait_condition_handle import (
@@ -222,14 +224,16 @@ class WaitConditionMetadataUpdateTest(common.HeatTestCase):
@mock.patch.object(nova.NovaClientPlugin, 'find_flavor_by_name_or_id')
@mock.patch.object(glance.GlanceClientPlugin, 'find_image_by_name_or_id')
+ @mock.patch.object(heat_plugin.HeatClientPlugin, 'get_heat_cfn_url')
@mock.patch.object(instance.Instance, 'handle_create')
@mock.patch.object(instance.Instance, 'check_create_complete')
@mock.patch.object(scheduler.TaskRunner, '_sleep')
@mock.patch.object(WaitConditionHandle, 'identifier')
def test_wait_metadata(self, mock_identifier, mock_sleep,
- mock_check, mock_handle, *args):
+ mock_check, mock_handle, mock_get, *args):
"""Tests a wait condition metadata update after a signal call."""
+ mock_get.return_value = 'http://server.test:8000/v1'
# Setup Stack
temp = template_format.parse(TEST_TEMPLATE_WAIT_CONDITION)
template = tmpl.Template(temp)
diff --git a/heat/tests/test_nested_stack.py b/heat/tests/test_nested_stack.py
index e7e979f71..33a9ab9fd 100644
--- a/heat/tests/test_nested_stack.py
+++ b/heat/tests/test_nested_stack.py
@@ -12,10 +12,10 @@
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from requests import exceptions
-import six
import yaml
from heat.common import exception
@@ -30,6 +30,7 @@ from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import template
from heat.objects import resource_data as resource_data_object
+from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
@@ -164,7 +165,7 @@ Resources:
res = self.assertRaises(exception.StackValidationFailed,
stack.validate)
- self.assertIn('Recursion depth exceeds', six.text_type(res))
+ self.assertIn('Recursion depth exceeds', str(res))
calls = [mock.call('https://server.test/depth1.template'),
mock.call('https://server.test/depth2.template'),
@@ -229,7 +230,7 @@ Resources:
tr.return_value = 2
res = self.assertRaises(exception.StackValidationFailed,
stack.validate)
- self.assertIn('Recursion depth exceeds', six.text_type(res))
+ self.assertIn('Recursion depth exceeds', str(res))
expected_count = cfg.CONF.get('max_nested_stack_depth') + 1
self.assertEqual(expected_count, urlfetch.get.call_count)
@@ -302,7 +303,7 @@ Resources:
nested_t = template_format.parse(self.nested_template)
nested_t['Parameters']['KeyName']['Default'] = 'Key'
- nested_stack = parser.Stack(ctx, 'test',
+ nested_stack = parser.Stack(ctx, 'test_nested',
template.Template(nested_t))
nested_stack.store()
@@ -387,6 +388,10 @@ Outputs:
self.assertIsNone(self.res.validate())
self.res.store()
+ self.patchobject(stack_object.Stack, 'get_status',
+ return_value=('CREATE', 'COMPLETE',
+ 'Created', 'Sometime'))
+
def test_handle_create(self):
self.res.create_with_template = mock.Mock(return_value=None)
diff --git a/heat/tests/test_noauth.py b/heat/tests/test_noauth.py
index 7a524ccb4..0a6abda92 100644
--- a/heat/tests/test_noauth.py
+++ b/heat/tests/test_noauth.py
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import six
import webob
from heat.common import noauth
@@ -44,9 +43,10 @@ class FakeApp(object):
def __call__(self, env, start_response):
"""Assert that expected environment is present when finally called."""
for k, v in self.expected_env.items():
- assert env[k] == v, '%s != %s' % (env[k], v)
+ if env[k] != v:
+ raise AssertionError('%s != %s' % (env[k], v))
resp = webob.Response()
- resp.body = six.b('SUCCESS')
+ resp.body = 'SUCCESS'.encode('latin-1')
return resp(env, start_response)
diff --git a/heat/tests/test_notifications.py b/heat/tests/test_notifications.py
index f909774ff..e31575da7 100644
--- a/heat/tests/test_notifications.py
+++ b/heat/tests/test_notifications.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import timeutils
from heat.common import timeutils as heat_timeutils
diff --git a/heat/tests/test_parameters.py b/heat/tests/test_parameters.py
index f438c37fb..4779ea438 100644
--- a/heat/tests/test_parameters.py
+++ b/heat/tests/test_parameters.py
@@ -12,7 +12,6 @@
# under the License.
from oslo_serialization import jsonutils as json
-import six
from heat.common import exception
from heat.common import identifier
@@ -125,11 +124,11 @@ class ParameterTestCommon(common.HeatTestCase):
err = self.assertRaises(exception.InvalidSchemaError,
new_parameter, 'p', schema)
self.assertIn('AllowedValues constraint invalid for Json',
- six.text_type(err))
+ str(err))
else:
err = self.assertRaises(exception.InvalidSchemaError,
new_parameter, 'p', schema)
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_description(self):
description = 'Description of the parameter'
@@ -200,7 +199,7 @@ class ParameterTestSpecific(common.HeatTestCase):
'MinLength': '4'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'foo')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_string_overflow(self):
schema = {'Type': 'String',
@@ -208,7 +207,7 @@ class ParameterTestSpecific(common.HeatTestCase):
'MaxLength': '2'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'foo')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_string_pattern_good(self):
schema = {'Type': 'String',
@@ -222,7 +221,7 @@ class ParameterTestSpecific(common.HeatTestCase):
'AllowedPattern': '[a-z]*'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, '1foo')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_string_pattern_bad_suffix(self):
schema = {'Type': 'String',
@@ -230,7 +229,7 @@ class ParameterTestSpecific(common.HeatTestCase):
'AllowedPattern': '[a-z]*'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'foo1')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_string_value_list_good(self):
schema = {'Type': 'String',
@@ -249,7 +248,7 @@ class ParameterTestSpecific(common.HeatTestCase):
'AllowedValues': ['foo', 'bar', 'baz']}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'blarg')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_number_int_good(self):
schema = {'Type': 'Number',
@@ -278,7 +277,7 @@ class ParameterTestSpecific(common.HeatTestCase):
'MinValue': '4'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, '3')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_number_high(self):
schema = {'Type': 'Number',
@@ -286,19 +285,19 @@ class ParameterTestSpecific(common.HeatTestCase):
'MaxValue': '2'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, '3')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_number_bad(self):
schema = {'Type': 'Number'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, 'str')
- self.assertIn('float', six.text_type(err))
+ self.assertIn('float', str(err))
def test_number_bad_type(self):
schema = {'Type': 'Number'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, ['foo'])
- self.assertIn('int', six.text_type(err))
+ self.assertIn('int', str(err))
def test_number_value_list_good(self):
schema = {'Type': 'Number',
@@ -312,7 +311,7 @@ class ParameterTestSpecific(common.HeatTestCase):
'AllowedValues': ['1', '3', '5']}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, '2')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_list_value_list_default_empty(self):
schema = {'Type': 'CommaDelimitedList', 'Default': ''}
@@ -345,7 +344,7 @@ class ParameterTestSpecific(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema,
'foo,baz,blarg')
- self.assertIn('wibble', six.text_type(err))
+ self.assertIn('wibble', str(err))
def test_list_validate_good(self):
schema = {'Type': 'CommaDelimitedList'}
@@ -365,7 +364,7 @@ class ParameterTestSpecific(common.HeatTestCase):
p.user_value = val_s
err = self.assertRaises(exception.StackValidationFailed,
p.validate)
- self.assertIn('Parameter \'p\' is invalid', six.text_type(err))
+ self.assertIn('Parameter \'p\' is invalid', str(err))
def test_map_value(self):
'''Happy path for value that's already a map.'''
@@ -380,9 +379,9 @@ class ParameterTestSpecific(common.HeatTestCase):
schema = {'Type': 'Json',
'ConstraintDescription': 'wibble'}
val = {"foo": "bar", "not_json": len}
- err = self.assertRaises(ValueError,
+ err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, val)
- self.assertIn('Value must be valid JSON', six.text_type(err))
+ self.assertIn('Value must be valid JSON', str(err))
def test_map_value_parse(self):
'''Happy path for value that's a string.'''
@@ -398,9 +397,9 @@ class ParameterTestSpecific(common.HeatTestCase):
schema = {'Type': 'Json',
'ConstraintDescription': 'wibble'}
val = "I am not a map"
- err = self.assertRaises(ValueError,
+ err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, val)
- self.assertIn('Value must be valid JSON', six.text_type(err))
+ self.assertIn('Value must be valid JSON', str(err))
def test_map_underrun(self):
'''Test map length under MIN_LEN.'''
@@ -409,7 +408,7 @@ class ParameterTestSpecific(common.HeatTestCase):
val = {"foo": "bar", "items": [1, 2, 3]}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, val)
- self.assertIn('out of range', six.text_type(err))
+ self.assertIn('out of range', str(err))
def test_map_overrun(self):
'''Test map length over MAX_LEN.'''
@@ -418,7 +417,7 @@ class ParameterTestSpecific(common.HeatTestCase):
val = {"foo": "bar", "items": [1, 2, 3]}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'p', schema, val)
- self.assertIn('out of range', six.text_type(err))
+ self.assertIn('out of range', str(err))
def test_json_list(self):
schema = {'Type': 'Json'}
@@ -452,7 +451,7 @@ class ParameterTestSpecific(common.HeatTestCase):
p.user_value = val_s
err = self.assertRaises(exception.StackValidationFailed,
p.validate)
- self.assertIn('Parameter \'p\' is invalid', six.text_type(err))
+ self.assertIn('Parameter \'p\' is invalid', str(err))
def test_bool_value_true(self):
schema = {'Type': 'Boolean'}
@@ -470,7 +469,7 @@ class ParameterTestSpecific(common.HeatTestCase):
schema = {'Type': 'Boolean'}
err = self.assertRaises(exception.StackValidationFailed,
new_parameter, 'bo', schema, 'foo')
- self.assertIn("Unrecognized value 'foo'", six.text_type(err))
+ self.assertIn("Unrecognized value 'foo'", str(err))
def test_missing_param_str(self):
'''Test missing user parameter.'''
@@ -497,7 +496,7 @@ class ParameterTestSpecific(common.HeatTestCase):
new_parameter, 'testparam', schema, '234')
expected = ("Parameter 'testparam' is invalid: "
'"234" does not match pattern "[a-z]*"')
- self.assertEqual(expected, six.text_type(err))
+ self.assertEqual(expected, str(err))
params_schema = json.loads('''{
@@ -603,7 +602,7 @@ class ParametersTest(ParametersBase):
'None'),
'AWS::StackName': 'test_params'}
- mapped_params = params.map(six.text_type)
+ mapped_params = params.map(str)
mapped_params['Uni'] = mapped_params['Uni'].encode('utf-8')
self.assertEqual(expected, mapped_params)
@@ -694,7 +693,7 @@ class ParameterSchemaTest(common.HeatTestCase):
parameters.Schema.from_dict, 'param_name',
{"foo": "bar"})
self.assertEqual("Invalid key 'foo' for parameter (param_name)",
- six.text_type(error))
+ str(error))
def test_validate_schema_no_type(self):
error = self.assertRaises(exception.InvalidSchemaError,
@@ -702,4 +701,4 @@ class ParameterSchemaTest(common.HeatTestCase):
'broken',
{"Description": "Hi!"})
self.assertEqual("Missing parameter type for parameter: broken",
- six.text_type(error))
+ str(error))
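
The pattern repeated throughout these parameter-test hunks is mechanical: on Python 3 the message of an exception is obtained with the built-in str(), so six.text_type(err) becomes str(err), and invalid JSON parameter values now surface as StackValidationFailed rather than a bare ValueError. A minimal standalone sketch of the assertion style, using a hypothetical validate() helper in place of Heat's parameter classes:

    class StackValidationFailed(Exception):
        """Stand-in for heat.common.exception.StackValidationFailed."""

    def validate(value, allowed):
        # Hypothetical constraint check mirroring the AllowedValues cases above.
        if value not in allowed:
            raise StackValidationFailed('wibble: %r is not an allowed value'
                                        % value)

    try:
        validate('blarg', ['foo', 'bar', 'baz'])
    except StackValidationFailed as err:
        # str(err) replaces six.text_type(err) on Python 3.
        assert 'wibble' in str(err)
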
diff --git a/heat/tests/test_plugin_loader.py b/heat/tests/test_plugin_loader.py
index e8cbe9b62..681e7b2e9 100644
--- a/heat/tests/test_plugin_loader.py
+++ b/heat/tests/test_plugin_loader.py
@@ -14,8 +14,7 @@
import pkgutil
import sys
-
-import mock
+from unittest import mock
from heat.common import plugin_loader
import heat.engine
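
The only change to test_plugin_loader.py is the import swap that recurs in every file below: mock has shipped in the standard library as unittest.mock since Python 3.3, so the third-party package is no longer imported. The stdlib module is a drop-in replacement:

    from unittest import mock

    fake_loader = mock.MagicMock(return_value=['heat.engine.resources'])
    assert fake_loader() == ['heat.engine.resources']
    fake_loader.assert_called_once_with()
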
diff --git a/heat/tests/test_properties.py b/heat/tests/test_properties.py
index cf5dd14ec..97f8a8027 100644
--- a/heat/tests/test_properties.py
+++ b/heat/tests/test_properties.py
@@ -11,18 +11,20 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
-import six
from heat.common import exception
from heat.engine import constraints
+from heat.engine import function
from heat.engine.hot import functions as hot_funcs
from heat.engine.hot import parameters as hot_param
from heat.engine import parameters
from heat.engine import plugin_manager
from heat.engine import properties
from heat.engine import resources
+from heat.engine import rsrc_defn
from heat.engine import support
from heat.engine import translation
from heat.tests import common
@@ -70,8 +72,8 @@ class PropertySchemaTest(common.HeatTestCase):
s = properties.Schema(properties.Schema.STRING, 'A string',
default='wibble',
constraints=[constraints.Length(4, 8)])
- l = properties.Schema(properties.Schema.LIST, 'A list', schema=s)
- self.assertEqual(d, dict(l))
+ ls = properties.Schema(properties.Schema.LIST, 'A list', schema=s)
+ self.assertEqual(d, dict(ls))
def test_schema_map_schema(self):
d = {
@@ -136,14 +138,14 @@ class PropertySchemaTest(common.HeatTestCase):
constraints=[constraints.Length(4, 8)])
m = properties.Schema(properties.Schema.MAP, 'A map',
schema={'Foo': s})
- l = properties.Schema(properties.Schema.LIST, 'A list', schema=m)
- self.assertEqual(d, dict(l))
+ ls = properties.Schema(properties.Schema.LIST, 'A list', schema=m)
+ self.assertEqual(d, dict(ls))
def test_all_resource_schemata(self):
for resource_type in resources.global_env().get_types():
- for schema in six.itervalues(getattr(resource_type,
- 'properties_schema',
- {})):
+ for schema in getattr(resource_type,
+ 'properties_schema',
+ {}).values():
properties.Schema.from_legacy(schema)
def test_from_legacy_idempotency(self):
@@ -291,7 +293,7 @@ class PropertySchemaTest(common.HeatTestCase):
self.assertEqual('[a-z]*', c.pattern)
def test_from_legacy_list(self):
- l = properties.Schema.from_legacy({
+ ls = properties.Schema.from_legacy({
'Type': 'List',
'Default': ['wibble'],
'Schema': {
@@ -300,15 +302,15 @@ class PropertySchemaTest(common.HeatTestCase):
'MaxLength': 8,
}
})
- self.assertEqual(properties.Schema.LIST, l.type)
- self.assertEqual(['wibble'], l.default)
+ self.assertEqual(properties.Schema.LIST, ls.type)
+ self.assertEqual(['wibble'], ls.default)
- ss = l.schema[0]
+ ss = ls.schema[0]
self.assertEqual(properties.Schema.STRING, ss.type)
self.assertEqual('wibble', ss.default)
def test_from_legacy_map(self):
- l = properties.Schema.from_legacy({
+ ls = properties.Schema.from_legacy({
'Type': 'Map',
'Schema': {
'foo': {
@@ -317,9 +319,9 @@ class PropertySchemaTest(common.HeatTestCase):
}
}
})
- self.assertEqual(properties.Schema.MAP, l.type)
+ self.assertEqual(properties.Schema.MAP, ls.type)
- ss = l.schema['foo']
+ ss = ls.schema['foo']
self.assertEqual(properties.Schema.STRING, ss.type)
self.assertEqual('wibble', ss.default)
@@ -635,7 +637,7 @@ class PropertySchemaTest(common.HeatTestCase):
update = True
sub_schema = prop.schema
if sub_schema:
- for sub_prop_key, sub_prop in six.iteritems(sub_schema):
+ for sub_prop_key, sub_prop in sub_schema.items():
if not update:
self.assertEqual(update, sub_prop.update_allowed,
"Mismatch in update policies: "
@@ -651,11 +653,11 @@ class PropertySchemaTest(common.HeatTestCase):
check_update_policy(resource_type, sub_prop_key,
sub_prop, update)
- for resource_type, resource_class in six.iteritems(all_resources):
+ for resource_type, resource_class in all_resources.items():
props_schemata = properties.schemata(
resource_class.properties_schema)
- for prop_key, prop in six.iteritems(props_schemata):
+ for prop_key, prop in props_schemata.items():
check_update_policy(resource_type, prop_key, prop)
@@ -807,7 +809,7 @@ class PropertyTest(common.HeatTestCase):
schema = {'Type': 'Integer'}
p = properties.Property(schema)
ex = self.assertRaises(TypeError, p.get_value, '3a')
- self.assertEqual("Value '3a' is not an integer", six.text_type(ex))
+ self.assertEqual("Value '3a' is not an integer", str(ex))
def test_integer_low(self):
schema = {'Type': 'Integer',
@@ -1004,7 +1006,7 @@ class PropertyTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
p.get_value, {'valid': 'fish'}, True)
self.assertEqual('Property error: valid: "fish" is not a '
- 'valid boolean', six.text_type(ex))
+ 'valid boolean', str(ex))
def test_map_schema_missing_data(self):
map_schema = {'valid': {'Type': 'Boolean'}}
@@ -1017,7 +1019,7 @@ class PropertyTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
p.get_value, {}, True)
self.assertEqual('Property error: Property valid not assigned',
- six.text_type(ex))
+ str(ex))
def test_list_schema_good(self):
map_schema = {'valid': {'Type': 'Boolean'}}
@@ -1036,7 +1038,7 @@ class PropertyTest(common.HeatTestCase):
p.get_value,
[{'valid': 'True'}, {'valid': 'fish'}], True)
self.assertEqual('Property error: [1].valid: "fish" is not '
- 'a valid boolean', six.text_type(ex))
+ 'a valid boolean', str(ex))
def test_list_schema_int_good(self):
list_schema = {'Type': 'Integer'}
@@ -1049,7 +1051,7 @@ class PropertyTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
p.get_value, [42, 'fish'], True)
self.assertEqual("Property error: [1]: Value 'fish' is not "
- "an integer", six.text_type(ex))
+ "an integer", str(ex))
class PropertiesTest(common.HeatTestCase):
@@ -1072,7 +1074,7 @@ class PropertiesTest(common.HeatTestCase):
'default_override': 21,
}
- def double(d):
+ def double(d, nullable=False):
return d * 2
self.props = properties.Properties(schema, data, double, 'wibble')
@@ -1117,7 +1119,7 @@ class PropertiesTest(common.HeatTestCase):
ex = self.assertRaises(KeyError, self.props.get_user_value, 'foo')
# Note we have to use args here: https://bugs.python.org/issue2651
self.assertEqual('Invalid Property foo',
- six.text_type(ex.args[0]))
+ str(ex.args[0]))
def test_bad_key(self):
self.assertEqual('wibble', self.props.get('foo', 'wibble'))
@@ -1126,7 +1128,7 @@ class PropertiesTest(common.HeatTestCase):
ex = self.assertRaises(KeyError, self.props.__getitem__, 'foo')
# Note we have to use args here: https://bugs.python.org/issue2651
self.assertEqual('Invalid Property foo',
- six.text_type(ex.args[0]))
+ str(ex.args[0]))
def test_none_string(self):
schema = {'foo': {'Type': 'String'}}
@@ -1207,7 +1209,7 @@ class PropertiesTest(common.HeatTestCase):
def test_resolve_returns_none(self):
schema = {'foo': {'Type': 'String', "MinLength": "5"}}
- def test_resolver(prop):
+ def test_resolver(prop, nullable=False):
return None
self.patchobject(properties.Properties,
@@ -1244,7 +1246,7 @@ class PropertiesTest(common.HeatTestCase):
}
# define parameters for function
- def test_resolver(prop):
+ def test_resolver(prop, nullable=False):
return 'None'
class rsrc(object):
@@ -1651,6 +1653,50 @@ class PropertiesTest(common.HeatTestCase):
props_b = properties.Properties(schema, {'foo': 1})
self.assertTrue(props_a != props_b)
+ def test_description_substitution(self):
+ schema = {
+ 'description': properties.Schema('String',
+ update_allowed=True),
+ 'not_description': properties.Schema('String',
+ update_allowed=True),
+ }
+ blank_rsrc = rsrc_defn.ResourceDefinition('foo', 'FooResource', {},
+ description='Foo resource')
+ bar_rsrc = rsrc_defn.ResourceDefinition('foo', 'FooResource',
+ {'description': 'bar'},
+ description='Foo resource')
+
+ blank_props = blank_rsrc.properties(schema)
+ self.assertEqual('Foo resource', blank_props['description'])
+ self.assertEqual(None, blank_props['not_description'])
+
+ replace_schema = {'description': properties.Schema('String')}
+ empty_props = blank_rsrc.properties(replace_schema)
+ self.assertEqual(None, empty_props['description'])
+
+ bar_props = bar_rsrc.properties(schema)
+ self.assertEqual('bar', bar_props['description'])
+
+ def test_null_property_value(self):
+ class NullFunction(function.Function):
+ def result(self):
+ return Ellipsis
+
+ schema = {
+ 'Foo': properties.Schema('String', required=False),
+ 'Bar': properties.Schema('String', required=False),
+ 'Baz': properties.Schema('String', required=False),
+ }
+ user_props = {'Foo': NullFunction(None, 'null', []), 'Baz': None}
+ props = properties.Properties(schema, user_props, function.resolve)
+
+ self.assertEqual(None, props['Foo'])
+ self.assertEqual(None, props.get_user_value('Foo'))
+ self.assertEqual(None, props['Bar'])
+ self.assertEqual(None, props.get_user_value('Bar'))
+ self.assertEqual('', props['Baz'])
+ self.assertEqual('', props.get_user_value('Baz'))
+
class PropertiesValidationTest(common.HeatTestCase):
def test_required(self):
@@ -1688,14 +1734,14 @@ class PropertiesValidationTest(common.HeatTestCase):
props = properties.Properties(schema, {'foo': ['foo', 'bar']})
ex = self.assertRaises(exception.StackValidationFailed, props.validate)
self.assertIn('Property error: foo: Value must be a string',
- six.text_type(ex))
+ str(ex))
def test_dict_instead_string(self):
schema = {'foo': {'Type': 'String'}}
props = properties.Properties(schema, {'foo': {'foo': 'bar'}})
ex = self.assertRaises(exception.StackValidationFailed, props.validate)
self.assertIn('Property error: foo: Value must be a string',
- six.text_type(ex))
+ str(ex))
def test_none_string(self):
schema = {'foo': {'Type': 'String'}}
@@ -1862,7 +1908,7 @@ class PropertiesValidationTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual('Property error: foo[0]: Unknown Property bar',
- six.text_type(ex))
+ str(ex))
def test_nested_properties_schema_invalid_property_in_map(self):
child_schema = {'Key': {'Type': 'String',
@@ -1881,7 +1927,7 @@ class PropertiesValidationTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual('Property error: foo.boo: Unknown Property bar',
- six.text_type(ex))
+ str(ex))
def test_more_nested_properties_schema_invalid_property_in_list(self):
nested_child_schema = {'Key': {'Type': 'String',
@@ -1899,7 +1945,7 @@ class PropertiesValidationTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual('Property error: foo[0].doo: Unknown Property bar',
- six.text_type(ex))
+ str(ex))
def test_more_nested_properties_schema_invalid_property_in_map(self):
nested_child_schema = {'Key': {'Type': 'String',
@@ -1917,7 +1963,7 @@ class PropertiesValidationTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual('Property error: foo.boo.doo: Unknown Property bar',
- six.text_type(ex))
+ str(ex))
def test_schema_to_template_empty_schema(self):
schema = {}
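
Two less mechanical changes appear in test_properties.py: the stub resolvers passed to properties.Properties now accept a nullable keyword (double(d) becomes double(d, nullable=False)), and the new test_null_property_value test uses a function whose result is Ellipsis to mark a property as explicitly nulled. A standalone sketch of a stub with the new calling convention (an illustration only, not Heat's resolution logic):

    def double(d, nullable=False):
        # Stub resolver with the extra keyword the Properties code may now
        # pass; a one-argument stub would raise TypeError in that case.
        return d * 2

    assert double(3) == 6
    assert double(3, nullable=True) == 6
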
diff --git a/heat/tests/test_properties_group.py b/heat/tests/test_properties_group.py
index 86ca1b23f..535157459 100644
--- a/heat/tests/test_properties_group.py
+++ b/heat/tests/test_properties_group.py
@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from heat.common import exception
from heat.engine import properties_group as pg
@@ -86,7 +85,7 @@ class TestSchemaSimpleValidation(common.HeatTestCase):
if self.message is not None:
ex = self.assertRaises(exception.InvalidSchemaError,
pg.PropertiesGroup, self.schema)
- self.assertEqual(self.message, six.text_type(ex))
+ self.assertEqual(self.message, str(ex))
else:
self.assertIsInstance(pg.PropertiesGroup(self.schema),
pg.PropertiesGroup)
diff --git a/heat/tests/test_provider_template.py b/heat/tests/test_provider_template.py
index a1f7060f4..9064b51ed 100644
--- a/heat/tests/test_provider_template.py
+++ b/heat/tests/test_provider_template.py
@@ -14,11 +14,9 @@
import collections
import json
import os
+from unittest import mock
import uuid
-import mock
-import six
-
from heat.common import exception
from heat.common.i18n import _
from heat.common import identifier
@@ -34,6 +32,7 @@ from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import support
from heat.engine import template
+from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
@@ -441,7 +440,7 @@ class ProviderTemplateTest(common.HeatTestCase):
temp_res.validate)
self.assertEqual("Property Foo type mismatch between facade "
"DummyResource (Map) and provider (String)",
- six.text_type(ex))
+ str(ex))
def test_properties_list_with_none(self):
provider = {
@@ -626,7 +625,7 @@ class ProviderTemplateTest(common.HeatTestCase):
ex = self.assertRaises(exception.NotFound, env.get_class,
'OS::ResourceType', 'fred')
self.assertIn('Could not fetch remote template "some_magic.yaml"',
- six.text_type(ex))
+ str(ex))
def test_metadata_update_called(self):
provider = {
@@ -739,7 +738,7 @@ class ProviderTemplateTest(common.HeatTestCase):
"""Test that templates are registered correctly.
Test that templates persisted in the database prior to
- https://review.openstack.org/#/c/79953/1 are registered correctly.
+ https://review.opendev.org/#/c/79953/1 are registered correctly.
"""
env = {'resource_registry': {'http://example.com/test.template': None,
'resources': {}}}
@@ -892,7 +891,7 @@ class ProviderTemplateTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
temp_res.validate)
self.assertIn('Error parsing template http://heatr/bad_tmpl.yaml',
- six.text_type(err))
+ str(err))
mock_get.assert_called_once_with(test_templ_name,
allowed_schemes=('http', 'https',))
@@ -974,6 +973,10 @@ class TemplateResourceCrudTest(common.HeatTestCase):
self.defn, self.stack)
self.assertIsNone(self.res.validate())
+ self.patchobject(stack_object.Stack, 'get_status',
+ return_value=('CREATE', 'COMPLETE',
+ 'Created', 'Sometime'))
+
def test_handle_create(self):
self.res.create_with_template = mock.Mock(return_value=None)
@@ -1002,8 +1005,8 @@ class TemplateResourceCrudTest(common.HeatTestCase):
def test_handle_delete(self):
self.res.rpc_client = mock.MagicMock()
self.res.id = 55
- self.res.uuid = six.text_type(uuid.uuid4())
- self.res.resource_id = six.text_type(uuid.uuid4())
+ self.res.uuid = str(uuid.uuid4())
+ self.res.resource_id = str(uuid.uuid4())
self.res.action = self.res.CREATE
self.res.nested = mock.MagicMock()
ident = identifier.HeatIdentifier(self.ctx.tenant_id,
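
The functional addition in this file is the setUp patch of stack_object.Stack.get_status, which now returns a four-tuple of action, status, reason and timestamp before the delete paths run. Roughly the same patch expressed with plain unittest.mock against a stand-in class (the argument names here are illustrative, not Heat's real signature):

    from unittest import mock

    class FakeStackObject:
        """Stand-in for heat.objects.stack.Stack in this sketch."""
        @staticmethod
        def get_status(context, stack_id):
            raise NotImplementedError

    with mock.patch.object(FakeStackObject, 'get_status',
                           return_value=('CREATE', 'COMPLETE',
                                         'Created', 'Sometime')):
        assert FakeStackObject.get_status(None, 'some-id') == (
            'CREATE', 'COMPLETE', 'Created', 'Sometime')
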
diff --git a/heat/tests/test_resource.py b/heat/tests/test_resource.py
index 1bac44654..92e06857c 100644
--- a/heat/tests/test_resource.py
+++ b/heat/tests/test_resource.py
@@ -13,16 +13,15 @@
import collections
import datetime
-import eventlet
import itertools
import json
import os
import sys
+from unittest import mock
import uuid
-import mock
+import eventlet
from oslo_config import cfg
-import six
from heat.common import exception
from heat.common.i18n import _
@@ -173,7 +172,7 @@ class ResourceTest(common.HeatTestCase):
resource.Resource, 'wrong/name',
snippet, self.stack)
self.assertEqual('Resource name may not contain "/"',
- six.text_type(ex))
+ str(ex))
@mock.patch.object(translation, 'resolve_and_find')
@mock.patch.object(parser.Stack, 'db_resource_get')
@@ -227,7 +226,7 @@ class ResourceTest(common.HeatTestCase):
resource.Resource, resource_name,
snippet, self.stack)
self.assertIn(_('Resource "%s" has no type') % resource_name,
- six.text_type(ex))
+ str(ex))
def test_state_defaults(self):
tmpl = rsrc_defn.ResourceDefinition('test_res_def', 'Foo')
@@ -247,7 +246,7 @@ class ResourceTest(common.HeatTestCase):
ex = self.assertRaises(exception.NotSupported,
res.signal)
self.assertEqual('Signal resource during %s is not '
- 'supported.' % action, six.text_type(ex))
+ 'supported.' % action, str(ex))
ev.assert_called_with(
action, status,
'Cannot signal resource during %s' % action)
@@ -366,6 +365,15 @@ class ResourceTest(common.HeatTestCase):
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
self.assertEqual('f00d', res.resource_id)
+ def test_create_from_external_with_check(self):
+ tmpl = rsrc_defn.ResourceDefinition(
+ 'test_resource', 'GenericResourceType',
+ external_id='f00d')
+ res = generic_rsrc.CheckableResource('test_resource', tmpl, self.stack)
+ scheduler.TaskRunner(res.create)()
+ self.assertEqual((res.CHECK, res.COMPLETE), res.state)
+ self.assertEqual('f00d', res.resource_id)
+
def test_create_from_external_not_found(self):
external_id = 'f00d'
tmpl = rsrc_defn.ResourceDefinition(
@@ -382,7 +390,7 @@ class ResourceTest(common.HeatTestCase):
"(%(type)s) can not be found.") %
{'external_id': external_id,
'type': res.type()})
- self.assertEqual(message, six.text_type(e))
+ self.assertEqual(message, str(e))
def test_updated_from_external(self):
tmpl = rsrc_defn.ResourceDefinition('test_resource',
@@ -397,7 +405,7 @@ class ResourceTest(common.HeatTestCase):
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.update, utmpl)
)
- self.assertEqual(expected_err_msg, six.text_type(err))
+ self.assertEqual(expected_err_msg, str(err))
def test_state_set_invalid(self):
tmpl = rsrc_defn.ResourceDefinition('test_resource', 'Foo')
@@ -908,7 +916,7 @@ class ResourceTest(common.HeatTestCase):
res.update_template_diff_properties,
after_props, before_props)
self.assertIn("Update to properties Spam, Viking of",
- six.text_type(ex))
+ str(ex))
def test_resource(self):
tmpl = rsrc_defn.ResourceDefinition('test_resource', 'Foo',
@@ -983,7 +991,7 @@ class ResourceTest(common.HeatTestCase):
res.state_set(res.CREATE, res.IN_PROGRESS, 'test_store')
# Modernity, the data is where it belongs
- # The db object data is encrypted
+ # The DB object data is encrypted
rsrc_prop_data_db_obj = db_api.resource_prop_data_get(
self.stack.context, res._rsrc_prop_data_id)
self.assertNotEqual(rsrc_prop_data_db_obj['data'], {'Foo': 'lucky'})
@@ -1005,7 +1013,7 @@ class ResourceTest(common.HeatTestCase):
'Property Foo not assigned')
create = scheduler.TaskRunner(res.create)
err = self.assertRaises(exception.ResourceFailure, create)
- self.assertIn(estr, six.text_type(err))
+ self.assertIn(estr, str(err))
self.assertEqual((res.CREATE, res.FAILED), res.state)
def test_create_fail_prop_typo(self):
@@ -1019,7 +1027,7 @@ class ResourceTest(common.HeatTestCase):
'Unknown Property Food')
create = scheduler.TaskRunner(res.create)
err = self.assertRaises(exception.ResourceFailure, create)
- self.assertIn(estr, six.text_type(err))
+ self.assertIn(estr, str(err))
self.assertEqual((res.CREATE, res.FAILED), res.state)
def test_create_fail_metadata_parse_error(self):
@@ -1123,7 +1131,7 @@ class ResourceTest(common.HeatTestCase):
'Went to status ERROR due to "just because"')
create = scheduler.TaskRunner(res.create)
err = self.assertRaises(exception.ResourceFailure, create)
- self.assertEqual(estr, six.text_type(err))
+ self.assertEqual(estr, str(err))
self.assertEqual((res.CREATE, res.FAILED), res.state)
self.assertEqual(
1, generic_rsrc.ResourceWithProps.handle_create.call_count)
@@ -1278,7 +1286,7 @@ class ResourceTest(common.HeatTestCase):
updater = scheduler.TaskRunner(res.update, utmpl)
ex = self.assertRaises(resource.UpdateReplace, updater)
self.assertEqual('The Resource test_resource requires replacement.',
- six.text_type(ex))
+ str(ex))
generic_rsrc.ResourceWithProps.handle_update.assert_called_once_with(
utmpl, mock.ANY, prop_diff)
@@ -1301,7 +1309,7 @@ class ResourceTest(common.HeatTestCase):
updater = scheduler.TaskRunner(res.update, utmpl)
ex = self.assertRaises(resource.UpdateReplace, updater)
self.assertEqual('The Resource Unknown requires replacement.',
- six.text_type(ex))
+ str(ex))
generic_rsrc.ResourceWithProps.handle_update.assert_called_once_with(
utmpl, mock.ANY, prop_diff)
@@ -1479,7 +1487,7 @@ class ResourceTest(common.HeatTestCase):
exc = self.assertRaises(exception.Error,
res._verify_check_conditions, checks)
- exc_text = six.text_type(exc)
+ exc_text = str(exc)
self.assertNotIn("'foo2':", exc_text)
self.assertNotIn("'foo4':", exc_text)
self.assertIn("'foo1': expected 'bar1', got 'baz1'", exc_text)
@@ -1516,9 +1524,9 @@ class ResourceTest(common.HeatTestCase):
for state in invalid_states:
res.state_set(*state)
suspend = scheduler.TaskRunner(res.suspend)
- expected = 'State %s invalid for suspend' % six.text_type(state)
+ expected = 'State %s invalid for suspend' % str(state)
exc = self.assertRaises(exception.ResourceFailure, suspend)
- self.assertIn(expected, six.text_type(exc))
+ self.assertIn(expected, str(exc))
def test_resume_fail_invalid_states(self):
tmpl = rsrc_defn.ResourceDefinition('test_resource',
@@ -1536,9 +1544,9 @@ class ResourceTest(common.HeatTestCase):
for state in invalid_states:
res.state_set(*state)
resume = scheduler.TaskRunner(res.resume)
- expected = 'State %s invalid for resume' % six.text_type(state)
+ expected = 'State %s invalid for resume' % str(state)
exc = self.assertRaises(exception.ResourceFailure, resume)
- self.assertIn(expected, six.text_type(exc))
+ self.assertIn(expected, str(exc))
def test_suspend_fail_exception(self):
tmpl = rsrc_defn.ResourceDefinition('test_resource',
@@ -1861,8 +1869,7 @@ class ResourceTest(common.HeatTestCase):
prop.schema.value,
res_name)
else:
- for nest_prop_name, nest_prop in six.iteritems(
- prop.schema):
+ for nest_prop_name, nest_prop in prop.schema.items():
_validate_property_schema(nest_prop_name,
nest_prop,
res_name)
@@ -1871,8 +1878,8 @@ class ResourceTest(common.HeatTestCase):
for res_type in resource_types:
res_class = env.get_class(res_type)
if hasattr(res_class, "properties_schema"):
- for property_schema_name, property_schema in six.iteritems(
- res_class.properties_schema):
+ for property_schema_name, property_schema in \
+ res_class.properties_schema.items():
_validate_property_schema(
property_schema_name, property_schema,
res_class.__name__)
@@ -1963,7 +1970,7 @@ class ResourceTest(common.HeatTestCase):
'prop4': ['a', 'list'],
'prop5': True}
- # The db data should be encrypted when _store() is called
+ # The DB data should be encrypted when _store() is called
res = generic_rsrc.GenericResource('test_res_enc', tmpl, self.stack)
res._stored_properties_data = stored_properties_data
res._rsrc_prop_data = None
@@ -1972,7 +1979,7 @@ class ResourceTest(common.HeatTestCase):
self.assertNotEqual('string',
db_res.rsrc_prop_data.data['prop1'])
- # The db data should be encrypted when state_set is called
+ # The DB data should be encrypted when state_set is called
res = generic_rsrc.GenericResource('test_res_enc', tmpl, self.stack)
res._stored_properties_data = stored_properties_data
res.state_set(res.CREATE, res.IN_PROGRESS, 'test_store')
@@ -2008,7 +2015,7 @@ class ResourceTest(common.HeatTestCase):
'prop4': ['a', 'list'],
'prop5': True}
- # The db data should not be encrypted when state_set()
+ # The DB data should not be encrypted when state_set()
# is called
res = generic_rsrc.GenericResource('test_res_enc', tmpl, self.stack)
res._stored_properties_data = stored_properties_data
@@ -2017,7 +2024,7 @@ class ResourceTest(common.HeatTestCase):
db_res = db_api.resource_get(res.context, res.id)
self.assertEqual('string', db_res.rsrc_prop_data.data['prop1'])
- # The db data should not be encrypted when _store() is called
+ # The DB data should not be encrypted when _store() is called
res = generic_rsrc.GenericResource('test_res_enc', tmpl, self.stack)
res._stored_properties_data = stored_properties_data
db_res = db_api.resource_get(res.context, res.id)
@@ -2057,7 +2064,7 @@ class ResourceTest(common.HeatTestCase):
-1, pcb)
self.assertTrue(mock_create.called)
- self.assertItemsEqual([1, 3], res.requires)
+ self.assertCountEqual([1, 3], res.requires)
self._assert_resource_lock(res.id, None, None)
def test_create_convergence_throws_timeout(self):
@@ -2087,7 +2094,7 @@ class ResourceTest(common.HeatTestCase):
self.assertRaises(exception.ResourceNotAvailable,
res.create_convergence, self.stack.t.id, {5, 3},
'engine-007', self.dummy_timeout, self.dummy_event)
- self.assertItemsEqual([5, 3], res.requires)
+ self.assertCountEqual([5, 3], res.requires)
# The locking happens in create which we mocked out
self._assert_resource_lock(res.id, None, None)
@@ -2107,7 +2114,7 @@ class ResourceTest(common.HeatTestCase):
tr()
mock_adopt.assert_called_once_with(
resource_data={'resource_id': 'fluffy'})
- self.assertItemsEqual([5, 3], res.requires)
+ self.assertCountEqual([5, 3], res.requires)
self._assert_resource_lock(res.id, None, None)
def test_adopt_convergence_bad_data(self):
@@ -2122,7 +2129,7 @@ class ResourceTest(common.HeatTestCase):
{5, 3}, 'engine-007', self.dummy_timeout,
self.dummy_event)
exc = self.assertRaises(exception.ResourceFailure, tr)
- self.assertIn('Resource ID was not provided', six.text_type(exc))
+ self.assertIn('Resource ID was not provided', str(exc))
@mock.patch.object(resource.Resource, 'update_template_diff_properties')
@mock.patch.object(resource.Resource, '_needs_update')
@@ -2157,7 +2164,7 @@ class ResourceTest(common.HeatTestCase):
{4, 3}, 'engine-007', 120, new_stack)
tr()
- self.assertItemsEqual([3, 4], res.requires)
+ self.assertCountEqual([3, 4], res.requires)
self.assertEqual(res.action, resource.Resource.UPDATE)
self.assertEqual(res.status, resource.Resource.COMPLETE)
self._assert_resource_lock(res.id, None, 2)
@@ -2264,7 +2271,7 @@ class ResourceTest(common.HeatTestCase):
ex = self.assertRaises(exception.UpdateInProgress, tr)
msg = ("The resource %s is already being updated." %
res.name)
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
# ensure requirements are not updated for failed resource
rs = resource_objects.Resource.get_obj(self.stack.context, res.id)
self.assertEqual([2, 1], rs.requires)
@@ -2310,7 +2317,7 @@ class ResourceTest(common.HeatTestCase):
self.assertEqual(new_temp.id, res.current_template_id)
# check if requires was updated
- self.assertItemsEqual([2, 3, 4], res.requires)
+ self.assertCountEqual([2, 3, 4], res.requires)
self.assertEqual(res.action, resource.Resource.UPDATE)
self.assertEqual(res.status, resource.Resource.FAILED)
self._assert_resource_lock(res.id, None, 2)
@@ -2350,7 +2357,7 @@ class ResourceTest(common.HeatTestCase):
# ensure that current_template_id was not updated
self.assertEqual(stack.t.id, res.current_template_id)
# ensure that requires was not updated
- self.assertItemsEqual([2], res.requires)
+ self.assertCountEqual([2], res.requires)
self._assert_resource_lock(res.id, None, 2)
def test_convergence_update_replace_rollback(self):
@@ -2478,7 +2485,7 @@ class ResourceTest(common.HeatTestCase):
ex = self.assertRaises(exception.UpdateInProgress, tr)
msg = ("The resource %s is already being updated." %
res.name)
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
@mock.patch.object(resource_objects.Resource, 'get_obj')
def test_update_replacement_data(self, mock_get_obj):
@@ -2709,7 +2716,7 @@ class ResourceDeleteRetryTest(common.HeatTestCase):
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.delete))
- exc_text = six.text_type(exc)
+ exc_text = str(exc)
self.assertIn('Conflict', exc_text)
self.assertEqual(
self.num_retries + 1,
@@ -3053,7 +3060,7 @@ class ResourceDependenciesTest(common.HeatTestCase):
stack = parser.Stack(utils.dummy_context(), 'test', tmpl)
ex = self.assertRaises(exception.InvalidTemplateReference,
stack.validate)
- self.assertIn('"baz" (in bar.Properties.Foo)', six.text_type(ex))
+ self.assertIn('"baz" (in bar.Properties.Foo)', str(ex))
def test_validate_value_fail(self):
tmpl = template.Template({
@@ -3072,7 +3079,7 @@ class ResourceDependenciesTest(common.HeatTestCase):
stack.validate)
self.assertIn("Property error: resources.bar.properties.FooInt: "
"Value 'notanint' is not an integer",
- six.text_type(ex))
+ str(ex))
# You can turn off value validation via strict_validate
stack_novalidate = parser.Stack(utils.dummy_context(), 'test', tmpl,
@@ -3233,7 +3240,7 @@ class ResourceDependenciesTest(common.HeatTestCase):
stack = parser.Stack(utils.dummy_context(), 'test', tmpl)
ex = self.assertRaises(exception.InvalidTemplateReference,
getattr, stack, 'dependencies')
- self.assertIn('"baz" (in bar.Properties.Foo)', six.text_type(ex))
+ self.assertIn('"baz" (in bar.Properties.Foo)', str(ex))
def test_hot_getatt_fail(self):
tmpl = template.Template({
@@ -3251,7 +3258,7 @@ class ResourceDependenciesTest(common.HeatTestCase):
stack = parser.Stack(utils.dummy_context(), 'test', tmpl)
ex = self.assertRaises(exception.InvalidTemplateReference,
getattr, stack, 'dependencies')
- self.assertIn('"baz" (in bar.Properties.Foo)', six.text_type(ex))
+ self.assertIn('"baz" (in bar.Properties.Foo)', str(ex))
def test_getatt_fail_nested_deep(self):
tmpl = template.Template({
@@ -3275,7 +3282,7 @@ class ResourceDependenciesTest(common.HeatTestCase):
ex = self.assertRaises(exception.InvalidTemplateReference,
getattr, stack, 'dependencies')
self.assertIn('"baz" (in bar.Properties.Foo.Fn::Join[1][3])',
- six.text_type(ex))
+ str(ex))
def test_hot_getatt_fail_nested_deep(self):
tmpl = template.Template({
@@ -3299,7 +3306,7 @@ class ResourceDependenciesTest(common.HeatTestCase):
ex = self.assertRaises(exception.InvalidTemplateReference,
getattr, stack, 'dependencies')
self.assertIn('"baz" (in bar.Properties.Foo.Fn::Join[1][3])',
- six.text_type(ex))
+ str(ex))
def test_dependson(self):
tmpl = template.Template({
@@ -3354,7 +3361,7 @@ class ResourceDependenciesTest(common.HeatTestCase):
stack = parser.Stack(utils.dummy_context(), 'test', tmpl)
ex = self.assertRaises(exception.InvalidTemplateReference,
getattr, stack, 'dependencies')
- self.assertIn('"wibble" (in foo)', six.text_type(ex))
+ self.assertIn('"wibble" (in foo)', str(ex))
class MetadataTest(common.HeatTestCase):
@@ -3871,7 +3878,7 @@ class ResourceAvailabilityTest(common.HeatTestCase):
'type UnavailableResourceType, reason: '
'Service endpoint not in service catalog.')
self.assertEqual(msg,
- six.text_type(ex),
+ str(ex),
'invalid exception message')
# Make sure is_service_available is called on the right class
@@ -3907,7 +3914,7 @@ class ResourceAvailabilityTest(common.HeatTestCase):
'type UnavailableResourceType, reason: '
'Authorization failed.')
self.assertEqual(msg,
- six.text_type(ex),
+ str(ex),
'invalid exception message')
# Make sure is_service_available is called on the right class
@@ -3990,7 +3997,7 @@ class ResourceAvailabilityTest(common.HeatTestCase):
with mock.patch.object(res, '_default_client_plugin',
return_value=client_plugin):
ex = self.assertRaises(exception.Error, res.handle_delete)
- self.assertEqual('boom!', six.text_type(ex))
+ self.assertEqual('boom!', str(ex))
delete.assert_called_once_with('12345')
def test_handle_delete_no_entity(self):
@@ -4120,7 +4127,7 @@ class TestLiveStateUpdate(common.HeatTestCase):
'FooInt': 2})
res = generic_rsrc.ResourceWithProps('test_resource',
tmpl, self.stack)
- for prop in six.itervalues(res.properties.props):
+ for prop in res.properties.props.values():
prop.schema.update_allowed = True
res.update_allowed_properties = ('Foo', 'FooInt',)
@@ -4136,7 +4143,7 @@ class TestLiveStateUpdate(common.HeatTestCase):
"""
res.update_allowed_properties = []
res.update_allowed_set = []
- for prop in six.itervalues(res.properties.props):
+ for prop in res.properties.props.values():
prop.schema.update_allowed = False
def test_update_resource_live_state(self):
@@ -4180,7 +4187,7 @@ class TestLiveStateUpdate(common.HeatTestCase):
ex = self.assertRaises(exception.EntityNotFound,
res.get_live_resource_data)
self.assertEqual('The Resource (test_resource) could not be found.',
- six.text_type(ex))
+ str(ex))
self._clean_tests_after_resource_live_state(res)
def test_parse_live_resource_data(self):
@@ -4283,7 +4290,7 @@ class ResourceUpdateRestrictionTest(common.HeatTestCase):
self.assertEqual('ResourceActionRestricted: resources.bar: '
'update is restricted for resource.',
- six.text_type(error))
+ str(error))
self.assertEqual('UPDATE', error.action)
self.assertEqual((res.CREATE, res.COMPLETE), res.state)
ev.assert_called_with(res.UPDATE, res.FAILED,
@@ -4310,7 +4317,7 @@ class ResourceUpdateRestrictionTest(common.HeatTestCase):
scheduler.TaskRunner(res.update, snippet))
self.assertEqual('ResourceActionRestricted: resources.bar: '
'replace is restricted for resource.',
- six.text_type(error))
+ str(error))
self.assertEqual('UPDATE', error.action)
self.assertEqual((res.CREATE, res.COMPLETE), res.state)
ev.assert_called_with(res.UPDATE, res.FAILED,
@@ -4357,7 +4364,7 @@ class ResourceUpdateRestrictionTest(common.HeatTestCase):
props)
error = self.assertRaises(resource.UpdateReplace,
scheduler.TaskRunner(res.update, snippet))
- self.assertIn('requires replacement', six.text_type(error))
+ self.assertIn('requires replacement', str(error))
self.assertEqual(1, prep_replace.call_count)
ev.assert_not_called()
@@ -4387,7 +4394,7 @@ class ResourceUpdateRestrictionTest(common.HeatTestCase):
eventlet.event.Event()))
self.assertEqual('ResourceActionRestricted: resources.bar: '
'replace is restricted for resource.',
- six.text_type(error))
+ str(error))
self.assertEqual('UPDATE', error.action)
self.assertEqual((res.CREATE, res.COMPLETE), res.state)
ev.assert_called_with(res.UPDATE, res.FAILED,
@@ -4417,7 +4424,7 @@ class ResourceUpdateRestrictionTest(common.HeatTestCase):
self.dummy_timeout,
self.new_stack,
eventlet.event.Event()))
- self.assertIn('requires replacement', six.text_type(error))
+ self.assertIn('requires replacement', str(error))
ev.assert_not_called()
@@ -4426,9 +4433,9 @@ class TestResourceMapping(common.HeatTestCase):
def _check_mapping_func(self, func, module):
self.assertTrue(callable(func))
res = func()
- self.assertIsInstance(res, collections.Mapping)
- for r_type, r_class in six.iteritems(res):
- self.assertIsInstance(r_type, six.string_types)
+ self.assertIsInstance(res, collections.abc.Mapping)
+ for r_type, r_class in res.items():
+ self.assertIsInstance(r_type, str)
type_elements = r_type.split('::')
# type has fixed format
# Platform type::Service/Type::Optional Sub-sections::Name
@@ -4436,11 +4443,11 @@ class TestResourceMapping(common.HeatTestCase):
# type should be OS or AWS
self.assertIn(type_elements[0], ('AWS', 'OS'))
# check that value is a class object
- self.assertIsInstance(r_class, six.class_types)
+ self.assertIsInstance(r_class, type)
# check that class is subclass of Resource base class
self.assertTrue(issubclass(r_class, resource.Resource))
# check that mentioned class is presented in the same module
- self.assertTrue(hasattr(module, six.text_type(r_class.__name__)))
+ self.assertTrue(hasattr(module, str(r_class.__name__)))
return len(res)
def test_resource_mappings(self):
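
Beyond the str() conversions, test_resource.py picks up the Python 3 spellings of several removed or relocated names: assertItemsEqual becomes assertCountEqual, collections.Mapping becomes collections.abc.Mapping, and six.string_types / six.class_types collapse to str and type. The equivalences, shown standalone:

    import collections.abc
    import unittest

    class Py3Spellings(unittest.TestCase):
        def test_equivalents(self):
            # assertCountEqual replaces the Python 2-only assertItemsEqual.
            self.assertCountEqual([1, 3], [3, 1])
            # collections.Mapping moved to collections.abc in Python 3.
            self.assertIsInstance({}, collections.abc.Mapping)
            # six.string_types and six.class_types are just str and type now.
            self.assertIsInstance('OS::Heat::None', str)
            self.assertIsInstance(dict, type)

    if __name__ == '__main__':
        unittest.main()
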
diff --git a/heat/tests/test_rpc_client.py b/heat/tests/test_rpc_client.py
index 7bf414216..a5fded812 100644
--- a/heat/tests/test_rpc_client.py
+++ b/heat/tests/test_rpc_client.py
@@ -18,8 +18,8 @@ Unit Tests for heat.rpc.client
"""
import copy
+from unittest import mock
-import mock
from oslo_messaging._drivers import common as rpc_common
from oslo_utils import reflection
diff --git a/heat/tests/test_rpc_listener_client.py b/heat/tests/test_rpc_listener_client.py
index fe10f3333..f753f9385 100644
--- a/heat/tests/test_rpc_listener_client.py
+++ b/heat/tests/test_rpc_listener_client.py
@@ -11,7 +11,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from heat.rpc import api as rpc_api
diff --git a/heat/tests/test_rpc_worker_client.py b/heat/tests/test_rpc_worker_client.py
index 730e189a2..af1de7fb1 100644
--- a/heat/tests/test_rpc_worker_client.py
+++ b/heat/tests/test_rpc_worker_client.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
from heat.rpc import worker_api as rpc_api
from heat.rpc import worker_client as rpc_client
diff --git a/heat/tests/test_rsrc_defn.py b/heat/tests/test_rsrc_defn.py
index 9109e10db..da914f9ea 100644
--- a/heat/tests/test_rsrc_defn.py
+++ b/heat/tests/test_rsrc_defn.py
@@ -11,11 +11,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from heat.common import exception
from heat.common import template_format
from heat.engine.cfn import functions as cfn_funcs
+from heat.engine import function
from heat.engine.hot import functions as hot_funcs
from heat.engine import properties
from heat.engine import rsrc_defn
@@ -87,7 +87,7 @@ class ResourceDefinitionTest(common.HeatTestCase):
rd = self.make_me_one_with_everything()
metadata = rd.metadata()
self.assertEqual({'Baz': 'quux'}, metadata)
- self.assertIsInstance(metadata['Baz'], six.string_types)
+ self.assertIsInstance(metadata['Baz'], str)
def test_dependencies_default(self):
rd = rsrc_defn.ResourceDefinition('rsrc', 'SomeType')
@@ -170,6 +170,39 @@ class ResourceDefinitionTest(common.HeatTestCase):
self.assertEqual('bar', frozen._properties['Foo'])
self.assertEqual('wibble', frozen._metadata['Baz'])
+ def test_freeze_nullable_top_level(self):
+ class NullFunction(function.Function):
+ def result(self):
+ return Ellipsis
+
+ null_func = NullFunction(None, 'null', [])
+
+ rd = rsrc_defn.ResourceDefinition(
+ 'rsrc', 'SomeType',
+ properties=null_func,
+ metadata=null_func,
+ update_policy=null_func)
+
+ frozen = rd.freeze()
+ self.assertIsNone(frozen._properties)
+ self.assertIsNone(frozen._metadata)
+ self.assertIsNone(frozen._update_policy)
+
+ rd2 = rsrc_defn.ResourceDefinition(
+ 'rsrc', 'SomeType',
+ properties={'Foo': null_func,
+ 'Blarg': 'wibble'},
+ metadata={'Bar': null_func,
+ 'Baz': 'quux'},
+ update_policy={'some_policy': null_func})
+
+ frozen2 = rd2.freeze()
+ self.assertNotIn('Foo', frozen2._properties)
+ self.assertEqual('wibble', frozen2._properties['Blarg'])
+ self.assertNotIn('Bar', frozen2._metadata)
+ self.assertEqual('quux', frozen2._metadata['Baz'])
+ self.assertEqual({}, frozen2._update_policy)
+
def test_render_hot(self):
rd = self.make_me_one_with_everything()
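
The new test_freeze_nullable_top_level test pins down how freeze() treats functions that resolve to Ellipsis: a whole section resolving to Ellipsis freezes to None, and individual keys resolving to Ellipsis are dropped (an emptied update_policy freezes to {}). A sketch of just that observable behaviour, operating on already-resolved values rather than Function objects, and not rsrc_defn itself:

    def freeze_section(section):
        # Models only the assertions above: Ellipsis marks an explicit null.
        if section is Ellipsis:
            return None
        return {k: v for k, v in section.items() if v is not Ellipsis}

    assert freeze_section(Ellipsis) is None
    assert freeze_section({'Foo': Ellipsis, 'Blarg': 'wibble'}) == {
        'Blarg': 'wibble'}
    assert freeze_section({'some_policy': Ellipsis}) == {}
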
diff --git a/heat/tests/test_server_tags.py b/heat/tests/test_server_tags.py
index 2d484b160..d23102227 100644
--- a/heat/tests/test_server_tags.py
+++ b/heat/tests/test_server_tags.py
@@ -11,10 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import uuid
-import mock
-
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
diff --git a/heat/tests/test_signal.py b/heat/tests/test_signal.py
index 87e54c9b7..e47705ea1 100644
--- a/heat/tests/test_signal.py
+++ b/heat/tests/test_signal.py
@@ -12,11 +12,11 @@
# under the License.
import datetime
+from unittest import mock
+from urllib import parse as urlparse
from keystoneauth1 import exceptions as kc_exceptions
-import mock
-import six
-from six.moves.urllib import parse as urlparse
+from oslo_utils import timeutils
from heat.common import exception
from heat.common import template_format
@@ -151,6 +151,10 @@ class SignalTest(common.HeatTestCase):
@mock.patch.object(heat_plugin.HeatClientPlugin, 'get_heat_cfn_url')
def test_FnGetAtt_alarm_url(self, mock_get):
+ now = datetime.datetime(2012, 11, 29, 13, 49, 37)
+ timeutils.set_time_override(now)
+ self.addCleanup(timeutils.clear_time_override)
+
# Setup
stack_id = stack_name = 'FnGetAtt-alarm-url'
stack = self._create_stack(TEMPLATE_CFN_SIGNAL,
@@ -160,8 +164,6 @@ class SignalTest(common.HeatTestCase):
mock_get.return_value = 'http://server.test:8000/v1'
rsrc = stack['signal_handler']
- created_time = datetime.datetime(2012, 11, 29, 13, 49, 37)
- rsrc.created_time = created_time
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
@@ -624,7 +626,7 @@ class SignalTest(common.HeatTestCase):
msg = 'Signal resource during %s is not supported.' % action
exc = self.assertRaises(exception.NotSupported, rsrc.signal,
details=err_metadata)
- self.assertEqual(msg, six.text_type(exc))
+ self.assertEqual(msg, str(exc))
def test_signal_in_delete_state(self):
# assert that we get the correct exception when calling a
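
test_signal.py drops the hand-set created_time in favour of freezing the clock with oslo_utils.timeutils, and switches the URL parsing import from six.moves to urllib.parse. The time override used above, shown standalone:

    import datetime

    from oslo_utils import timeutils

    now = datetime.datetime(2012, 11, 29, 13, 49, 37)
    timeutils.set_time_override(now)
    try:
        assert timeutils.utcnow() == now
    finally:
        # In the test this cleanup is registered via self.addCleanup().
        timeutils.clear_time_override()
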
diff --git a/heat/tests/test_stack.py b/heat/tests/test_stack.py
index 5d7102356..790d0f4ca 100644
--- a/heat/tests/test_stack.py
+++ b/heat/tests/test_stack.py
@@ -17,12 +17,11 @@ import datetime
import json
import logging
import time
+from unittest import mock
import eventlet
import fixtures
-import mock
from oslo_config import cfg
-import six
from heat.common import context
from heat.common import exception
@@ -302,7 +301,7 @@ class StackTest(common.HeatTestCase):
all_resources = list(self.stack.iter_resources())
- # Verify, the db query is called with expected filter
+ # Verify, the DB query is called with expected filter
mock_db_call.assert_called_once_with(self.ctx, self.stack.id)
# And returns the resources
@@ -368,7 +367,7 @@ class StackTest(common.HeatTestCase):
filters=dict(name=['A'])
))
- # Verify, the db query is called with expected filter
+ # Verify, the DB query is called with expected filter
mock_db_call.assert_has_calls([
mock.call(self.ctx, self.stack.id, dict(name=['A'])),
mock.call(self.ctx, self.stack.id),
@@ -439,7 +438,7 @@ class StackTest(common.HeatTestCase):
filters=dict(name=['A'])
))
- # Verify, the db query is called with expected filter
+ # Verify, the DB query is called with expected filter
mock_db_call.assert_has_calls([
mock.call(self.ctx, self.stack.id, dict(name=['A'])),
mock.call(self.ctx, self.stack.id),
@@ -479,7 +478,7 @@ class StackTest(common.HeatTestCase):
prev_raw_template_id=None,
current_deps=None, cache_data=None,
nested_depth=0,
- deleted_time=None)
+ deleted_time=None, refresh_cred=False)
template.Template.load.assert_called_once_with(
self.ctx, stk.raw_template_id, stk.raw_template)
@@ -505,9 +504,10 @@ class StackTest(common.HeatTestCase):
"resource_id": null, "action": "INIT", "type": "GenericResourceType",
"metadata": {}}}'''
env = environment.Environment({'parameters': {'param1': 'test'}})
+ self.ctx.tenant_id = '123'
self.stack = stack.Stack(self.ctx, 'stack_details_test',
template.Template(tpl, env=env),
- tenant_id='123',
+ tenant_id=self.ctx.tenant_id,
stack_user_project_id='234',
tags=['tag1', 'tag2'])
self.stack.store()
@@ -937,7 +937,7 @@ class StackTest(common.HeatTestCase):
def _mock_check(res):
res.handle_check = mock.Mock()
- [_mock_check(res) for res in six.itervalues(self.stack.resources)]
+ [_mock_check(res) for res in self.stack.resources.values()]
return self.stack
def test_check_supported(self):
@@ -949,7 +949,7 @@ class StackTest(common.HeatTestCase):
self.assertEqual(stack1.COMPLETE, stack1.status)
self.assertEqual(stack1.CHECK, stack1.action)
[self.assertTrue(res.handle_check.called)
- for res in six.itervalues(stack1.resources)]
+ for res in stack1.resources.values()]
self.assertNotIn('not fully supported', stack1.status_reason)
def test_check_not_supported(self):
@@ -1282,7 +1282,7 @@ class StackTest(common.HeatTestCase):
exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
self.assertIn("Invalid stack name %s must contain" % stack_name,
- six.text_type(ex))
+ str(ex))
def test_stack_name_invalid_type(self):
stack_names = [{"bad": 123}, ["no", "lists"]]
@@ -1291,7 +1291,7 @@ class StackTest(common.HeatTestCase):
exception.StackValidationFailed, stack.Stack,
self.ctx, stack_name, self.tmpl)
self.assertIn("Invalid stack name %s, must be a string"
- % stack_name, six.text_type(ex))
+ % stack_name, str(ex))
def test_resource_state_get_att(self):
tmpl = {
@@ -1406,7 +1406,8 @@ class StackTest(common.HeatTestCase):
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
- self.assertIsNone(test_stack.tags)
+ self.assertIsNone(test_stack._tags)
+ self.assertEqual([], test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl)
self.stack.tags = ['tag1', 'tag2']
@@ -1423,7 +1424,7 @@ class StackTest(common.HeatTestCase):
self.stack.store()
stack_id = self.stack.id
test_stack = stack.Stack.load(self.ctx, stack_id=stack_id)
- self.assertIsNone(test_stack.tags)
+ self.assertEqual([], test_stack.tags)
self.stack = stack.Stack(self.ctx, 'stack_name', self.tmpl,
tags=['tag1', 'tag2'])
@@ -1439,7 +1440,7 @@ class StackTest(common.HeatTestCase):
self.stack.id)
self.assertIsNone(db_tags)
- self.stack = stack.Stack(self.ctx, 'tags_stack', self.tmpl,
+ self.stack = stack.Stack(self.ctx, 'tags_stack2', self.tmpl,
tags=['tag1', 'tag2'])
self.stack.store()
db_tags = stack_tag_object.StackTagList.get(self.stack.context,
@@ -1534,7 +1535,7 @@ class StackTest(common.HeatTestCase):
self.stack = stack.Stack(self.ctx, 'creds_stack', self.tmpl)
ex = self.assertRaises(exception.Error, self.stack.stored_context)
expected_err = 'Attempt to use stored_context with no user_creds'
- self.assertEqual(expected_err, six.text_type(ex))
+ self.assertEqual(expected_err, str(ex))
def test_store_gets_username_from_stack(self):
self.stack = stack.Stack(self.ctx, 'username_stack',
@@ -1629,6 +1630,31 @@ class StackTest(common.HeatTestCase):
saved_stack = stack.Stack.load(self.ctx, stack_id=stack_ownee.id)
self.assertEqual(self.stack.id, saved_stack.owner_id)
+ def _test_load_with_refresh_cred(self, refresh=True):
+ cfg.CONF.set_override('deferred_auth_method', 'trusts')
+ self.patchobject(self.ctx.auth_plugin, 'get_user_id',
+ return_value='old_trustor_user_id')
+ self.patchobject(self.ctx.auth_plugin, 'get_project_id',
+ return_value='test_tenant_id')
+
+ old_context = utils.dummy_context()
+ old_context.trust_id = 'atrust123'
+ old_context.trustor_user_id = (
+ 'trustor_user_id' if refresh else 'old_trustor_user_id')
+ m_sc = self.patchobject(context, 'StoredContext')
+ m_sc.from_dict.return_value = old_context
+ self.stack = stack.Stack(self.ctx, 'test_regenerate_trust', self.tmpl)
+ self.stack.store()
+ load_stack = stack.Stack.load(self.ctx, stack_id=self.stack.id,
+ check_refresh_cred=True)
+ self.assertEqual(refresh, load_stack.refresh_cred)
+
+ def test_load_with_refresh_cred(self):
+ self._test_load_with_refresh_cred()
+
+ def test_load_with_no_refresh_cred(self):
+ self._test_load_with_refresh_cred(refresh=False)
+
def test_requires_deferred_auth(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {'AResource': {'Type': 'GenericResourceType'},
@@ -1786,7 +1812,7 @@ class StackTest(common.HeatTestCase):
self.stack.outputs['Resource_attr'].get_value)
self.assertIn('The Referenced Attribute (AResource Bar) is '
'incorrect.',
- six.text_type(ex))
+ str(ex))
self.stack.delete()
@@ -1899,7 +1925,7 @@ class StackTest(common.HeatTestCase):
self.stack.validate)
self.assertIn('The specified reference "Resource" '
- '(in unknown) is incorrect.', six.text_type(ex))
+ '(in unknown) is incorrect.', str(ex))
def test_incorrect_outputs_incorrect_reference(self):
tmpl = template_format.parse("""
@@ -1915,7 +1941,7 @@ class StackTest(common.HeatTestCase):
self.stack.validate)
self.assertIn('The specified reference "resource" '
- '(in unknown) is incorrect.', six.text_type(ex))
+ '(in unknown) is incorrect.', str(ex))
def test_incorrect_outputs_cfn_missing_value(self):
tmpl = template_format.parse("""
@@ -1936,8 +1962,8 @@ class StackTest(common.HeatTestCase):
self.stack.validate)
self.assertIn('Each output definition must contain a Value key.',
- six.text_type(ex))
- self.assertIn('Outputs.Resource_attr', six.text_type(ex))
+ str(ex))
+ self.assertIn('Outputs.Resource_attr', str(ex))
def test_incorrect_outputs_cfn_empty_value(self):
tmpl = template_format.parse("""
@@ -1991,9 +2017,9 @@ class StackTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
- self.assertIn('Found a %s instead' % six.text_type.__name__,
- six.text_type(ex))
- self.assertIn('Outputs.Resource_attr', six.text_type(ex))
+ self.assertIn('Found a %s instead' % str.__name__,
+ str(ex))
+ self.assertIn('Outputs.Resource_attr', str(ex))
def test_prop_validate_value(self):
tmpl = template_format.parse("""
@@ -2011,7 +2037,7 @@ class StackTest(common.HeatTestCase):
self.stack.validate)
self.assertIn("'notanint' is not an integer",
- six.text_type(ex))
+ str(ex))
self.stack.strict_validate = False
self.assertIsNone(self.stack.validate())
@@ -2034,13 +2060,13 @@ class StackTest(common.HeatTestCase):
ex = self.assertRaises(exception.UserParameterMissing,
self.stack.validate)
self.assertIn("The Parameter (aparam) was not provided",
- six.text_type(ex))
+ str(ex))
self.stack.strict_validate = False
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
self.assertIn("The Parameter (aparam) was not provided",
- six.text_type(ex))
+ str(ex))
self.assertIsNone(self.stack.validate(validate_res_tmpl_only=True))
@@ -2061,21 +2087,21 @@ class StackTest(common.HeatTestCase):
self.stack.validate)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
- six.text_type(ex))
+ str(ex))
self.stack.strict_validate = False
ex = self.assertRaises(exception.InvalidTemplateReference,
self.stack.validate)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
- six.text_type(ex))
+ str(ex))
ex = self.assertRaises(exception.InvalidTemplateReference,
self.stack.validate,
validate_res_tmpl_only=True)
self.assertIn(
"The specified reference \"noexist\" (in AResource) is incorrect",
- six.text_type(ex))
+ str(ex))
def test_validate_property_getatt(self):
tmpl = {
@@ -2105,8 +2131,8 @@ class StackTest(common.HeatTestCase):
self.stack.validate)
self.assertIn("Parameter 'foo' is invalid: could not convert "
- "string to float:", six.text_type(ex))
- self.assertIn("abc", six.text_type(ex))
+ "string to float:", str(ex))
+ self.assertIn("abc", str(ex))
self.stack.strict_validate = False
self.assertIsNone(self.stack.validate())
@@ -2129,8 +2155,8 @@ class StackTest(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
self.stack.validate)
- self.assertIn('Found a list', six.text_type(ex))
- self.assertIn('Outputs.Resource_attr', six.text_type(ex))
+ self.assertIn('Found a list', str(ex))
+ self.assertIn('Outputs.Resource_attr', str(ex))
def test_incorrect_deletion_policy(self):
tmpl = template_format.parse("""
@@ -2154,7 +2180,7 @@ class StackTest(common.HeatTestCase):
self.stack.validate)
self.assertIn('Invalid deletion policy "[1, 2]"',
- six.text_type(ex))
+ str(ex))
def test_deletion_policy_apply_ref(self):
tmpl = template_format.parse("""
@@ -2224,7 +2250,7 @@ class StackTest(common.HeatTestCase):
self.stack.validate)
self.assertIn('Invalid deletion policy "[1, 2]',
- six.text_type(ex))
+ str(ex))
def test_incorrect_outputs_hot_get_attr(self):
tmpl = {'heat_template_version': '2013-05-23',
@@ -2762,7 +2788,7 @@ class StackTest(common.HeatTestCase):
stc._resources = {mock_res.name: mock_res}
expected_exception = self.assertRaises(AssertionError,
stc.validate)
- self.assertEqual(expected_msg, six.text_type(expected_exception))
+ self.assertEqual(expected_msg, str(expected_exception))
mock_dependency.validate.assert_called_once_with()
tmpl = template_format.parse("""
@@ -2775,7 +2801,7 @@ class StackTest(common.HeatTestCase):
template.Template(tmpl))
func_val.side_effect = AssertionError(expected_msg)
expected_exception = self.assertRaises(AssertionError, stc.validate)
- self.assertEqual(expected_msg, six.text_type(expected_exception))
+ self.assertEqual(expected_msg, str(expected_exception))
@mock.patch.object(update, 'StackUpdate')
def test_update_task_exception(self, mock_stack_update):
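
One message change in test_stack.py is worth calling out: the validation error names the offending Python type, so the expected text is built from the type's __name__. Under Python 2 six.text_type was unicode and the message read "Found a unicode instead"; under Python 3 the same template names str:

    # Python 3: the type used for text is plain str.
    assert str.__name__ == 'str'
    assert 'Found a %s instead' % str.__name__ == 'Found a str instead'
    # Python 2, for comparison only: six.text_type was unicode, whose
    # __name__ was 'unicode'.
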
diff --git a/heat/tests/test_stack_collect_attributes.py b/heat/tests/test_stack_collect_attributes.py
index 175783263..3ed3e44cf 100644
--- a/heat/tests/test_stack_collect_attributes.py
+++ b/heat/tests/test_stack_collect_attributes.py
@@ -12,7 +12,6 @@
# under the License.
import itertools
-import six
from heat.common import template_format
from heat.engine import stack
@@ -229,7 +228,7 @@ class DepAttrsTest(common.HeatTestCase):
template.Template(self.parsed_tmpl))
def test_dep_attrs(self):
- for res in six.itervalues(self.stack):
+ for res in self.stack.values():
definitions = (self.stack.defn.resource_definition(n)
for n in self.parsed_tmpl['resources'])
self.assertEqual(self.expected[res.name],
@@ -237,7 +236,7 @@ class DepAttrsTest(common.HeatTestCase):
d.dep_attrs(res.name) for d in definitions)))
def test_all_dep_attrs(self):
- for res in six.itervalues(self.stack):
+ for res in self.stack.values():
definitions = (self.stack.defn.resource_definition(n)
for n in self.parsed_tmpl['resources'])
attrs = set(itertools.chain.from_iterable(
diff --git a/heat/tests/test_stack_delete.py b/heat/tests/test_stack_delete.py
index 6b07ce0b2..51fb26a0c 100644
--- a/heat/tests/test_stack_delete.py
+++ b/heat/tests/test_stack_delete.py
@@ -13,10 +13,10 @@
import copy
import time
+from unittest import mock
import fixtures
from keystoneauth1 import exceptions as kc_exceptions
-import mock
from oslo_log import log as logging
from heat.common import exception
diff --git a/heat/tests/test_stack_lock.py b/heat/tests/test_stack_lock.py
index 891f1f41a..7ea41c398 100644
--- a/heat/tests/test_stack_lock.py
+++ b/heat/tests/test_stack_lock.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from heat.common import exception
from heat.common import service_utils
@@ -36,7 +36,7 @@ class StackLockTest(common.HeatTestCase):
stack_object.Stack, 'get_by_id', return_value=stack)
class TestThreadLockException(Exception):
- pass
+ pass
def test_successful_acquire_new_lock(self):
mock_create = self.patchobject(stack_lock_object.StackLock,
diff --git a/heat/tests/test_stack_resource.py b/heat/tests/test_stack_resource.py
index 8404b7eeb..1d632f650 100644
--- a/heat/tests/test_stack_resource.py
+++ b/heat/tests/test_stack_resource.py
@@ -13,13 +13,12 @@
import contextlib
import json
+from unittest import mock
import uuid
-import mock
from oslo_config import cfg
from oslo_messaging import exceptions as msg_exceptions
from oslo_serialization import jsonutils
-import six
from heat.common import exception
from heat.common import identifier
@@ -220,7 +219,10 @@ class StackResourceTest(StackResourceBaseTest):
self.parent_resource.resource_id = 'fake_id'
self.parent_resource.prepare_abandon()
- self.parent_resource.delete_nested()
+ status = ('CREATE', 'COMPLETE', '', 'now_time')
+ with mock.patch.object(stack_object.Stack, 'get_status',
+ return_value=status):
+ self.parent_resource.delete_nested()
rpcc.return_value.abandon_stack.assert_called_once_with(
self.parent_resource.context, mock.ANY)
@@ -408,7 +410,7 @@ class StackResourceTest(StackResourceBaseTest):
'incorrect.')
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertEqual(raise_exc_msg, six.text_type(exc))
+ self.assertEqual(raise_exc_msg, str(exc))
def _test_validate_unknown_resource_type(self, stack_name, tmpl,
resource_name):
@@ -418,7 +420,7 @@ class StackResourceTest(StackResourceBaseTest):
exc = self.assertRaises(exception.StackValidationFailed,
rsrc.validate)
- self.assertIn(raise_exc_msg, six.text_type(exc))
+ self.assertIn(raise_exc_msg, str(exc))
def test_validate_resource_group(self):
# test validate without nested template
@@ -523,13 +525,16 @@ class StackResourceTest(StackResourceBaseTest):
def exc_filter(*args):
try:
yield
- except exception.NotFound:
+ except exception.EntityNotFound:
pass
rpcc.return_value.ignore_error_by_name.side_effect = exc_filter
rpcc.return_value.delete_stack = mock.Mock(
- side_effect=exception.NotFound())
- self.assertIsNone(self.parent_resource.delete_nested())
+ side_effect=exception.EntityNotFound('Stack', 'nested'))
+ status = ('CREATE', 'COMPLETE', '', 'now_time')
+ with mock.patch.object(stack_object.Stack, 'get_status',
+ return_value=status):
+ self.assertIsNone(self.parent_resource.delete_nested())
rpcc.return_value.delete_stack.assert_called_once_with(
self.parent_resource.context, mock.ANY, cast=False)
@@ -760,7 +765,7 @@ class StackResourceAttrTest(StackResourceBaseTest):
name = '%s-%s' % (self.parent_stack.name, self.parent_resource.name)
exc = self.assertRaises(AssertionError,
self.parent_resource.validate_nested_stack)
- self.assertEqual(expected_message, six.text_type(exc))
+ self.assertEqual(expected_message, str(exc))
mock_parse_nested.assert_called_once_with(name, 'foo', {})
@@ -815,7 +820,7 @@ class StackResourceCheckCompleteTest(StackResourceBaseTest):
complete = getattr(self.parent_resource,
'check_%s_complete' % self.action)
exc = self.assertRaises(exception.ResourceFailure, complete, None)
- self.assertEqual(exp, six.text_type(exc))
+ self.assertEqual(exp, str(exc))
self.mock_status.assert_called_once_with(
self.parent_resource.context, self.parent_resource.resource_id)
@@ -841,23 +846,6 @@ class StackResourceCheckCompleteTest(StackResourceBaseTest):
self.mock_status.assert_called_once_with(
self.parent_resource.context, self.parent_resource.resource_id)
- def test_update_not_started(self):
- if self.action != 'update':
- # only valid for updates at the moment.
- return
-
- self.status[1] = 'COMPLETE'
- self.status[3] = 'test'
- cookie = {'previous': {'state': ('UPDATE', 'COMPLETE'),
- 'updated_at': 'test'}}
-
- complete = getattr(self.parent_resource,
- 'check_%s_complete' % self.action)
-
- self.assertFalse(complete(cookie=cookie))
- self.mock_status.assert_called_once_with(
- self.parent_resource.context, self.parent_resource.resource_id)
-
def test_wrong_action(self):
self.status[0] = 'COMPLETE'
complete = getattr(self.parent_resource,
@@ -882,7 +870,7 @@ class WithTemplateTest(StackResourceBaseTest):
def __eq__(self, other):
if getattr(self, 'match', None) is not None:
return other == self.match
- if not isinstance(other, six.integer_types):
+ if not isinstance(other, int):
return False
self.match = other
diff --git a/heat/tests/test_stack_update.py b/heat/tests/test_stack_update.py
index 7a96a3f95..a466cef4f 100644
--- a/heat/tests/test_stack_update.py
+++ b/heat/tests/test_stack_update.py
@@ -12,12 +12,13 @@
# under the License.
import copy
+from unittest import mock
-import mock
from heat.common import exception
from heat.common import template_format
from heat.db.sqlalchemy import api as db_api
+from heat.engine.clients.os.keystone import fake_keystoneclient
from heat.engine import environment
from heat.engine import resource
from heat.engine import rsrc_defn
@@ -72,6 +73,37 @@ class StackUpdateTest(common.HeatTestCase):
self.assertRaises(exception.NotFound,
db_api.raw_template_get, self.ctx, raw_template_id)
+ def test_update_with_refresh_creds(self):
+ tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
+ 'Resources': {'AResource': {'Type': 'GenericResourceType'}}}
+
+ self.stack = stack.Stack(self.ctx, 'update_test_stack',
+ template.Template(tmpl))
+ self.stack.store()
+ self.stack.create()
+ self.assertEqual((stack.Stack.CREATE, stack.Stack.COMPLETE),
+ self.stack.state)
+
+ tmpl2 = {'HeatTemplateFormatVersion': '2012-12-12',
+ 'Resources': {
+ 'AResource': {'Type': 'GenericResourceType'},
+ 'BResource': {'Type': 'GenericResourceType'}}}
+ updated_stack = stack.Stack(self.ctx, 'updated_stack',
+ template.Template(tmpl2))
+ old_user_creds_id = self.stack.user_creds_id
+ self.stack.refresh_cred = True
+
+ self.stack.context.user_id = '5678'
+
+ mock_del_trust = self.patchobject(
+ fake_keystoneclient.FakeKeystoneClient, 'delete_trust')
+
+ self.stack.update(updated_stack)
+ self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
+ self.stack.state)
+ self.assertEqual(1, mock_del_trust.call_count)
+ self.assertNotEqual(self.stack.user_creds_id, old_user_creds_id)
+
def test_update_remove(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
@@ -234,7 +266,7 @@ class StackUpdateTest(common.HeatTestCase):
self.stack.update(updated_stack)
self.assertEqual((stack.Stack.UPDATE, stack.Stack.COMPLETE),
self.stack.state)
- self.assertIsNone(self.stack.tags)
+ self.assertEqual([], self.stack.tags)
def test_update_modify_ok_replace(self):
tmpl = {'HeatTemplateFormatVersion': '2012-12-12',
@@ -2110,6 +2142,7 @@ class StackUpdateTest(common.HeatTestCase):
def update(self, after, before=None, prev_resource=None):
ResourceTypeB.count_b += 1
+ yield
resource._register_class('ResourceTypeB', ResourceTypeB)
@@ -2124,6 +2157,7 @@ class StackUpdateTest(common.HeatTestCase):
def update(self, after, before=None, prev_resource=None):
ResourceTypeA.count_a += 1
+ yield
resource._register_class('ResourceTypeA', ResourceTypeA)
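The two hunks above add a bare yield to the stub update() handlers, turning them into generators. A minimal sketch of why that matters (assumption: Heat's scheduler drives resource update handlers as generator tasks, so a plain method that returns None would not be stepped):

    # Without the yield the body would run eagerly and return None; with it,
    # the body only executes once the task is advanced.
    class CountingResource(object):
        count = 0

        def update(self, after, before=None, prev_resource=None):
            type(self).count += 1
            yield

    task = CountingResource().update(after=None)
    list(task)  # drive the generator to completion
    assert CountingResource.count == 1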
diff --git a/heat/tests/test_stack_user.py b/heat/tests/test_stack_user.py
index 97853fa9d..856fad220 100644
--- a/heat/tests/test_stack_user.py
+++ b/heat/tests/test_stack_user.py
@@ -11,9 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from keystoneauth1 import exceptions as kc_exceptions
-import mock
-import six
from heat.common import exception
from heat.common import short_id
@@ -378,7 +378,7 @@ class StackUserTest(common.HeatTestCase):
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
ex = self.assertRaises(ValueError, rsrc._user_token)
expected = "Can't get user token without password"
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
self.fc.stack_domain_user_token.assert_not_called()
def test_user_token_err_noproject(self):
@@ -390,4 +390,4 @@ class StackUserTest(common.HeatTestCase):
ex = self.assertRaises(ValueError, rsrc._user_token)
expected = "Can't get user token, user not yet created"
- self.assertEqual(expected, six.text_type(ex))
+ self.assertEqual(expected, str(ex))
diff --git a/heat/tests/test_support.py b/heat/tests/test_support.py
index 2401f3c2c..7918c3f17 100644
--- a/heat/tests/test_support.py
+++ b/heat/tests/test_support.py
@@ -11,7 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from heat.engine import support
from heat.tests import common
@@ -88,4 +87,4 @@ class SupportStatusTest(common.HeatTestCase):
ex = self.assertRaises(ValueError,
support.SupportStatus, previous_status='YARRR')
self.assertEqual('previous_status must be SupportStatus '
- 'instead of %s' % str, six.text_type(ex))
+ 'instead of %s' % str, str(ex))
diff --git a/heat/tests/test_template.py b/heat/tests/test_template.py
index 34cc22a7b..bb0abfb79 100644
--- a/heat/tests/test_template.py
+++ b/heat/tests/test_template.py
@@ -16,7 +16,6 @@ import hashlib
import json
import fixtures
-import six
from stevedore import extension
from heat.common import exception
@@ -203,7 +202,7 @@ class TestTemplateVersion(common.HeatTestCase):
ex = self.assertRaises(exception.InvalidTemplateVersion,
template.get_version, tmpl, self.versions)
self.assertEqual('The template version is invalid: Template version '
- 'was not provided', six.text_type(ex))
+ 'was not provided', str(ex))
def test_ambiguous_version(self):
tmpl = {
@@ -221,7 +220,7 @@ class ParserTest(common.HeatTestCase):
def test_list(self):
raw = ['foo', 'bar', 'baz']
parsed = join(raw)
- for i in six.moves.xrange(len(raw)):
+ for i in range(len(raw)):
self.assertEqual(raw[i], parsed[i])
self.assertIsNot(raw, parsed)
@@ -236,7 +235,7 @@ class ParserTest(common.HeatTestCase):
raw = {'foo': ['bar', 'baz'], 'blarg': 'wibble'}
parsed = join(raw)
self.assertEqual(raw['blarg'], parsed['blarg'])
- for i in six.moves.xrange(len(raw['foo'])):
+ for i in range(len(raw['foo'])):
self.assertEqual(raw['foo'][i], parsed['foo'][i])
self.assertIsNot(raw, parsed)
self.assertIsNot(raw['foo'], parsed['foo'])
@@ -244,7 +243,7 @@ class ParserTest(common.HeatTestCase):
def test_list_dict(self):
raw = [{'foo': 'bar', 'blarg': 'wibble'}, 'baz', 'quux']
parsed = join(raw)
- for i in six.moves.xrange(1, len(raw)):
+ for i in range(1, len(raw)):
self.assertEqual(raw[i], parsed[i])
for k in raw[0]:
self.assertEqual(raw[0][k], parsed[0][k])
@@ -263,7 +262,7 @@ class ParserTest(common.HeatTestCase):
raw = [{'Fn::Join': [' ', ['foo', 'bar', 'baz']]}, 'blarg', 'wibble']
parsed = join(raw)
self.assertEqual('foo bar baz', parsed[0])
- for i in six.moves.xrange(1, len(raw)):
+ for i in range(1, len(raw)):
self.assertEqual(raw[i], parsed[i])
self.assertIsNot(raw, parsed)
@@ -326,23 +325,23 @@ class TestTemplateConditionParser(common.HeatTestCase):
stk = stack.Stack(self.ctx, 'test_condition_with_get_attr_func', tmpl)
ex = self.assertRaises(exception.StackValidationFailed,
tmpl.conditions, stk)
- self.assertIn('"get_attr" is invalid', six.text_type(ex))
+ self.assertIn('"get_attr" is invalid', str(ex))
self.assertIn('conditions.prod_env.equals[1].get_attr',
- six.text_type(ex))
+ str(ex))
# test with get_resource in top level of a condition
tmpl.t['conditions']['prod_env'] = {'get_resource': 'R1'}
stk = stack.Stack(self.ctx, 'test_condition_with_get_attr_func', tmpl)
ex = self.assertRaises(exception.StackValidationFailed,
tmpl.conditions, stk)
- self.assertIn('"get_resource" is invalid', six.text_type(ex))
+ self.assertIn('"get_resource" is invalid', str(ex))
# test with get_attr in top level of a condition
tmpl.t['conditions']['prod_env'] = {'get_attr': [None, 'att']}
stk = stack.Stack(self.ctx, 'test_condition_with_get_attr_func', tmpl)
ex = self.assertRaises(exception.StackValidationFailed,
tmpl.conditions, stk)
- self.assertIn('"get_attr" is invalid', six.text_type(ex))
+ self.assertIn('"get_attr" is invalid', str(ex))
def test_condition_resolved_not_boolean(self):
t = {
@@ -364,7 +363,7 @@ class TestTemplateConditionParser(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
conditions.is_enabled, 'prod_env')
self.assertIn('The definition of condition "prod_env" is invalid',
- six.text_type(ex))
+ str(ex))
def test_condition_reference_condition(self):
t = {
@@ -399,10 +398,10 @@ class TestTemplateConditionParser(common.HeatTestCase):
conds = tmpl.conditions(stk)
ex = self.assertRaises(ValueError, conds.is_enabled, 'invalid_cd')
- self.assertIn('Invalid condition "invalid_cd"', six.text_type(ex))
+ self.assertIn('Invalid condition "invalid_cd"', str(ex))
# test condition name is not string
ex = self.assertRaises(ValueError, conds.is_enabled, 111)
- self.assertIn('Invalid condition "111"', six.text_type(ex))
+ self.assertIn('Invalid condition "111"', str(ex))
def test_res_condition_using_boolean(self):
tmpl = copy.deepcopy(self.tmpl)
@@ -422,14 +421,14 @@ class TestTemplateConditionParser(common.HeatTestCase):
self.tmpl.t['outputs']['foo']['condition'] = 'invalid_cd'
ex = self.assertRaises(exception.StackValidationFailed,
lambda: stk.outputs)
- self.assertIn('Invalid condition "invalid_cd"', six.text_type(ex))
- self.assertIn('outputs.foo.condition', six.text_type(ex))
+ self.assertIn('Invalid condition "invalid_cd"', str(ex))
+ self.assertIn('outputs.foo.condition', str(ex))
# test condition name is not string
self.tmpl.t['outputs']['foo']['condition'] = 222
ex = self.assertRaises(exception.StackValidationFailed,
lambda: stk.outputs)
- self.assertIn('Invalid condition "222"', six.text_type(ex))
- self.assertIn('outputs.foo.condition', six.text_type(ex))
+ self.assertIn('Invalid condition "222"', str(ex))
+ self.assertIn('outputs.foo.condition', str(ex))
def test_conditions_circular_ref(self):
t = {
@@ -452,7 +451,7 @@ class TestTemplateConditionParser(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
conds.is_enabled, 'first_cond')
self.assertIn('Circular definition for condition "first_cond"',
- six.text_type(ex))
+ str(ex))
def test_parse_output_condition_boolean(self):
t = copy.deepcopy(self.tmpl.t)
@@ -500,7 +499,7 @@ class TestTemplateValidate(common.HeatTestCase):
self.assertIsNone(tmpl.t_digest)
tmpl.validate()
self.assertEqual(
- hashlib.sha256(six.text_type(t).encode('utf-8')).hexdigest(),
+ hashlib.sha256(str(t).encode('utf-8')).hexdigest(),
tmpl.t_digest, 'invalid template digest')
def test_template_validate_cfn_good(self):
@@ -556,7 +555,7 @@ class TestTemplateValidate(common.HeatTestCase):
tmpl = template.Template(t)
err = self.assertRaises(exception.InvalidTemplateSection,
tmpl.validate)
- self.assertIn('Parameteers', six.text_type(err))
+ self.assertIn('Parameteers', str(err))
def test_template_validate_cfn_empty(self):
t = template_format.parse('''
@@ -610,7 +609,7 @@ class TestTemplateValidate(common.HeatTestCase):
error = self.assertRaises(exception.StackValidationFailed,
tmpl.validate)
self.assertEqual('Each Resource must contain a Type key.',
- six.text_type(error))
+ str(error))
def test_get_resources_no_type(self):
"""Test get resources with invalid key."""
@@ -633,7 +632,7 @@ class TestTemplateValidate(common.HeatTestCase):
error = self.assertRaises(exception.StackValidationFailed,
tmpl.validate)
self.assertEqual('Each Resource must contain a Type key.',
- six.text_type(error))
+ str(error))
def test_template_validate_hot_check_t_digest(self):
t = {
@@ -652,7 +651,7 @@ class TestTemplateValidate(common.HeatTestCase):
self.assertIsNone(tmpl.t_digest)
tmpl.validate()
self.assertEqual(hashlib.sha256(
- six.text_type(t).encode('utf-8')).hexdigest(),
+ str(t).encode('utf-8')).hexdigest(),
tmpl.t_digest, 'invalid template digest')
def test_template_validate_hot_good(self):
@@ -688,7 +687,7 @@ class TestTemplateValidate(common.HeatTestCase):
tmpl = template.Template(t)
err = self.assertRaises(exception.InvalidTemplateSection,
tmpl.validate)
- self.assertIn('parameteers', six.text_type(err))
+ self.assertIn('parameteers', str(err))
class TemplateTest(common.HeatTestCase):
@@ -734,14 +733,14 @@ class TemplateTest(common.HeatTestCase):
valid_versions = ['2013-05-23', '2014-10-16',
'2015-04-30', '2015-10-15', '2016-04-08',
'2016-10-14', '2017-02-24', '2017-09-01',
- '2018-03-02', '2018-08-31',
+ '2018-03-02', '2018-08-31', '2021-04-16',
'newton', 'ocata', 'pike',
- 'queens', 'rocky']
+ 'queens', 'rocky', 'wallaby']
ex_error_msg = ('The template version is invalid: '
'"heat_template_version: 2012-12-12". '
'"heat_template_version" should be one of: %s'
% ', '.join(valid_versions))
- self.assertEqual(ex_error_msg, six.text_type(init_ex))
+ self.assertEqual(ex_error_msg, str(init_ex))
def test_invalid_version_not_in_hot_versions(self):
invalid_hot_version_tmp = template_format.parse(
@@ -761,7 +760,7 @@ class TemplateTest(common.HeatTestCase):
'"heat_template_version: 2012-12-12". '
'"heat_template_version" should be '
'one of: 2013-05-23, 2013-06-23')
- self.assertEqual(ex_error_msg, six.text_type(init_ex))
+ self.assertEqual(ex_error_msg, str(init_ex))
template._template_classes = temp_copy
def test_invalid_aws_version(self):
@@ -774,7 +773,7 @@ class TemplateTest(common.HeatTestCase):
ex_error_msg = ('The template version is invalid: '
'"AWSTemplateFormatVersion: 2012-12-12". '
'"AWSTemplateFormatVersion" should be: 2010-09-09')
- self.assertEqual(ex_error_msg, six.text_type(init_ex))
+ self.assertEqual(ex_error_msg, str(init_ex))
def test_invalid_version_not_in_aws_versions(self):
invalid_aws_version_tmp = template_format.parse(
@@ -794,7 +793,7 @@ class TemplateTest(common.HeatTestCase):
'"AWSTemplateFormatVersion: 2012-12-12". '
'"AWSTemplateFormatVersion" should be '
'one of: 2010-09-09, 2011-06-23')
- self.assertEqual(ex_error_msg, six.text_type(init_ex))
+ self.assertEqual(ex_error_msg, str(init_ex))
template._template_classes = temp_copy
def test_invalid_heat_version(self):
@@ -808,7 +807,7 @@ class TemplateTest(common.HeatTestCase):
ex_error_msg = ('The template version is invalid: '
'"HeatTemplateFormatVersion: 2010-09-09". '
'"HeatTemplateFormatVersion" should be: 2012-12-12')
- self.assertEqual(ex_error_msg, six.text_type(init_ex))
+ self.assertEqual(ex_error_msg, str(init_ex))
def test_invalid_version_not_in_heat_versions(self):
invalid_heat_version_tmp = template_format.parse(
@@ -829,7 +828,7 @@ class TemplateTest(common.HeatTestCase):
'"HeatTemplateFormatVersion: 2010-09-09". '
'"HeatTemplateFormatVersion" should be '
'one of: 2012-12-12, 2014-12-12')
- self.assertEqual(ex_error_msg, six.text_type(init_ex))
+ self.assertEqual(ex_error_msg, str(init_ex))
template._template_classes = temp_copy
@@ -1013,12 +1012,12 @@ class TemplateTest(common.HeatTestCase):
error_msg = ('.Fn::Equals: Arguments to "Fn::Equals" must be '
'of the form: [value_1, value_2]')
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
# test invalid type
snippet = {'Fn::Equals': {"equal": False}}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_not(self):
tpl = template_format.parse('''
@@ -1054,19 +1053,19 @@ class TemplateTest(common.HeatTestCase):
self.resolve_condition, snippet, tmpl, stk)
error_msg = 'Invalid condition "invalid_arg"'
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
# test invalid type
snippet = {'Fn::Not': 'invalid'}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
error_msg = 'Arguments to "Fn::Not" must be '
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
snippet = {'Fn::Not': ['cd1', 'cd2']}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
error_msg = 'Arguments to "Fn::Not" must be '
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_and(self):
tpl = template_format.parse('''
@@ -1117,21 +1116,21 @@ class TemplateTest(common.HeatTestCase):
snippet = {'Fn::And': ['invalid_arg']}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
error_msg = 'Arguments to "Fn::And" must be'
# test invalid type
snippet = {'Fn::And': 'invalid'}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
stk = stack.Stack(utils.dummy_context(), 'test_and_invalid', tmpl)
snippet = {'Fn::And': ['cd1', True]}
exc = self.assertRaises(ValueError,
self.resolve_condition, snippet, tmpl, stk)
error_msg = 'Invalid condition "cd1"'
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_or(self):
tpl = template_format.parse('''
@@ -1178,21 +1177,21 @@ class TemplateTest(common.HeatTestCase):
snippet = {'Fn::Or': ['invalid_arg']}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
error_msg = 'Arguments to "Fn::Or" must be'
# test invalid type
snippet = {'Fn::Or': 'invalid'}
exc = self.assertRaises(exception.StackValidationFailed,
self.resolve_condition, snippet, tmpl)
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
stk = stack.Stack(utils.dummy_context(), 'test_or_invalid', tmpl)
snippet = {'Fn::Or': ['invalid_cd', True]}
exc = self.assertRaises(ValueError,
self.resolve_condition, snippet, tmpl, stk)
error_msg = 'Invalid condition "invalid_cd"'
- self.assertIn(error_msg, six.text_type(exc))
+ self.assertIn(error_msg, str(exc))
def test_join(self):
tmpl = template.Template(empty_template)
@@ -1359,7 +1358,7 @@ class TemplateTest(common.HeatTestCase):
template.Template(empty_template))
error = self.assertRaises(exception.StackValidationFailed,
self.resolve, snippet, stk.t, stk)
- self.assertIn(next(iter(snippet)), six.text_type(error))
+ self.assertIn(next(iter(snippet)), str(error))
def test_resource_facade_missing_deletion_policy(self):
snippet = {'Fn::ResourceFacade': 'DeletionPolicy'}
@@ -1390,7 +1389,7 @@ class TemplateTest(common.HeatTestCase):
})
self.assertEqual(expected_description, tmpl['Description'])
keyError = self.assertRaises(KeyError, tmpl.__getitem__, 'Parameters')
- self.assertIn("can not be accessed directly", six.text_type(keyError))
+ self.assertIn("can not be accessed directly", str(keyError))
def test_parameters_section_not_iterable(self):
expected_description = "This can be accessed"
@@ -1451,7 +1450,7 @@ class TemplateTest(common.HeatTestCase):
empty = template.Template(copy.deepcopy(empty_template))
stk = stack.Stack(self.ctx, 'test_stack', source)
- for defn in six.itervalues(source.outputs(stk)):
+ for defn in source.outputs(stk).values():
empty.add_output(defn)
self.assertEqual(cfn_tpl['Outputs'], empty.t['Outputs'])
@@ -1649,7 +1648,7 @@ class TemplateFnErrorTest(common.HeatTestCase):
error = self.assertRaises(self.expect,
resolve,
self.snippet)
- self.assertIn(next(iter(self.snippet)), six.text_type(error))
+ self.assertIn(next(iter(self.snippet)), str(error))
class ResolveDataTest(common.HeatTestCase):
@@ -1706,7 +1705,7 @@ class ResolveDataTest(common.HeatTestCase):
[' ', ['foo', 45]]}, 'baz']]}
error = self.assertRaises(TypeError,
self.resolve, snippet)
- self.assertIn('45', six.text_type(error))
+ self.assertIn('45', str(error))
def test_base64_replace(self):
raw = {'Fn::Base64': {'Fn::Replace': [
diff --git a/heat/tests/test_template_format.py b/heat/tests/test_template_format.py
index beffdcaff..3ce127c24 100644
--- a/heat/tests/test_template_format.py
+++ b/heat/tests/test_template_format.py
@@ -12,10 +12,9 @@
# under the License.
import os
+from unittest import mock
-import mock
import re
-import six
import yaml
from heat.common import config
@@ -94,7 +93,7 @@ class YamlMinimalTest(common.HeatTestCase):
parse_ex = self.assertRaises(ValueError,
template_format.parse,
tmpl_str)
- self.assertIn(msg_str, six.text_type(parse_ex))
+ self.assertIn(msg_str, str(parse_ex))
def test_long_yaml(self):
template = {'HeatTemplateFormatVersion': '2012-12-12'}
@@ -110,7 +109,7 @@ class YamlMinimalTest(common.HeatTestCase):
'bytes) exceeds maximum allowed size (%(limit)s bytes).') % {
'actual_len': len(str(long_yaml)),
'limit': config.cfg.CONF.max_template_size}
- self.assertEqual(msg, six.text_type(ex))
+ self.assertEqual(msg, str(ex))
def test_parse_no_version_format(self):
yaml = ''
@@ -155,7 +154,7 @@ class YamlParseExceptions(common.HeatTestCase):
('parser', dict(raised_exception=yaml.parser.ParserError())),
('reader',
dict(raised_exception=yaml.reader.ReaderError(
- '', 42, six.b('x'), '', ''))),
+ '', 42, 'x'.encode('latin-1'), '', ''))),
]
def test_parse_to_value_exception(self):
@@ -169,7 +168,7 @@ class YamlParseExceptions(common.HeatTestCase):
'file://test.yaml')
self.assertIn('Error parsing template file://test.yaml',
- six.text_type(err))
+ str(err))
class JsonYamlResolvedCompareTest(common.HeatTestCase):
diff --git a/heat/tests/test_translation_rule.py b/heat/tests/test_translation_rule.py
index d0669f9cc..efbc6d40d 100644
--- a/heat/tests/test_translation_rule.py
+++ b/heat/tests/test_translation_rule.py
@@ -12,8 +12,7 @@
# under the License.
import copy
-import mock
-import six
+from unittest import mock
from heat.common import exception
from heat.engine.cfn import functions as cfn_funcs
@@ -94,7 +93,7 @@ class TestTranslationRule(common.HeatTestCase):
mock.ANY)
self.assertEqual('There is no rule EatTheCookie. List of allowed '
'rules is: Add, Replace, Delete, Resolve.',
- six.text_type(exc))
+ str(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
@@ -104,7 +103,7 @@ class TestTranslationRule(common.HeatTestCase):
'value')
self.assertEqual('"translation_path" should be non-empty list '
'with path to translate.',
- six.text_type(exc))
+ str(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
@@ -114,7 +113,7 @@ class TestTranslationRule(common.HeatTestCase):
mock.ANY)
self.assertEqual('"translation_path" should be non-empty list '
'with path to translate.',
- six.text_type(exc))
+ str(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
@@ -126,7 +125,7 @@ class TestTranslationRule(common.HeatTestCase):
'some_path')
self.assertEqual('"value_path", "value" and "value_name" are '
'mutually exclusive and cannot be specified '
- 'at the same time.', six.text_type(exc))
+ 'at the same time.', str(exc))
exc = self.assertRaises(ValueError,
translation.TranslationRule,
@@ -135,7 +134,7 @@ class TestTranslationRule(common.HeatTestCase):
['any'],
'value')
self.assertEqual('"value" must be list type when rule is Add.',
- six.text_type(exc))
+ str(exc))
def test_add_rule_exist(self):
schema = {
@@ -246,7 +245,7 @@ class TestTranslationRule(common.HeatTestCase):
ex = self.assertRaises(ValueError, tran.translate, 'far', 'tran')
self.assertEqual('Incorrect translation rule using - cannot '
'resolve Add rule for non-list translation '
- 'value "far".', six.text_type(ex))
+ 'value "far".', str(ex))
def test_replace_rule_map_exist(self):
schema = {
@@ -446,7 +445,7 @@ class TestTranslationRule(common.HeatTestCase):
ex = self.assertRaises(exception.StackValidationFailed,
tran.translate, 'bar', data['bar'])
self.assertEqual('Cannot define the following properties at '
- 'the same time: bar, far', six.text_type(ex))
+ 'the same time: bar, far', str(ex))
def test_replace_rule_str_value_path(self):
schema = {
@@ -491,7 +490,7 @@ class TestTranslationRule(common.HeatTestCase):
exc = self.assertRaises(exception.StackValidationFailed,
props.validate)
self.assertEqual("Property error: bar: Value 'one' is not an integer",
- six.text_type(exc))
+ str(exc))
def test_delete_rule_list(self):
schema = {
diff --git a/heat/tests/test_urlfetch.py b/heat/tests/test_urlfetch.py
index e12036381..ac3f75bd2 100644
--- a/heat/tests/test_urlfetch.py
+++ b/heat/tests/test_urlfetch.py
@@ -11,10 +11,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import io
+import urllib.error
+import urllib.request
+
from oslo_config import cfg
import requests
from requests import exceptions
-import six
from heat.common import urlfetch
from heat.tests import common
@@ -42,15 +45,15 @@ class UrlFetchTest(common.HeatTestCase):
def test_file_scheme_supported(self):
data = '{ "foo": "bar" }'
url = 'file:///etc/profile'
- mock_open = self.patchobject(six.moves.urllib.request, 'urlopen')
- mock_open.return_value = six.moves.cStringIO(data)
+ mock_open = self.patchobject(urllib.request, 'urlopen')
+ mock_open.return_value = io.StringIO(data)
self.assertEqual(data, urlfetch.get(url, allowed_schemes=['file']))
mock_open.assert_called_once_with(url)
def test_file_scheme_failure(self):
url = 'file:///etc/profile'
- mock_open = self.patchobject(six.moves.urllib.request, 'urlopen')
- mock_open.side_effect = six.moves.urllib.error.URLError('oops')
+ mock_open = self.patchobject(urllib.request, 'urlopen')
+ mock_open.side_effect = urllib.error.URLError('oops')
self.assertRaises(urlfetch.URLFetchError,
urlfetch.get, url, allowed_schemes=['file'])
mock_open.assert_called_once_with(url)
@@ -109,5 +112,5 @@ class UrlFetchTest(common.HeatTestCase):
mock_get.return_value = response
exception = self.assertRaises(urlfetch.URLFetchError,
urlfetch.get, url)
- self.assertIn("Template exceeds", six.text_type(exception))
+ self.assertIn("Template exceeds", str(exception))
mock_get.assert_called_once_with(url, stream=True)
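test_urlfetch.py now uses the standard library directly. A minimal sketch of the mapping from the old six.moves names (illustrative only, not part of the patch):

    # six.moves.cStringIO -> io.StringIO; six.moves.urllib -> urllib.request
    # and urllib.error on Python 3.
    import io
    import urllib.error
    import urllib.request

    buf = io.StringIO('{ "foo": "bar" }')
    assert buf.read() == '{ "foo": "bar" }'
    assert issubclass(urllib.error.URLError, OSError)
    assert callable(urllib.request.urlopen)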
diff --git a/heat/tests/test_validate.py b/heat/tests/test_validate.py
index 94780f5db..a5521e435 100644
--- a/heat/tests/test_validate.py
+++ b/heat/tests/test_validate.py
@@ -11,9 +11,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_messaging.rpc import dispatcher
-import six
import webob
from heat.common import exception
@@ -1309,7 +1309,7 @@ class ValidateTest(common.HeatTestCase):
res = dict(self.engine.validate_template(self.ctx, t, {}))
self.assertEqual({'Error': 'Resources must contain Resource. '
- 'Found a [%s] instead' % six.text_type},
+ 'Found a [%s] instead' % str},
res)
def test_invalid_section_cfn(self):
@@ -1621,7 +1621,7 @@ class ValidateTest(common.HeatTestCase):
'parameter_groups.Database '
'Group: The InstanceType parameter must be '
'assigned to one parameter group only.'),
- six.text_type(exc))
+ str(exc))
def test_validate_duplicate_parameters_no_label(self):
t = template_format.parse(test_template_parameters_duplicate_no_label)
@@ -1634,7 +1634,7 @@ class ValidateTest(common.HeatTestCase):
'parameter_groups.: '
'The key_name parameter must be '
'assigned to one parameter group only.'),
- six.text_type(exc))
+ str(exc))
def test_validate_invalid_parameter_in_group(self):
t = template_format.parse(test_template_invalid_parameter_name)
@@ -1652,7 +1652,7 @@ class ValidateTest(common.HeatTestCase):
'parameter_groups.Database Group: The grouped '
'parameter SomethingNotHere does not '
'reference a valid parameter.'),
- six.text_type(exc))
+ str(exc))
def test_validate_invalid_parameter_no_label(self):
t = template_format.parse(test_template_invalid_parameter_no_label)
@@ -1666,7 +1666,7 @@ class ValidateTest(common.HeatTestCase):
'parameter_groups.: The grouped '
'parameter key_name does not '
'reference a valid parameter.'),
- six.text_type(exc))
+ str(exc))
def test_validate_no_parameters_in_group(self):
t = template_format.parse(test_template_no_parameters)
@@ -1677,7 +1677,7 @@ class ValidateTest(common.HeatTestCase):
self.assertEqual(_('Parameter Groups error: parameter_groups.Server '
'Group: The parameters must be provided for each '
- 'parameter group.'), six.text_type(exc))
+ 'parameter group.'), str(exc))
def test_validate_parameter_groups_not_list(self):
t = template_format.parse(test_template_parameter_groups_not_list)
@@ -1688,7 +1688,7 @@ class ValidateTest(common.HeatTestCase):
self.assertEqual(_('Parameter Groups error: parameter_groups: '
'The parameter_groups should be '
- 'a list.'), six.text_type(exc))
+ 'a list.'), str(exc))
def test_validate_parameters_not_list(self):
t = template_format.parse(test_template_parameters_not_list)
@@ -1700,7 +1700,7 @@ class ValidateTest(common.HeatTestCase):
self.assertEqual(_('Parameter Groups error: '
'parameter_groups.Server Group: '
'The parameters of parameter group should be '
- 'a list.'), six.text_type(exc))
+ 'a list.'), str(exc))
def test_validate_parameters_error_no_label(self):
t = template_format.parse(test_template_parameters_error_no_label)
@@ -1711,7 +1711,7 @@ class ValidateTest(common.HeatTestCase):
self.assertEqual(_('Parameter Groups error: parameter_groups.: '
'The parameters of parameter group should be '
- 'a list.'), six.text_type(exc))
+ 'a list.'), str(exc))
def test_validate_allowed_values_integer(self):
t = template_format.parse(test_template_allowed_integers)
@@ -1750,7 +1750,7 @@ class ValidateTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('"3" is not an allowed value [1, 4, 8]',
- six.text_type(err))
+ str(err))
# test with size parameter provided as number
template.env = environment.Environment({'size': 3})
@@ -1758,7 +1758,7 @@ class ValidateTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('3 is not an allowed value [1, 4, 8]',
- six.text_type(err))
+ str(err))
def test_validate_not_allowed_values_integer_str(self):
t = template_format.parse(test_template_allowed_integers_str)
@@ -1770,7 +1770,7 @@ class ValidateTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('"3" is not an allowed value ["1", "4", "8"]',
- six.text_type(err))
+ str(err))
# test with size parameter provided as number
template.env = environment.Environment({'size': 3})
@@ -1778,7 +1778,7 @@ class ValidateTest(common.HeatTestCase):
err = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('3 is not an allowed value ["1", "4", "8"]',
- six.text_type(err))
+ str(err))
def test_validate_invalid_outputs(self):
t = template_format.parse(test_template_invalid_outputs)
@@ -1789,7 +1789,7 @@ class ValidateTest(common.HeatTestCase):
error_message = ('outputs.string.value.get_attr: Arguments to '
'"get_attr" must be of the form '
'[resource_name, attribute, (path), ...]')
- self.assertEqual(error_message, six.text_type(err))
+ self.assertEqual(error_message, str(err))
def test_validate_resource_attr_invalid_type(self):
t = template_format.parse("""
@@ -1802,7 +1802,7 @@ class ValidateTest(common.HeatTestCase):
stack = parser.Stack(self.ctx, 'test_stack', template)
ex = self.assertRaises(exception.StackValidationFailed, stack.validate)
self.assertEqual('Resource resource type type must be string',
- six.text_type(ex))
+ str(ex))
def test_validate_resource_attr_invalid_type_cfn(self):
t = template_format.parse("""
@@ -1814,7 +1814,7 @@ class ValidateTest(common.HeatTestCase):
stack = parser.Stack(self.ctx, 'test_stack', tmpl.Template(t))
ex = self.assertRaises(exception.StackValidationFailed, stack.validate)
self.assertEqual('Resource Resource Type type must be string',
- six.text_type(ex))
+ str(ex))
def test_validate_resource_invalid_key(self):
t = template_format.parse("""
@@ -1827,7 +1827,7 @@ class ValidateTest(common.HeatTestCase):
template = tmpl.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
ex = self.assertRaises(exception.StackValidationFailed, stack.validate)
- self.assertIn('wibble', six.text_type(ex))
+ self.assertIn('wibble', str(ex))
def test_validate_resource_invalid_cfn_key_in_hot(self):
t = template_format.parse("""
@@ -1840,7 +1840,7 @@ class ValidateTest(common.HeatTestCase):
template = tmpl.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
ex = self.assertRaises(exception.StackValidationFailed, stack.validate)
- self.assertIn('Properties', six.text_type(ex))
+ self.assertIn('Properties', str(ex))
def test_validate_resource_invalid_key_cfn(self):
t = template_format.parse("""
diff --git a/heat/tests/test_vpc.py b/heat/tests/test_vpc.py
index 373f8e165..5db8f9660 100644
--- a/heat/tests/test_vpc.py
+++ b/heat/tests/test_vpc.py
@@ -11,7 +11,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import uuid
from heat.common import exception
@@ -532,7 +532,7 @@ Resources:
'name': self.nic_name,
'admin_state_up': True}
if security_groups:
- self._port['security_groups'] = security_groups
+ self._port['security_groups'] = security_groups
self.mockclient.create_port.return_value = {
'port': {
diff --git a/heat/tests/utils.py b/heat/tests/utils.py
index 854e7caf4..cf3e383f8 100644
--- a/heat/tests/utils.py
+++ b/heat/tests/utils.py
@@ -90,6 +90,32 @@ def dummy_context(user='test_username', tenant_id='test_tenant_id',
})
+def dummy_system_admin_context():
+ """Return a heat.common.context.RequestContext for system-admin.
+
+ :returns: an instance of heat.common.context.RequestContext
+
+ """
+ ctx = dummy_context(roles=['admin', 'member', 'reader'])
+ ctx.system_scope = 'all'
+ ctx.project_id = None
+ ctx.tenant_id = None
+ return ctx
+
+
+def dummy_system_reader_context():
+ """Return a heat.common.context.RequestContext for system-reader.
+
+ :returns: an instance of heat.common.context.RequestContext
+
+ """
+ ctx = dummy_context(roles=['reader'])
+ ctx.system_scope = 'all'
+ ctx.project_id = None
+ ctx.tenant_id = None
+ return ctx
+
+
def parse_stack(t, params=None, files=None, stack_name=None,
stack_id=None, timeout_mins=None,
cache_data=None, tags=None):
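A minimal usage sketch for the new context helpers added above (assumption: a test environment where heat's test utilities import cleanly):

    # The helpers build system-scoped request contexts: the admin variant gets
    # the admin/member/reader roles, the reader variant only the reader role,
    # and both drop project scope in favour of system_scope='all'.
    from heat.tests import utils

    ctx = utils.dummy_system_reader_context()
    assert ctx.system_scope == 'all'
    assert ctx.project_id is None
    assert 'reader' in ctx.roles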
diff --git a/heat_integrationtests/README.rst b/heat_integrationtests/README.rst
index ec4635a56..bfb0e7237 100644
--- a/heat_integrationtests/README.rst
+++ b/heat_integrationtests/README.rst
@@ -20,6 +20,7 @@ To run the tests against DevStack, do the following::
If the Heat Tempest Plugin is also installed, the tests from that will be run
as well.
-If custom configuration is required, add it in the file
-``heat_integrationtests/heat_integrationtests.conf``. A sample configuration is
-available in ``heat_integrationtests/heat_integrationtests.conf.sample``
+These tests require both Tempest and the Heat Tempest plugin to be installed.
+If custom configuration is required, set it in the Heat Tempest plugin
+section of the Tempest configuration (see the Heat Tempest plugin and
+Tempest documentation for more information).
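A minimal sketch of how the tests pick up that configuration (assuming Tempest and the Heat Tempest plugin are installed and configured, as the diffs below rely on):

    # Options formerly read from heat_integrationtests.conf now come from the
    # [heat_plugin] group of the Tempest configuration.
    from tempest import config

    CONF = config.CONF
    print(CONF.heat_plugin.auth_url, CONF.heat_plugin.build_timeout)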
diff --git a/heat_integrationtests/__init__.py b/heat_integrationtests/__init__.py
index ec0913317..625951c3c 100644
--- a/heat_integrationtests/__init__.py
+++ b/heat_integrationtests/__init__.py
@@ -14,16 +14,14 @@
import os
import unittest
-from heat_integrationtests.common import config
-
from oslo_log import log as logging
+from tempest import config
LOG = logging.getLogger(__name__, project=__name__)
def load_tests(loader, standard_tests, pattern):
- logging.setup(config.init_conf(), __name__)
-
+ logging.setup(config.CONF, __name__)
suite = unittest.TestSuite()
heat_integration_dir = os.path.dirname(os.path.abspath(__file__))
diff --git a/heat_integrationtests/cleanup_test_env.sh b/heat_integrationtests/cleanup_test_env.sh
index ee32d17a3..12791c9fd 100755
--- a/heat_integrationtests/cleanup_test_env.sh
+++ b/heat_integrationtests/cleanup_test_env.sh
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This script is executed inside post_test_hook function in devstack gate.
+# This script is executed in the devstack gate.
set -ex
@@ -30,4 +30,4 @@ openstack flavor delete m1.heat_int
openstack flavor delete m1.heat_micro
# delete the image created
-openstack image delete Fedora-Cloud-Base-29-1.2.x86_64
+openstack image delete Fedora-Cloud-Base-33-1.2.x86_64
diff --git a/heat_integrationtests/common/clients.py b/heat_integrationtests/common/clients.py
index d2cc92977..6079db19d 100644
--- a/heat_integrationtests/common/clients.py
+++ b/heat_integrationtests/common/clients.py
@@ -17,6 +17,7 @@ from heat.common.i18n import _
from heatclient import client as heat_client
from keystoneauth1.identity.generic import password
from keystoneauth1 import session
+from keystoneclient.v3 import client as kc_v3
from neutronclient.v2_0 import client as neutron_client
from novaclient import client as nova_client
from swiftclient import client as swift_client
@@ -79,6 +80,7 @@ class ClientManager(object):
self.ca_file = self.conf.ca_file
self.identity_client = self._get_identity_client()
+ self.keystone_client = self._get_keystone_client()
self.orchestration_client = self._get_orchestration_client()
self.compute_client = self._get_compute_client()
self.network_client = self._get_network_client()
@@ -143,6 +145,12 @@ class ClientManager(object):
return KeystoneWrapperClient(auth, verify_cert)
+ def _get_keystone_client(self):
+ # Create our default Keystone client to use in testing
+ return kc_v3.Client(
+ session=self.identity_client.session,
+ region_name=self.conf.region)
+
def _get_compute_client(self):
# Create our default Nova client to use in testing
return nova_client.Client(
diff --git a/heat_integrationtests/common/config.py b/heat_integrationtests/common/config.py
deleted file mode 100644
index 8ec24d833..000000000
--- a/heat_integrationtests/common/config.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-import heat_integrationtests
-
-_CONF = None
-
-heat_group = cfg.OptGroup(name="heat_plugin",
- title="Heat Service Options")
-
-HeatGroup = [
- cfg.StrOpt("catalog_type",
- default="orchestration",
- help="Catalog type of the orchestration service."),
- cfg.StrOpt('username',
- help="Username to use for non admin API requests."),
- cfg.StrOpt('password',
- help="Non admin API key to use when authenticating.",
- secret=True),
- cfg.StrOpt('admin_username',
- help="Username to use for admin API requests."),
- cfg.StrOpt('admin_password',
- help="Admin API key to use when authentication.",
- secret=True),
- cfg.StrOpt('project_name',
- help="Project name to use for API requests.",
- deprecated_opts=[cfg.DeprecatedOpt('tenant_name',
- group='heat_plugin')]),
- cfg.StrOpt('admin_project_name',
- default='admin',
- help="Admin project name to use for admin API requests.",
- deprecated_opts=[cfg.DeprecatedOpt('admin_tenant_name',
- group='heat_plugin')]),
- cfg.StrOpt('auth_url',
- help="Full URI of the OpenStack Identity API (Keystone)."),
- cfg.StrOpt('auth_version',
- help="OpenStack Identity API version."),
- cfg.StrOpt('user_domain_name',
- help="User domain name, if keystone v3 auth_url "
- "is used"),
- cfg.StrOpt('project_domain_name',
- help="Project domain name, if keystone v3 auth_url "
- "is used"),
- cfg.StrOpt('user_domain_id',
- help="User domain id, if keystone v3 auth_url "
- "is used"),
- cfg.StrOpt('project_domain_id',
- help="Project domain id, if keystone v3 auth_url "
- "is used"),
- cfg.StrOpt('region',
- help="The region name to use"),
- cfg.StrOpt('instance_type',
- help="Instance type for tests. Needs to be big enough for a "
- "full OS plus the test workload"),
- cfg.StrOpt('minimal_instance_type',
- help="Instance type enough for simplest cases."),
- cfg.StrOpt('image_ref',
- help="Name of image to use for tests which boot servers."),
- cfg.StrOpt('keypair_name',
- help="Name of existing keypair to launch servers with."),
- cfg.StrOpt('minimal_image_ref',
- help="Name of minimal (e.g cirros) image to use when "
- "launching test instances."),
- cfg.BoolOpt('disable_ssl_certificate_validation',
- default=False,
- help="Set to True if using self-signed SSL certificates."),
- cfg.StrOpt('ca_file',
- help="CA certificate to pass for servers that have "
- "https endpoint."),
- cfg.IntOpt('build_interval',
- default=4,
- help="Time in seconds between build status checks."),
- cfg.IntOpt('build_timeout',
- default=1200,
- help="Timeout in seconds to wait for a stack to build."),
- cfg.StrOpt('network_for_ssh',
- default='heat-net',
- help="Network used for SSH connections."),
- cfg.StrOpt('fixed_network_name',
- default='heat-net',
- help="Visible fixed network name "),
- cfg.StrOpt('floating_network_name',
- default='public',
- help="Visible floating network name "),
- cfg.StrOpt('fixed_subnet_name',
- default='heat-subnet',
- help="Visible fixed sub-network name "),
- cfg.BoolOpt('skip_functional_tests',
- default=False,
- help="Skip all functional tests"),
- cfg.ListOpt('skip_functional_test_list',
- help="List of functional test class or class.method "
- "names to skip ex. AutoscalingGroupTest, "
- "InstanceGroupBasicTest.test_size_updates_work"),
- cfg.ListOpt('skip_test_stack_action_list',
- help="List of stack actions in tests to skip "
- "ex. ABANDON, ADOPT, SUSPEND, RESUME"),
- cfg.BoolOpt('convergence_engine_enabled',
- default=True,
- help="Test features that are only present for stacks with "
- "convergence enabled."),
- cfg.IntOpt('connectivity_timeout',
- default=120,
- help="Timeout in seconds to wait for connectivity to "
- "server."),
-]
-
-
-def init_conf(read_conf=True):
- global _CONF
- if _CONF is not None:
- return _CONF
-
- default_config_files = None
- if read_conf:
- confpath = os.path.join(
- os.path.dirname(os.path.realpath(heat_integrationtests.__file__)),
- 'heat_integrationtests.conf')
- if os.path.isfile(confpath):
- default_config_files = [confpath]
-
- _CONF = cfg.ConfigOpts()
- logging.register_options(_CONF)
- _CONF(args=[], project='heat_integrationtests',
- default_config_files=default_config_files)
-
- for group, opts in list_opts():
- _CONF.register_opts(opts, group=group)
- return _CONF
-
-
-def list_opts():
- yield heat_group.name, HeatGroup
diff --git a/heat_integrationtests/common/test.py b/heat_integrationtests/common/test.py
index 86d7d0a55..52a7fe45d 100644
--- a/heat_integrationtests/common/test.py
+++ b/heat_integrationtests/common/test.py
@@ -15,19 +15,18 @@ import random
import re
import subprocess
import time
+import urllib
import fixtures
from heatclient import exc as heat_exceptions
from keystoneauth1 import exceptions as kc_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
-import six
-from six.moves import urllib
+from tempest import config
import testscenarios
import testtools
from heat_integrationtests.common import clients
-from heat_integrationtests.common import config
from heat_integrationtests.common import exceptions
LOG = logging.getLogger(__name__)
@@ -59,7 +58,7 @@ def call_until_true(duration, sleep_for, func, *args, **kwargs):
def rand_name(name=''):
- randbits = six.text_type(random.randint(1, 0x7fffffff))
+ randbits = str(random.randint(1, 0x7fffffff))
if name:
return name + '-' + randbits
else:
@@ -71,8 +70,7 @@ def requires_convergence(test_method):
The decorated test will be skipped when convergence is disabled.
'''
- convergence_enabled = config.init_conf(
- ).heat_plugin.convergence_engine_enabled
+ convergence_enabled = config.CONF.heat_plugin.convergence_engine_enabled
skipper = testtools.skipUnless(convergence_enabled,
"Convergence-only tests are disabled")
return skipper(test_method)
@@ -84,7 +82,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
def setUp(self):
super(HeatIntegrationTest, self).setUp()
- self.conf = config.init_conf().heat_plugin
+ self.conf = config.CONF.heat_plugin
self.assertIsNotNone(self.conf.auth_url,
'No auth_url configured')
@@ -94,7 +92,6 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
'No password configured')
self.setup_clients(self.conf)
self.useFixture(fixtures.FakeLogger(format=_LOG_FORMAT))
- self.updated_time = {}
if self.conf.disable_ssl_certificate_validation:
self.verify_cert = False
else:
@@ -103,6 +100,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
def setup_clients(self, conf, admin_credentials=False):
self.manager = clients.ClientManager(conf, admin_credentials)
self.identity_client = self.manager.identity_client
+ self.keystone_client = self.manager.keystone_client
self.orchestration_client = self.manager.orchestration_client
self.compute_client = self.manager.compute_client
self.network_client = self.manager.network_client
@@ -166,9 +164,15 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
self.keypair = self.create_keypair()
self.keypair_name = self.keypair.id
- @classmethod
- def _stack_rand_name(cls):
- return rand_name(cls.__name__)
+ def _stack_rand_name(self):
+ test_name = self.id()
+ if test_name and '.' in test_name:
+ name = '-'.join(test_name.split('.')[-2:])
+ # remove 'testname(...)' cases
+ name = name.split('(')[0]
+ else:
+            name = type(self).__name__
+ return rand_name(name)
def _get_network(self, net_name=None):
if net_name is None:
@@ -230,7 +234,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
fail_regexp = re.compile(failure_pattern)
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
-
+ res = None
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
@@ -256,9 +260,11 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
resource_status_reason=res.resource_status_reason)
time.sleep(build_interval)
- message = ('Resource %s failed to reach %s status within '
- 'the required time (%s s).' %
- (resource_name, status, build_timeout))
+ message = ('Resource %s from stack %s failed to reach %s status '
+ 'within the required time (%s s). Current resource '
+ 'status: %s.' %
+ (resource_name, stack_identifier, status, build_timeout,
+ res.resource_status))
raise exceptions.TimeoutException(message)
def verify_resource_status(self, stack_identifier, resource_name,
@@ -272,17 +278,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
def _verify_status(self, stack, stack_identifier, status,
fail_regexp, is_action_cancelled=False):
if stack.stack_status == status:
- # Handle UPDATE_COMPLETE/FAILED case: Make sure we don't
- # wait for a stale UPDATE_COMPLETE/FAILED status.
- if status in ('UPDATE_FAILED', 'UPDATE_COMPLETE'):
- if is_action_cancelled:
- return True
-
- if self.updated_time.get(
- stack_identifier) != stack.updated_time:
- self.updated_time[stack_identifier] = stack.updated_time
- return True
- elif status == 'DELETE_COMPLETE' and stack.deletion_time is None:
+ if status == 'DELETE_COMPLETE' and stack.deletion_time is None:
# Wait for deleted_time to be filled, so that we have more
# confidence the operation is finished.
return False
@@ -292,20 +288,12 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
wait_for_action = status.split('_')[0]
if (stack.action == wait_for_action and
fail_regexp.search(stack.stack_status)):
- # Handle UPDATE_COMPLETE/UPDATE_FAILED case.
- if status in ('UPDATE_FAILED', 'UPDATE_COMPLETE'):
- if self.updated_time.get(
- stack_identifier) != stack.updated_time:
- self.updated_time[stack_identifier] = stack.updated_time
- raise exceptions.StackBuildErrorException(
- stack_identifier=stack_identifier,
- stack_status=stack.stack_status,
- stack_status_reason=stack.stack_status_reason)
- else:
- raise exceptions.StackBuildErrorException(
- stack_identifier=stack_identifier,
- stack_status=stack.stack_status,
- stack_status_reason=stack.stack_status_reason)
+ raise exceptions.StackBuildErrorException(
+ stack_identifier=stack_identifier,
+ stack_status=stack.stack_status,
+ stack_status_reason=stack.stack_status_reason)
+
+ return False
def _wait_for_stack_status(self, stack_identifier, status,
failure_pattern=None,
@@ -354,8 +342,9 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
time.sleep(build_interval)
message = ('Stack %s failed to reach %s status within '
- 'the required time (%s s).' %
- (stack_identifier, status, build_timeout))
+ 'the required time (%s s). Current stack state: %s.' %
+ (stack_identifier, status, build_timeout,
+ stack.stack_status))
raise exceptions.TimeoutException(message)
def _stack_delete(self, stack_identifier):
@@ -395,9 +384,6 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
env_files = files or {}
parameters = parameters or {}
- self.updated_time[stack_identifier] = self.client.stacks.get(
- stack_identifier, resolve_outputs=False).updated_time
-
self._handle_in_progress(
self.client.stacks.update,
stack_id=stack_identifier,
@@ -423,9 +409,6 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
stack_name = stack_identifier.split('/')[0]
- self.updated_time[stack_identifier] = self.client.stacks.get(
- stack_identifier, resolve_outputs=False).updated_time
-
if rollback:
self.client.actions.cancel_update(stack_name)
else:
@@ -484,7 +467,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
def _get_nested_identifier(self, stack_identifier, res_name):
rsrc = self.client.resources.get(stack_identifier, res_name)
- nested_link = [l for l in rsrc.links if l['rel'] == 'nested']
+ nested_link = [lk for lk in rsrc.links if lk['rel'] == 'nested']
nested_href = nested_link[0]['href']
nested_id = nested_href.split('/')[-1]
nested_identifier = '/'.join(nested_href.split('/')[-2:])
@@ -526,7 +509,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
if (filter_func(r) if callable(filter_func) else True))
def get_resource_stack_id(self, r):
- stack_link = [l for l in r.links if l.get('rel') == 'stack'][0]
+ stack_link = [lk for lk in r.links if lk.get('rel') == 'stack'][0]
return stack_link['href'].split("/")[-1]
def get_physical_resource_id(self, stack_identifier, resource_name):
diff --git a/heat_integrationtests/config-generator.conf b/heat_integrationtests/config-generator.conf
deleted file mode 100644
index 4c92c4851..000000000
--- a/heat_integrationtests/config-generator.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[DEFAULT]
-output_file = heat_integrationtests/heat_integrationtests.conf.sample
-wrap_width = 79
-namespace = heat_integrationtests.common.config
diff --git a/heat_integrationtests/functional/test_autoscaling.py b/heat_integrationtests/functional/test_autoscaling.py
index 369fa000c..46da2a86e 100644
--- a/heat_integrationtests/functional/test_autoscaling.py
+++ b/heat_integrationtests/functional/test_autoscaling.py
@@ -15,7 +15,6 @@ import json
from heatclient import exc
from oslo_log import log as logging
-import six
from testtools import matchers
from heat_integrationtests.common import test
@@ -736,7 +735,7 @@ outputs:
stack_identifier, 'ScaleUpPolicy')
error_msg = 'Signal resource during SUSPEND is not supported'
- self.assertIn(error_msg, six.text_type(ex))
+ self.assertIn(error_msg, str(ex))
ev = self.wait_for_event_with_reason(
stack_identifier,
reason='Cannot signal resource during SUSPEND',
diff --git a/heat_integrationtests/functional/test_aws_stack.py b/heat_integrationtests/functional/test_aws_stack.py
index 05539dca8..3e775ca7a 100644
--- a/heat_integrationtests/functional/test_aws_stack.py
+++ b/heat_integrationtests/functional/test_aws_stack.py
@@ -13,8 +13,8 @@
import hashlib
import json
import random
+from urllib import parse
-from six.moves.urllib import parse
from swiftclient import utils as swiftclient_utils
import yaml
diff --git a/heat_integrationtests/functional/test_cancel_update.py b/heat_integrationtests/functional/test_cancel_update.py
index bfeeda643..68ba3f447 100644
--- a/heat_integrationtests/functional/test_cancel_update.py
+++ b/heat_integrationtests/functional/test_cancel_update.py
@@ -58,4 +58,8 @@ resources:
parameters=parameters,
expected_status='UPDATE_IN_PROGRESS')
+ # Ensure we start updating the server before rolling back
+ self._wait_for_resource_status(
+ stack_identifier, 'Server', 'CREATE_IN_PROGRESS')
+
self.cancel_update_stack(stack_identifier)
diff --git a/heat_integrationtests/functional/test_conditions.py b/heat_integrationtests/functional/test_conditions.py
index 9feb7cd2c..b6d286509 100644
--- a/heat_integrationtests/functional/test_conditions.py
+++ b/heat_integrationtests/functional/test_conditions.py
@@ -307,10 +307,15 @@ resources:
class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
- def res_assert_for_prod(self, resources, bj_prod=True, fj_zone=False,
+ def res_assert_for_prod(self, stack_identifier,
+ bj_prod=True, fj_zone=False,
shannxi_provice=False):
- res_names = {res.resource_name for res in resources
- if res.resource_status != 'DELETE_COMPLETE'}
+ def is_not_deleted(r):
+ return r.resource_status != 'DELETE_COMPLETE'
+
+ resources = self.list_resources(stack_identifier, is_not_deleted)
+ res_names = set(resources)
+
if bj_prod:
self.assertEqual(4, len(resources))
self.assertIn('beijing_prod_res', res_names)
@@ -330,10 +335,13 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
self.assertIn('prod_res', res_names)
self.assertIn('test_res', res_names)
- def res_assert_for_test(self, resources, fj_zone=False,
+ def res_assert_for_test(self, stack_identifier, fj_zone=False,
shannxi_provice=False):
- res_names = {res.resource_name for res in resources
- if res.resource_status != 'DELETE_COMPLETE'}
+ def is_not_deleted(r):
+ return r.resource_status != 'DELETE_COMPLETE'
+
+ resources = self.list_resources(stack_identifier, is_not_deleted)
+ res_names = set(resources)
if fj_zone:
self.assertEqual(4, len(resources))
@@ -399,8 +407,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
def test_stack_create_update_cfn_template_test_to_prod(self):
stack_identifier = self.stack_create(template=cfn_template)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources)
+ self.res_assert_for_test(stack_identifier)
self.output_assert_for_test(stack_identifier)
parms = {'zone': 'fuzhou'}
@@ -408,8 +415,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources, fj_zone=True)
+ self.res_assert_for_test(stack_identifier, fj_zone=True)
self.output_assert_for_test(stack_identifier)
parms = {'zone': 'xianyang'}
@@ -417,8 +423,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources, shannxi_provice=True)
+ self.res_assert_for_test(stack_identifier, shannxi_provice=True)
self.output_assert_for_test(stack_identifier)
parms = {'env_type': 'prod'}
@@ -426,8 +431,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources)
+ self.res_assert_for_prod(stack_identifier)
self.output_assert_for_prod(stack_identifier)
parms = {'env_type': 'prod',
@@ -436,8 +440,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, False)
+ self.res_assert_for_prod(stack_identifier, False)
self.output_assert_for_prod(stack_identifier, False)
parms = {'env_type': 'prod',
@@ -446,8 +449,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, bj_prod=False, fj_zone=True)
+ self.res_assert_for_prod(stack_identifier, bj_prod=False, fj_zone=True)
self.output_assert_for_prod(stack_identifier, False)
parms = {'env_type': 'prod',
@@ -456,17 +458,15 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
- shannxi_provice=True)
+ self.res_assert_for_prod(stack_identifier, bj_prod=False,
+ fj_zone=False, shannxi_provice=True)
self.output_assert_for_prod(stack_identifier, False)
def test_stack_create_update_cfn_template_prod_to_test(self):
parms = {'env_type': 'prod'}
stack_identifier = self.stack_create(template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources)
+ self.res_assert_for_prod(stack_identifier)
self.output_assert_for_prod(stack_identifier)
parms = {'zone': 'xiamen',
@@ -475,8 +475,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, bj_prod=False, fj_zone=True)
+ self.res_assert_for_prod(stack_identifier, bj_prod=False, fj_zone=True)
self.output_assert_for_prod(stack_identifier, bj_prod=False)
parms = {'zone': 'xianyang',
@@ -485,9 +484,8 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
- shannxi_provice=True)
+ self.res_assert_for_prod(stack_identifier, bj_prod=False,
+ fj_zone=False, shannxi_provice=True)
self.output_assert_for_prod(stack_identifier, bj_prod=False)
parms = {'zone': 'shanghai',
@@ -496,9 +494,8 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, bj_prod=False, fj_zone=False,
- shannxi_provice=False)
+ self.res_assert_for_prod(stack_identifier, bj_prod=False,
+ fj_zone=False, shannxi_provice=False)
self.output_assert_for_prod(stack_identifier, bj_prod=False)
parms = {'env_type': 'test'}
@@ -506,8 +503,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources)
+ self.res_assert_for_test(stack_identifier)
self.output_assert_for_test(stack_identifier)
parms = {'env_type': 'test',
@@ -516,8 +512,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources, fj_zone=True)
+ self.res_assert_for_test(stack_identifier, fj_zone=True)
self.output_assert_for_test(stack_identifier)
parms = {'env_type': 'test',
@@ -526,15 +521,13 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=cfn_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources, fj_zone=False,
+ self.res_assert_for_test(stack_identifier, fj_zone=False,
shannxi_provice=True)
self.output_assert_for_test(stack_identifier)
def test_stack_create_update_hot_template_test_to_prod(self):
stack_identifier = self.stack_create(template=hot_template)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources)
+ self.res_assert_for_test(stack_identifier)
self.output_assert_for_test(stack_identifier)
parms = {'zone': 'xianyang'}
@@ -542,8 +535,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=hot_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources, shannxi_provice=True)
+ self.res_assert_for_test(stack_identifier, shannxi_provice=True)
self.output_assert_for_test(stack_identifier)
parms = {'env_type': 'prod'}
@@ -551,8 +543,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=hot_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources)
+ self.res_assert_for_prod(stack_identifier)
self.output_assert_for_prod(stack_identifier)
parms = {'env_type': 'prod',
@@ -561,8 +552,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=hot_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, False)
+ self.res_assert_for_prod(stack_identifier, False)
self.output_assert_for_prod(stack_identifier, False)
parms = {'env_type': 'prod',
@@ -571,16 +561,14 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=hot_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, False, shannxi_provice=True)
+ self.res_assert_for_prod(stack_identifier, False, shannxi_provice=True)
self.output_assert_for_prod(stack_identifier, False)
def test_stack_create_update_hot_template_prod_to_test(self):
parms = {'env_type': 'prod'}
stack_identifier = self.stack_create(template=hot_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources)
+ self.res_assert_for_prod(stack_identifier)
self.output_assert_for_prod(stack_identifier)
parms = {'env_type': 'prod',
@@ -589,8 +577,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=hot_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_prod(resources, False, shannxi_provice=True)
+ self.res_assert_for_prod(stack_identifier, False, shannxi_provice=True)
self.output_assert_for_prod(stack_identifier, False)
parms = {'env_type': 'test'}
@@ -598,8 +585,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=hot_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources)
+ self.res_assert_for_test(stack_identifier)
self.output_assert_for_test(stack_identifier)
parms = {'env_type': 'test',
@@ -608,8 +594,7 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
template=hot_template,
parameters=parms)
- resources = self.client.resources.list(stack_identifier)
- self.res_assert_for_test(resources, fj_zone=False,
+ self.res_assert_for_test(stack_identifier, fj_zone=False,
shannxi_provice=True)
self.output_assert_for_test(stack_identifier)
@@ -619,3 +604,64 @@ class CreateUpdateResConditionTest(functional_base.FunctionalTestsBase):
self.update_stack(stack_identifier, template=fail_rename_tmpl,
expected_status='UPDATE_FAILED')
self.update_stack(stack_identifier, template=recover_rename_tmpl)
+
+
+root_output_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+ env_type:
+ type: string
+ default: test
+conditions:
+ cd1: {equals : [{get_param: env_type}, "prod"]}
+resources:
+ nested:
+ type: nested_output.yaml
+ properties:
+ env_type: {get_param: env_type}
+outputs:
+ standard:
+ value: {get_attr: [nested, standard]}
+ cond:
+ value: {get_attr: [nested, cond]}
+ condition: cd1
+ cond_value:
+ value: {get_attr: [nested, cond_value]}
+'''
+
+nested_output_tmpl = '''
+heat_template_version: 2016-10-14
+parameters:
+ env_type:
+ type: string
+conditions:
+ cd1: {equals : [{get_param: env_type}, "prod"]}
+outputs:
+ standard:
+ value: hello
+ cond:
+ value: world
+ condition: cd1
+ cond_value:
+ value: {if: [cd1, 'prod', 'test']}
+'''
+
+
+class CreateNestedOutputConditionTest(functional_base.FunctionalTestsBase):
+
+ def test_condition_nested_outputs(self):
+ stack_identifier = self.stack_create(template=root_output_tmpl,
+ files={'nested_output.yaml':
+ nested_output_tmpl})
+
+ standard = self.client.stacks.output_show(stack_identifier,
+ 'standard')['output']
+ self.assertEqual('hello', standard['output_value'])
+
+ cond = self.client.stacks.output_show(stack_identifier,
+ 'cond')['output']
+ self.assertIsNone(cond['output_value'])
+
+ cond_val = self.client.stacks.output_show(stack_identifier,
+ 'cond_value')['output']
+ self.assertEqual('test', cond_val['output_value'])
diff --git a/heat_integrationtests/functional/test_create_update.py b/heat_integrationtests/functional/test_create_update.py
index 72f5f87fe..93f693af3 100644
--- a/heat_integrationtests/functional/test_create_update.py
+++ b/heat_integrationtests/functional/test_create_update.py
@@ -62,15 +62,54 @@ test_template_two_resource = {
}
}
+test_template_updatae_flavor_and_volume_size = '''
+
+heat_template_version: 2013-05-23
+
+parameters:
+ volume_size:
+ default: 10
+ type: number
+ flavor:
+ type: string
+ network:
+ type: string
+ image:
+ type: string
+
+resources:
+ my_instance:
+ type: OS::Nova::Server
+ properties:
+ image: {get_param: image}
+ flavor: {get_param: flavor}
+ admin_pass: 1
+ networks:
+ - network: {get_param: network}
+ data_volume_attachment:
+ depends_on: my_instance
+ type: 'OS::Cinder::VolumeAttachment'
+ properties:
+ instance_uuid:
+ get_resource: my_instance
+ volume_id:
+ get_resource: data_volume
+ data_volume:
+ type: 'OS::Cinder::Volume'
+ properties:
+ name: myvolume
+ size: {get_param: volume_size}
+'''
+
def _change_rsrc_properties(template, rsrcs, values):
- modified_template = copy.deepcopy(template)
- for rsrc_name in rsrcs:
- rsrc_prop = modified_template['resources'][
- rsrc_name]['properties']
- for prop, new_val in values.items():
- rsrc_prop[prop] = new_val
- return modified_template
+ modified_template = copy.deepcopy(template)
+ for rsrc_name in rsrcs:
+ rsrc_prop = modified_template['resources'][
+ rsrc_name]['properties']
+ for prop, new_val in values.items():
+ rsrc_prop[prop] = new_val
+ return modified_template
class CreateStackTest(functional_base.FunctionalTestsBase):
@@ -165,6 +204,26 @@ resources:
self.assertEqual(expected_resources,
self.list_resources(stack_identifier))
+ def test_stack_update_flavor_volume(self):
+
+ parms = {'flavor': self.conf.minimal_instance_type,
+ 'volume_size': 10,
+ 'image': self.conf.minimal_image_ref,
+ 'network': self.conf.fixed_network_name}
+
+ stack_identifier = self.stack_create(
+ template=test_template_updatae_flavor_and_volume_size,
+ parameters=parms
+ )
+
+ parms_updated = parms
+ parms_updated['volume_size'] = 20
+ parms_updated['flavor'] = self.conf.instance_type
+ self.update_stack(
+ stack_identifier,
+ template=test_template_updatae_flavor_and_volume_size,
+ parameters=parms_updated)
+
def test_stack_in_place_update(self):
template = _change_rsrc_properties(test_template_one_resource,
['test1'],
diff --git a/heat_integrationtests/functional/test_heat_autoscaling.py b/heat_integrationtests/functional/test_heat_autoscaling.py
index cd1c9c73c..8d7fa6977 100644
--- a/heat_integrationtests/functional/test_heat_autoscaling.py
+++ b/heat_integrationtests/functional/test_heat_autoscaling.py
@@ -111,7 +111,7 @@ outputs:
# send scale up signals and ensure that asg honors max_size
asg = self.client.resources.get(stack_id, 'random_group')
max_size = 5
- for num in range(asg_size+1, max_size+2):
+ for num in range(asg_size + 1, max_size + 2):
expected_resources = num if num <= max_size else max_size
self.client.resources.signal(stack_id, 'scale_up_policy')
self.assertTrue(
@@ -133,7 +133,7 @@ outputs:
# send scale down signals and ensure that asg honors min_size
asg = self.client.resources.get(stack_id, 'random_group')
min_size = 2
- for num in range(asg_size-1, 0, -1):
+ for num in range(asg_size - 1, 0, -1):
expected_resources = num if num >= min_size else min_size
self.client.resources.signal(stack_id, 'scale_down_policy')
self.assertTrue(
diff --git a/heat_integrationtests/functional/test_keystone_user_with_domain.py b/heat_integrationtests/functional/test_keystone_user_with_domain.py
new file mode 100644
index 000000000..aff4f75a1
--- /dev/null
+++ b/heat_integrationtests/functional/test_keystone_user_with_domain.py
@@ -0,0 +1,183 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from heat_integrationtests.common import test
+from heat_integrationtests.functional import functional_base
+
+create_user = '''
+heat_template_version: 2014-10-16
+description: test template to test user role assignment with user{domain}
+parameters:
+ user_name:
+ type: string
+ label: User Name
+ description: Test user name
+ project_name:
+ type: string
+ label: Project Name
+ description: Test project name
+ domain_name:
+ type: string
+ label: Domain Name
+ description: Test domain name
+resources:
+ Domain:
+ properties:
+ description: "Test Domain"
+ enabled: true
+ name: {get_param: domain_name}
+ type: OS::Keystone::Domain
+ Project:
+ properties:
+ description: "Test Project"
+ enabled: true
+ name: {get_param: project_name}
+ type: OS::Keystone::Project
+ User:
+ type: OS::Keystone::User
+ properties:
+ name: {get_param: user_name}
+ domain: {get_resource: Domain}
+ description: Test user
+ enabled: true
+ email: xyz@abc.com
+ password: passWORD
+outputs:
+ project_name:
+ value: {get_attr: [Project, name]}
+ user_name:
+ value: {get_attr: [User, name]}
+'''
+assign_user_roles = '''
+heat_template_version: 2014-10-16
+description: test template to test user role assignment with user{domain}
+parameters:
+ user_name:
+ type: string
+ label: User Name
+ description: Test user name
+ project_name:
+ type: string
+ label: Project Name
+ description: Test project name
+ domain_name:
+ type: string
+ label: Domain Name
+ description: Test domain name
+resources:
+ UserRoleAssignment:
+ properties:
+ roles:
+ - role: admin
+ project: {get_param: project_name}
+ user:
+ list_join: ['',
+ [
+ {get_param: user_name},
+ '{',
+ {get_param: domain_name},
+ '}'
+ ]
+ ]
+ type: OS::Keystone::UserRoleAssignment
+'''
+disable_domain = '''
+heat_template_version: 2014-10-16
+description: test template to test user role assignment with user{domain}
+parameters:
+ user_name:
+ type: string
+ label: User Name
+ description: Test user name
+ project_name:
+ type: string
+ label: Project Name
+ description: Test project name
+ domain_name:
+ type: string
+ label: Domain Name
+ description: Test domain name
+resources:
+ Domain:
+ properties:
+ description: "Test Domain"
+ enabled: false
+ name: {get_param: domain_name}
+ type: OS::Keystone::Domain
+ Project:
+ properties:
+ description: "Test Project"
+ enabled: true
+ name: {get_param: project_name}
+ type: OS::Keystone::Project
+ User:
+ type: OS::Keystone::User
+ properties:
+ name: {get_param: user_name}
+ domain: {get_resource: Domain}
+ description: Test user
+ enabled: true
+ email: xyz@abc.com
+ password: passWORD
+outputs:
+ project_name:
+ value: {get_attr: [Project, name]}
+ user_name:
+ value: {get_attr: [User, name]}
+'''
+
+
+class CreateUserTest(functional_base.FunctionalTestsBase):
+
+ def get_user_and_project_outputs(self, stack_identifier):
+ stack = self.client.stacks.get(stack_identifier)
+ project_name = self._stack_output(stack, 'project_name')
+ user_name = self._stack_output(stack, 'user_name')
+ return project_name, user_name
+
+ def get_outputs(self, stack_identifier, output_key):
+ stack = self.client.stacks.get(stack_identifier)
+ return self._stack_output(stack, output_key)
+
+ def test_assign_user_role_with_domain(self):
+ # Setup admin clients
+ self.setup_clients_for_admin()
+ parms = {
+ 'user_name': test.rand_name('test-user-domain-user-name'),
+ 'project_name': test.rand_name('test-user-domain-project'),
+ 'domain_name': test.rand_name('test-user-domain-domain-name')
+ }
+ stack_identifier_create_user = self.stack_create(
+ template=create_user,
+ parameters=parms)
+
+ self.stack_create(
+ template=assign_user_roles,
+ parameters=parms)
+
+ project_name, user_name = self.get_user_and_project_outputs(
+ stack_identifier_create_user)
+ self.assertEqual(parms['project_name'], project_name)
+ self.assertEqual(parms['user_name'], user_name)
+ users = self.keystone_client.users.list()
+ projects = self.keystone_client.projects.list()
+ user_id = [x for x in users if x.name == user_name][0].id
+ project_id = [x for x in projects if x.name == project_name][0].id
+ self.assertIsNotNone(
+ self.keystone_client.role_assignments.list(
+ user=user_id, project=project_id))
+
+ # Disable domain so stack can be deleted
+ self.update_stack(
+ stack_identifier=stack_identifier_create_user,
+ template=disable_domain,
+ parameters=parms)
diff --git a/heat_integrationtests/functional/test_resource_group.py b/heat_integrationtests/functional/test_resource_group.py
index 95ae7799f..ebce70df7 100644
--- a/heat_integrationtests/functional/test_resource_group.py
+++ b/heat_integrationtests/functional/test_resource_group.py
@@ -14,7 +14,6 @@ import copy
import json
from heatclient import exc
-import six
import yaml
from heat_integrationtests.functional import functional_base
@@ -88,12 +87,12 @@ resources:
ex = self.assertRaises(exc.HTTPBadRequest, self.update_stack,
stack_identifier, template_two_nested,
environment=env, files=files)
- self.assertIn(expected_err, six.text_type(ex))
+ self.assertIn(expected_err, str(ex))
ex = self.assertRaises(exc.HTTPBadRequest, self.stack_create,
template=template_two_nested,
environment=env, files=files)
- self.assertIn(expected_err, six.text_type(ex))
+ self.assertIn(expected_err, str(ex))
def _validate_resources(self, stack_identifier, expected_count):
resources = self.list_group_resources(stack_identifier,
diff --git a/heat_integrationtests/functional/test_simultaneous_update.py b/heat_integrationtests/functional/test_simultaneous_update.py
index 0c562c075..004145d3b 100644
--- a/heat_integrationtests/functional/test_simultaneous_update.py
+++ b/heat_integrationtests/functional/test_simultaneous_update.py
@@ -12,6 +12,7 @@
import copy
+import json
import time
from heat_integrationtests.common import test
@@ -91,3 +92,85 @@ class SimultaneousUpdateStackTest(functional_base.FunctionalTestsBase):
time.sleep(50)
self.update_stack(stack_id, after)
+
+
+input_param = 'input'
+preempt_nested_stack_type = 'preempt.yaml'
+preempt_root_rsrcs = {
+ 'nested_stack': {
+ 'type': preempt_nested_stack_type,
+ 'properties': {
+ 'input': {'get_param': input_param},
+ },
+ }
+}
+preempt_root_out = {'get_attr': ['nested_stack', 'delay_stack']}
+preempt_delay_stack_type = 'delay.yaml'
+preempt_nested_rsrcs = {
+ 'delay_stack': {
+ 'type': preempt_delay_stack_type,
+ 'properties': {
+ 'input': {'get_param': input_param},
+ },
+ }
+}
+preempt_nested_out = {'get_resource': 'delay_stack'}
+preempt_delay_rsrcs = {
+ 'delay_resource': {
+ 'type': 'OS::Heat::TestResource',
+ 'properties': {
+ 'action_wait_secs': {
+ 'update': 6000,
+ },
+ 'value': {'get_param': input_param},
+ },
+ }
+}
+
+
+def _tmpl_with_rsrcs(rsrcs, output_value=None):
+ tmpl = {
+ 'heat_template_version': 'queens',
+ 'parameters': {
+ input_param: {
+ 'type': 'string',
+ },
+ },
+ 'resources': rsrcs,
+ }
+ if output_value is not None:
+ outputs = {'delay_stack': {'value': output_value}}
+ tmpl['outputs'] = outputs
+ return json.dumps(tmpl)
+
+
+class SimultaneousUpdateNestedStackTest(functional_base.FunctionalTestsBase):
+ @test.requires_convergence
+ def test_nested_preemption(self):
+ root_tmpl = _tmpl_with_rsrcs(preempt_root_rsrcs,
+ preempt_root_out)
+ files = {
+ preempt_nested_stack_type: _tmpl_with_rsrcs(preempt_nested_rsrcs,
+ preempt_nested_out),
+ preempt_delay_stack_type: _tmpl_with_rsrcs(preempt_delay_rsrcs),
+ }
+ stack_id = self.stack_create(template=root_tmpl, files=files,
+ parameters={input_param: 'foo'})
+ delay_stack_uuid = self.get_stack_output(stack_id, 'delay_stack')
+
+ # Start an update that includes a long delay in the second nested stack
+ self.update_stack(stack_id, template=root_tmpl, files=files,
+ parameters={input_param: 'bar'},
+ expected_status='UPDATE_IN_PROGRESS')
+ self._wait_for_resource_status(delay_stack_uuid, 'delay_resource',
+ 'UPDATE_IN_PROGRESS')
+
+ # Update again to check that we preempt update of the first nested
+ # stack. This will delete the second nested stack, after preempting the
+ # update of that stack as well, which will cause the delay resource
+ # within to be cancelled.
+ empty_nest_files = {
+ preempt_nested_stack_type: _tmpl_with_rsrcs({}),
+ }
+ self.update_stack(stack_id, template=root_tmpl, files=empty_nest_files,
+ parameters={input_param: 'baz'})
diff --git a/heat_integrationtests/functional/test_template_resource.py b/heat_integrationtests/functional/test_template_resource.py
index 21a4ec061..f2a513b3a 100644
--- a/heat_integrationtests/functional/test_template_resource.py
+++ b/heat_integrationtests/functional/test_template_resource.py
@@ -13,7 +13,6 @@
import json
from heatclient import exc as heat_exceptions
-import six
import yaml
from heat_integrationtests.common import test
@@ -804,7 +803,7 @@ outputs:
except heat_exceptions.HTTPBadRequest as exc:
exp = ('ERROR: Required property two for facade '
'OS::Thingy missing in provider')
- self.assertEqual(exp, six.text_type(exc))
+ self.assertEqual(exp, str(exc))
def test_missing_output(self):
templ_missing_output = '''
@@ -828,7 +827,7 @@ resources:
except heat_exceptions.HTTPBadRequest as exc:
exp = ('ERROR: Attribute here-it-is for facade '
'OS::Thingy missing in provider')
- self.assertEqual(exp, six.text_type(exc))
+ self.assertEqual(exp, str(exc))
class TemplateResourceNewParamTest(functional_base.FunctionalTestsBase):
diff --git a/heat_integrationtests/functional/test_template_versions.py b/heat_integrationtests/functional/test_template_versions.py
index 08b169844..013064c00 100644
--- a/heat_integrationtests/functional/test_template_versions.py
+++ b/heat_integrationtests/functional/test_template_versions.py
@@ -26,7 +26,8 @@ class TemplateVersionTest(functional_base.FunctionalTestsBase):
"2017-02-24", "ocata",
"2017-09-01", "pike",
"2018-03-02", "queens",
- "2018-08-31", "rocky"]
+ "2018-08-31", "rocky",
+ "2021-04-16", "wallaby"]
for template in template_versions:
self.assertIn(template.version.split(".")[1],
supported_template_versions)
diff --git a/heat_integrationtests/functional/test_update_restricted.py b/heat_integrationtests/functional/test_update_restricted.py
index 0c5d015f0..545255cb0 100644
--- a/heat_integrationtests/functional/test_update_restricted.py
+++ b/heat_integrationtests/functional/test_update_restricted.py
@@ -11,7 +11,6 @@
# under the License.
import copy
-import time
from heat_integrationtests.functional import functional_base
@@ -74,9 +73,6 @@ class UpdateRestrictedStackTest(functional_base.FunctionalTestsBase):
self._check_for_restriction_reason(resource_events,
reason_update_restrict))
- # Ensure the timestamp changes, since this will be very quick
- time.sleep(1)
-
# check update succeeds - with only 'replace' restricted
self.update_stack(stack_identifier, update_template,
env_replace_restrict,
@@ -112,9 +108,6 @@ class UpdateRestrictedStackTest(functional_base.FunctionalTestsBase):
self._check_for_restriction_reason(resource_events,
reason_replace_restrict))
- # Ensure the timestamp changes, since this will be very quick
- time.sleep(1)
-
# check replace fails - with only 'replace' restricted
self.update_stack(stack_identifier, update_template,
env_replace_restrict,
@@ -149,9 +142,6 @@ class UpdateRestrictedStackTest(functional_base.FunctionalTestsBase):
self._check_for_restriction_reason(resource_events,
reason_replace_restrict))
- # Ensure the timestamp changes, since this will be very quick
- time.sleep(1)
-
# check replace fails - with only 'replace' restricted
self.update_stack(stack_identifier, update_template,
env_replace_restrict,
diff --git a/heat_integrationtests/functional/test_validation.py b/heat_integrationtests/functional/test_validation.py
index da5cd680c..dd822f93f 100644
--- a/heat_integrationtests/functional/test_validation.py
+++ b/heat_integrationtests/functional/test_validation.py
@@ -14,6 +14,21 @@
from heat_integrationtests.functional import functional_base
+class SimpleStackValidationTest(functional_base.FunctionalTestsBase):
+
+ def test_validate_json_content(self):
+ template = u'''
+heat_template_version: rocky
+resources:
+ server:
+ type: OS::Heat::TestResource
+ properties:
+ value: =%da
+'''
+ self.stack_create(template=template,
+ expected_status='CREATE_COMPLETE')
+
+
class StackValidationTest(functional_base.FunctionalTestsBase):
def setUp(self):
diff --git a/heat_integrationtests/post_test_hook.sh b/heat_integrationtests/post_test_hook.sh
deleted file mode 100755
index 3d975c657..000000000
--- a/heat_integrationtests/post_test_hook.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This script is executed inside post_test_hook function in devstack gate.
-
-set -ex
-
-export DEST=${DEST:-/opt/stack/new}
-export TOP_DIR=${TOP_DIR:-/opt/stack/new/devstack}
-sudo -E $DEST/heat/heat_integrationtests/prepare_test_env.sh
-sudo -E $DEST/heat/heat_integrationtests/prepare_test_network.sh
-
-cd $DEST/tempest
-sudo tox -evenv-tempest -- stestr --test-path=$DEST/heat/heat_integrationtests --top-dir=$DEST/heat --group_regex='heat_tempest_plugin\.tests\.api\.test_heat_api[._]([^_]+)' run
-
-sudo -E $DEST/heat/heat_integrationtests/cleanup_test_env.sh
diff --git a/heat_integrationtests/pre_test_hook.sh b/heat_integrationtests/pre_test_hook.sh
deleted file mode 100755
index 8194e97cd..000000000
--- a/heat_integrationtests/pre_test_hook.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This script is executed inside pre_test_hook function in devstack gate.
-
-set -x
-
-localconf=$BASE/new/devstack/local.conf
-
-echo -e '[[post-config|$HEAT_CONF]]\n[DEFAULT]\n' >> $localconf
-
-if [ "$DISABLE_CONVERGENCE" == "true" ] ; then
- echo -e 'convergence_engine=false\n' >> $localconf
-fi
-
-echo -e 'stack_scheduler_hints=true\n' >> $localconf
-echo -e 'hidden_stack_tags=hidden\n' >> $localconf
-echo -e 'encrypt_parameters_and_properties=True\n' >> $localconf
-echo -e 'logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s\n' >> $localconf
-
-echo -e '[heat_api]\nworkers=2\n' >> $localconf
-echo -e '[heat_api_cfn]\nworkers=2\n' >> $localconf
-
-echo -e '[cache]\nenabled=True\n' >> $localconf
-
-echo -e '[eventlet_opts]\nclient_socket_timeout=120\n' >> $localconf
-
-echo -e '[oslo_messaging_notifications]\ndriver=messagingv2\n' >> $localconf
-
-echo "[[local|localrc]]" >> $localconf
-
-# NOTE(mnaser): This will use the region local mirrors to avoid going out
-# to network
-if [[ -e /etc/ci/mirror_info.sh ]]; then
- source /etc/ci/mirror_info.sh
- echo "IMAGE_URLS+=${NODEPOOL_FEDORA_MIRROR}/releases/29/Cloud/x86_64/images/Fedora-Cloud-Base-29-1.2.x86_64.qcow2" >> $localconf
-else
- echo "IMAGE_URLS+=https://download.fedoraproject.org/pub/fedora/linux/releases/29/Cloud/x86_64/images/Fedora-Cloud-Base-29-1.2.x86_64.qcow2" >> $localconf
-fi
-
-echo "CEILOMETER_PIPELINE_INTERVAL=60" >> $localconf
-echo "HEAT_ENABLE_ADOPT_ABANDON=True" >> $localconf
diff --git a/heat_integrationtests/prepare_test_env.sh b/heat_integrationtests/prepare_test_env.sh
index 65c40851b..45b86e493 100755
--- a/heat_integrationtests/prepare_test_env.sh
+++ b/heat_integrationtests/prepare_test_env.sh
@@ -46,7 +46,7 @@ function _config_iniset {
iniset $conf_file heat_plugin instance_type m1.heat_int
iniset $conf_file heat_plugin minimal_instance_type m1.heat_micro
- iniset $conf_file heat_plugin image_ref Fedora-Cloud-Base-29-1.2.x86_64
+ iniset $conf_file heat_plugin image_ref Fedora-Cloud-Base-33-1.2.x86_64
iniset $conf_file heat_plugin minimal_image_ref $default_image_name
iniset $conf_file heat_plugin hidden_stack_tag hidden
diff --git a/heat_integrationtests/prepare_test_network.sh b/heat_integrationtests/prepare_test_network.sh
index 490026382..df78fdcd7 100755
--- a/heat_integrationtests/prepare_test_network.sh
+++ b/heat_integrationtests/prepare_test_network.sh
@@ -22,5 +22,5 @@ HEAT_PRIVATE_SUBNET_CIDR=10.0.5.0/24
source $TOP_DIR/openrc demo demo
openstack network show heat-net || openstack network create heat-net
-openstack subnet show heat-subnet || openstack subnet create heat-subnet --network heat-net --subnet-range $HEAT_PRIVATE_SUBNET_CIDR
-openstack router add subnet router1 heat-subnet
+subnet_id=$((openstack subnet show heat-subnet || openstack subnet create heat-subnet --network heat-net --subnet-range $HEAT_PRIVATE_SUBNET_CIDR) | grep " id " | awk '{print $4}')
+openstack router show router1 -c interfaces_info | grep -q $subnet_id || openstack router add subnet router1 $subnet_id
diff --git a/lower-constraints.txt b/lower-constraints.txt
index 2c7cba0f8..47b9cac96 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -1,5 +1,5 @@
alembic==0.9.8
-amqp==2.2.2
+amqp==2.6.0
aodhclient==0.9.0
appdirs==1.4.3
asn1crypto==0.24.0
@@ -8,33 +8,34 @@ bandit==1.1.0
bcrypt==3.1.4
cachetools==2.0.1
certifi==2018.1.18
-cffi==1.11.5
+cffi==1.14.0
chardet==3.0.4
cliff==2.11.0
cmd2==0.8.1
contextlib2==0.5.5
coverage==4.0
croniter==0.3.4
-cryptography==2.1
+cryptography==2.5
+ddt==1.4.1
debtcollector==1.19.0
-decorator==4.2.1
+decorator==4.3.0
deprecation==2.0
-doc8==0.6.0
+doc8==0.8.1
docker-pycreds==0.2.2
docker==3.1.1
+docutils==0.13.1
dogpile.cache==0.6.5
enum-compat==0.0.2
eventlet==0.18.2
extras==1.0.0
fasteners==0.14.1
fixtures==3.0.0
-flake8==2.5.5
+flake8==3.7.0
future==0.16.0
futurist==1.6.0
gitdb2==2.0.3
GitPython==2.1.8
-greenlet==0.4.13
-hacking==0.12.0
+greenlet==0.4.17
idna==2.6
iso8601==0.1.12
Jinja2==2.10
@@ -42,17 +43,19 @@ jmespath==0.9.3
jsonpatch==1.21
jsonpointer==2.0
jsonschema==2.6.0
-keystoneauth1==3.4.0
-keystonemiddleware==4.17.0
-kombu==4.0.0
+keystoneauth1==3.18.0
+keystonemiddleware==5.1.0
+kombu==5.0.1
linecache2==1.0.0
-lxml==3.4.1
+lxml==4.5.0
Mako==1.0.7
-MarkupSafe==1.0
-mccabe==0.2.1
-mock==2.0.0
+MarkupSafe==1.1.1
+mccabe==0.6.0
+mock==3.0.3
monotonic==1.4
+mox3==0.28.0
msgpack==0.5.6
+msgpack-python==0.5.6
munch==2.2.0
netaddr==0.7.18
netifaces==0.10.6
@@ -63,40 +66,38 @@ os-service-types==1.2.0
osc-lib==1.10.0
oslo.cache==1.26.0
oslo.concurrency==3.26.0
-oslo.config==5.2.0
-oslo.context==2.19.2
-oslo.db==4.27.0
-oslo.i18n==3.15.3
-oslo.log==3.36.0
+oslo.config==6.0.0
+oslo.context==2.22.0
+oslo.db==6.0.0
+oslo.i18n==3.20.0
+oslo.log==4.3.0
oslo.messaging==5.29.0
oslo.middleware==3.31.0
-oslo.policy==1.30.0
+oslo.policy==3.7.0
oslo.reports==1.18.0
-oslo.serialization==2.18.0
+oslo.serialization==2.25.0
oslo.service==1.24.0
oslo.upgradecheck==0.1.0
-oslo.utils==3.37.0
+oslo.utils==3.40.0
oslo.versionedobjects==1.31.2
oslotest==3.2.0
osprofiler==1.4.0
packaging==17.1
-paramiko==2.4.1
+paramiko==2.7.1
Paste==2.0.3
PasteDeploy==1.5.0
-pbr==2.0.0
-pep8==1.5.7
+pbr==3.1.1
pika-pool==0.1.3
pika==0.10.0
ply==3.11
prettytable==0.7.2
psutil==5.4.3
-psycopg2==2.7
pyasn1==0.4.2
pycadf==2.7.0
pycparser==2.18
-pyflakes==0.8.1
+Pygments==2.2.0
pyinotify==0.9.6
-PyMySQL==0.7.6
+PyMySQL==0.8.0
PyNaCl==1.2.1
pyOpenSSL==17.5.0
pyparsing==2.2.0
@@ -110,24 +111,26 @@ python-designateclient==2.7.0
python-editor==1.0.3
python-glanceclient==2.8.0
python-heatclient==1.10.0
+python-ironicclient==2.8.0
python-keystoneclient==3.8.0
python-magnumclient==2.3.0
python-manilaclient==1.16.0
python-mimeparse==1.6.0
python-mistralclient==3.1.0
python-monascaclient==1.12.0
-python-neutronclient==6.7.0
+python-neutronclient==6.14.0
python-novaclient==9.1.0
-python-octaviaclient==1.3.0
+python-octaviaclient==1.8.0
python-openstackclient==3.12.0
python-saharaclient==1.4.0
python-subunit==1.2.0
python-swiftclient==3.2.0
python-troveclient==2.2.0
+python-vitrageclient==2.7.0
python-zaqarclient==1.3.0
-python-zunclient==2.0.0
+python-zunclient==3.4.0
pytz==2013.6
-PyYAML==3.12
+PyYAML==5.1
repoze.lru==0.7
requests==2.14.2
requestsexceptions==1.4.0
@@ -136,15 +139,15 @@ Routes==2.3.1
simplejson==3.13.2
six==1.10.0
smmap2==2.0.3
-sqlalchemy-migrate==0.11.0
+sqlalchemy-migrate==0.13.0
SQLAlchemy==1.0.10
sqlparse==0.2.4
statsd==3.2.2
stestr==2.0.0
-stevedore==1.20.0
+stevedore==3.1.0
tempest==17.1.0
Tempita==0.5.2
-tenacity==4.4.0
+tenacity==6.1.0
testresources==2.0.0
testscenarios==0.4
testtools==2.2.0
diff --git a/playbooks/devstack/functional/post.yaml b/playbooks/devstack/functional/post.yaml
index e07f5510a..5f3cf3282 100644
--- a/playbooks/devstack/functional/post.yaml
+++ b/playbooks/devstack/functional/post.yaml
@@ -1,15 +1,19 @@
- hosts: primary
+ name: Clear test env
tasks:
+ - name: clear test env
+ shell:
+ cmd: |
+ /opt/stack/heat/heat_integrationtests/cleanup_test_env.sh
+ executable: /bin/bash
+ chdir: "{{ zuul.project.src_dir }}"
+ environment:
+ DEVSTACK_BASE_DIR: "{{ devstack_base_dir }}"
+ become: true
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
+- hosts: tempest
+ become: true
+ roles:
+ - role: fetch-subunit-output
+ zuul_work_dir: '{{ devstack_base_dir }}/tempest'
+ - role: process-stackviz
diff --git a/playbooks/devstack/functional/run.yaml b/playbooks/devstack/functional/run.yaml
index 80648ddab..67f7ce221 100644
--- a/playbooks/devstack/functional/run.yaml
+++ b/playbooks/devstack/functional/run.yaml
@@ -1,117 +1,10 @@
- hosts: all
- name: Job for functional tests
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- https://opendev.org \
- openstack/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- services=rabbit,tempest,mysql,dstat,key
- services+=,n-api,n-api-meta,n-cpu,n-cond,n-sch,n-crt
- services+=,placement-api,placement-client
- services+=,g-api,g-reg
- services+=,c-sch,c-api,c-vol,c-bak
- services+=,neutron-api,neutron-dhcp,neutron-metadata-agent,neutron-agent,neutron-l3,neutron-trunk
-
- if [ "{{ use_python3 }}" -eq 1 ] ; then
- export DEVSTACK_GATE_USE_PYTHON3=True
- # Swift does not work so skip s-* for python3x for now
- else
- export DEVSTACK_GATE_USE_PYTHON3=False
- services+=,s-proxy,s-object,s-container,s-account
- fi
-
- export DEVSTACK_GATE_NEUTRON=1
- export DEVSTACK_GATE_TEMPEST=1
- export DEVSTACK_GATE_TEMPEST_NOTESTS=1
- export DEVSTACK_GATE_EXERCISES=0
- export DEVSTACK_GATE_INSTALL_TESTONLY=1
- export DEVSTACK_PROJECT_FROM_GIT=python-heatclient
- export KEEP_LOCALRC=1
- export PROJECTS="openstack/ceilometer $PROJECTS"
- export PROJECTS="openstack/aodh $PROJECTS"
- export PROJECTS="openstack/zaqar $PROJECTS"
- export PROJECTS="openstack/heat-agents $PROJECTS"
- export PROJECTS="openstack/python-zaqarclient $PROJECTS"
- export PROJECTS="openstack/neutron $PROJECTS"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer https://opendev.org/openstack/ceilometer"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin aodh https://opendev.org/openstack/aodh"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin zaqar https://opendev.org/openstack/zaqar"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin neutron https://opendev.org/openstack/neutron"
-
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin heat https://opendev.org/openstack/heat"
-
- # Enable octavia plugin and services
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin octavia https://opendev.org/openstack/octavia"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"OCTAVIA_AMP_IMAGE_FILE=/tmp/test-only-amphora-x64-haproxy-ubuntu-bionic.qcow2"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"OCTAVIA_AMP_IMAGE_SIZE=3"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"OCTAVIA_AMP_IMAGE_NAME=test-only-amphora-x64-haproxy-ubuntu-bionic"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"OCTAVIA_AMPHORA_DRIVER=amphora_noop_driver"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"OCTAVIA_COMPUTE_DRIVER=compute_noop_driver"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"OCTAVIA_NETWORK_DRIVER=network_noop_driver"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"DISABLE_AMP_IMAGE_BUILD=True"
- services+=,octavia,o-cw,o-hk,o-hm,o-api
- export PROJECTS="openstack/octavia $PROJECTS"
- export PROJECTS="openstack/barbican $PROJECTS"
- export PROJECTS="openstack/python-barbicanclient $PROJECTS"
- export PROJECTS="openstack/barbican-tempest-plugin $PROJECTS"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin barbican https://opendev.org/openstack/barbican"
-
- # use heat-tempest-plugin
- export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_PLUGINS+=' /opt/stack/new/heat-tempest-plugin'"
-
- export OVERRIDE_ENABLED_SERVICES=$services
-
- if [ "{{ branch_override }}" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
- if [ "{{ use_apache }}" -eq 0 ] ; then
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"HEAT_USE_MOD_WSGI=False"
- fi
- if [ "{{ use_amqp1 }}" -eq 1 ] ; then
- export PROJECTS="openstack/devstack-plugin-amqp1 $PROJECTS"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"export AMQP1_SERVICE=qpid-hybrid"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin devstack-plugin-amqp1 https://opendev.org/openstack/devstack-plugin-amqp1"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"export CELLSV2_SETUP=singleconductor"
- export DEVSTACK_PROJECT_FROM_GIT="oslo.messaging,$DEVSTACK_PROJECT_FROM_GIT"
- fi
- export DISABLE_CONVERGENCE="{{ disable_convergence }}"
- function pre_test_hook {
- cd /opt/stack/new/heat/heat_integrationtests
- source ./pre_test_hook.sh
- }
- export -f pre_test_hook
-
- function post_test_hook {
- cd /opt/stack/new/heat/heat_integrationtests
- source ./post_test_hook.sh
- }
- export -f post_test_hook
-
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
+ roles:
+ - orchestrate-devstack
+
+- hosts: tempest
+ roles:
+ - setup-tempest-run-dir
+ - setup-tempest-data-dir
+ - acl-devstack-files
+ - run-heat-tests
diff --git a/playbooks/devstack/grenade/run.yaml b/playbooks/devstack/grenade/run.yaml
deleted file mode 100644
index ec2ee28b8..000000000
--- a/playbooks/devstack/grenade/run.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-- hosts: primary
- name: job for grenade-heat
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- https://opendev.org \
- openstack/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- cat << 'EOF' >>"/tmp/dg-local.conf"
- [[local|localrc]]
- enable_plugin heat https://opendev.org/openstack/heat
- TEMPEST_PLUGINS+=' ../heat-tempest-plugin'
- EOF
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PROJECTS="openstack/grenade $PROJECTS"
- export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
- export PYTHONUNBUFFERED=true
- export GRENADE_PLUGINRC="enable_grenade_plugin heat https://opendev.org/openstack/heat"
- export DEVSTACK_GATE_NEUTRON=1
- export DEVSTACK_GATE_TEMPEST=1
- export DEVSTACK_GATE_TEMPEST_NOTESTS=1
- export DEVSTACK_GATE_GRENADE=pullup
- if [ "{{ branch_override }}" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
- export DEVSTACK_GATE_TOPOLOGY="{{ topology }}"
- export DEVSTACK_LOCAL_CONFIG=$'\n'"HOST_TOPOLOGY={{ topology }}"
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/devstack/multinode-networking/pre.yaml b/playbooks/devstack/multinode-networking/pre.yaml
deleted file mode 100644
index 3d29085c2..000000000
--- a/playbooks/devstack/multinode-networking/pre.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-- hosts: all
- roles:
- - multi-node-firewall
diff --git a/releasenotes/config.yaml b/releasenotes/config.yaml
new file mode 100644
index 000000000..225dc0ea0
--- /dev/null
+++ b/releasenotes/config.yaml
@@ -0,0 +1,4 @@
+---
+branch_name_re: '^stable/\w+$'
+release_tag_re: '((?:[\d.ab]|rc)+)'
+closed_branch_tag_re: '^(\w+)-eol$'
diff --git a/releasenotes/notes/Change-logger-path-e7a13878e5bb0bc2.yaml b/releasenotes/notes/Change-logger-path-e7a13878e5bb0bc2.yaml
new file mode 100644
index 000000000..4975d64c3
--- /dev/null
+++ b/releasenotes/notes/Change-logger-path-e7a13878e5bb0bc2.yaml
@@ -0,0 +1,10 @@
+---
+upgrade:
+ - |
+ We have changed some logger paths as listed below.
+ * Migrate ``heat.engine.clients.keystoneclient`` to
+ ``heat.engine.clients.os.keystone.heat_keystoneclient``
+ * remove ``heat.all``
+ * remove ``heat.api``
+ * remove ``heat.api.cfn``
+ * remove ``heat.engine``
diff --git a/releasenotes/notes/SOURCE_IP_PORT-to-LB_ALGORITHM-11f0edf22096df74.yaml b/releasenotes/notes/SOURCE_IP_PORT-to-LB_ALGORITHM-11f0edf22096df74.yaml
new file mode 100644
index 000000000..6d18f29d5
--- /dev/null
+++ b/releasenotes/notes/SOURCE_IP_PORT-to-LB_ALGORITHM-11f0edf22096df74.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - The ``lb_algorithm`` property of the ``OS::Octavia::Pool`` resource now supports
+ the SOURCE_IP_PORT option required by the Octavia OVN provider driver.
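A minimal sketch of a pool using the new algorithm value; the listener reference and protocol are placeholders, not values taken from this note:

    heat_template_version: rocky
    parameters:
      listener_id:
        type: string
    resources:
      pool:
        type: OS::Octavia::Pool
        properties:
          listener: {get_param: listener_id}
          protocol: TCP
          # SOURCE_IP_PORT is the algorithm required by the OVN provider driver
          lb_algorithm: SOURCE_IP_PORT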
diff --git a/releasenotes/notes/add-aodh-lbmemberhealth-alarm-c59502aac1944b8b.yaml b/releasenotes/notes/add-aodh-lbmemberhealth-alarm-c59502aac1944b8b.yaml
new file mode 100644
index 000000000..6a03fc302
--- /dev/null
+++ b/releasenotes/notes/add-aodh-lbmemberhealth-alarm-c59502aac1944b8b.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - The OS::Aodh::LBMemberHealthAlarm resource plugin is added to manage
+ the Aodh loadbalancer_member_health alarm.
\ No newline at end of file
diff --git a/releasenotes/notes/add-dedicated-auth-endpoint-config-for-servers-b20f7eb351f619d0.yaml b/releasenotes/notes/add-dedicated-auth-endpoint-config-for-servers-b20f7eb351f619d0.yaml
new file mode 100644
index 000000000..52d93f8da
--- /dev/null
+++ b/releasenotes/notes/add-dedicated-auth-endpoint-config-for-servers-b20f7eb351f619d0.yaml
@@ -0,0 +1,16 @@
+---
+features:
+ - |
+ Added a new config option server_keystone_endpoint_type to specify
+ the keystone authentication endpoint (public/internal/admin)
+ to pass into cloud-init data.
+ If left unset, the original behavior should remain unchanged.
+
+ This feature allows the deployer to unambiguously specify the
+ keystone endpoint passed to user provisioned servers, and is particularly
+ useful where the deployment network architecture requires the heat
+ service to interact with the internal endpoint,
+ but user provisioned servers only have access to the external network.
+
+ For more information see
+ http://lists.openstack.org/pipermail/openstack-discuss/2019-February/002925.html
diff --git a/releasenotes/notes/add-dns_domain-to-ProviderNet-84b14a85b8653c7c.yaml b/releasenotes/notes/add-dns_domain-to-ProviderNet-84b14a85b8653c7c.yaml
new file mode 100644
index 000000000..c654b5c43
--- /dev/null
+++ b/releasenotes/notes/add-dns_domain-to-ProviderNet-84b14a85b8653c7c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Added ``dns_domain`` property to resource type
+ ``OS::Neutron::ProviderNet``. This specifies the DNS domain to use when
+ publishing DNS records for ports on this network.
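A short sketch of the property in use; the physical network, segmentation details and domain are placeholders:

    heat_template_version: rocky
    resources:
      provider_net:
        type: OS::Neutron::ProviderNet
        properties:
          network_type: vlan
          physical_network: physnet1
          segmentation_id: 101
          # DNS domain used when publishing records for ports on this network
          dns_domain: example.org.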
diff --git a/releasenotes/notes/add-octavia-flavor-flavorprofile-support-90ef922d19591c60.yaml b/releasenotes/notes/add-octavia-flavor-flavorprofile-support-90ef922d19591c60.yaml
new file mode 100644
index 000000000..534a9d60d
--- /dev/null
+++ b/releasenotes/notes/add-octavia-flavor-flavorprofile-support-90ef922d19591c60.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Add support for ``OS::Octavia::Flavor`` and ``OS::Octavia::FlavorProfile``
+ resources and add ``flavor`` parameter in ``OS::Octavia::LoadBalancer``,
+ allowing users to configure Load Balancer capabilities.
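A rough sketch of how these resources might be wired together; the property names and flavor data shown are assumptions based on the Octavia flavor model, not details taken from this note:

    heat_template_version: rocky
    resources:
      flavor_profile:
        type: OS::Octavia::FlavorProfile
        properties:
          provider_name: amphora
          flavor_data: '{"loadbalancer_topology": "ACTIVE_STANDBY"}'
      flavor:
        type: OS::Octavia::Flavor
        properties:
          name: ha-flavor
          flavor_profile: {get_resource: flavor_profile}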
diff --git a/releasenotes/notes/add-port-uplink-status-propagation-abd90d794e330d31.yaml b/releasenotes/notes/add-port-uplink-status-propagation-abd90d794e330d31.yaml
new file mode 100644
index 000000000..bd067a15e
--- /dev/null
+++ b/releasenotes/notes/add-port-uplink-status-propagation-abd90d794e330d31.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Added ``propagate_uplink_status`` property to resource type
+ ``OS::Neutron::Port``. This resource depends on Neutron API
+ extension ``uplink-status-propagation`` and the default is
+ ``False``. If this property is set to ``True``, the VF link
+ state can follow that of the PF.
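A hedged example of the new property in a template; the network name is a placeholder:

    heat_template_version: rocky
    resources:
      sriov_port:
        type: OS::Neutron::Port
        properties:
          network: private
          # Requires the Neutron "uplink-status-propagation" API extension
          propagate_uplink_status: true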
diff --git a/releasenotes/notes/add-tty-property-to-container-1b8bf92f0f47deca.yaml b/releasenotes/notes/add-tty-property-to-container-1b8bf92f0f47deca.yaml
new file mode 100644
index 000000000..6bc47a0af
--- /dev/null
+++ b/releasenotes/notes/add-tty-property-to-container-1b8bf92f0f47deca.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Add ``tty`` property to ``OS::Zun::Container``.
+ This property allows users to open the TTY of the container.
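For example (the image name is a placeholder):

    heat_template_version: rocky
    resources:
      container:
        type: OS::Zun::Container
        properties:
          image: cirros
          # Allocate a pseudo-TTY for the container
          tty: true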
diff --git a/releasenotes/notes/add-vitrage-client-plugin-cb9e6b51ec2cc6ec.yaml b/releasenotes/notes/add-vitrage-client-plugin-cb9e6b51ec2cc6ec.yaml
new file mode 100644
index 000000000..e86394e81
--- /dev/null
+++ b/releasenotes/notes/add-vitrage-client-plugin-cb9e6b51ec2cc6ec.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Introduce a Vitrage client plugin module that will be used by the
+ Vitrage resources.
diff --git a/releasenotes/notes/delay-resource-7d44c512081026c8.yaml b/releasenotes/notes/delay-resource-7d44c512081026c8.yaml
new file mode 100644
index 000000000..27460753c
--- /dev/null
+++ b/releasenotes/notes/delay-resource-7d44c512081026c8.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ The ``OS::Heat::Delay`` resource type is now usable.
diff --git a/releasenotes/notes/deprecate-nova-quota-injected_file-properties-6c6fd7f5231e4c40.yaml b/releasenotes/notes/deprecate-nova-quota-injected_file-properties-6c6fd7f5231e4c40.yaml
new file mode 100644
index 000000000..fb7760639
--- /dev/null
+++ b/releasenotes/notes/deprecate-nova-quota-injected_file-properties-6c6fd7f5231e4c40.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - File injection is deprecated in the compute API. The injected_files,
+ injected_file_content_bytes, and injected_file_path_bytes properties
+ of the OS::Nova::Quota resource are deprecated accordingly.
diff --git a/releasenotes/notes/designate-zone-primaries-c48c37222ea06eb9.yaml b/releasenotes/notes/designate-zone-primaries-c48c37222ea06eb9.yaml
new file mode 100644
index 000000000..fb19a16db
--- /dev/null
+++ b/releasenotes/notes/designate-zone-primaries-c48c37222ea06eb9.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - |
+ The ``OS::Designate::Zone`` resource type's ``masters`` property is now
+ known as ``primaries``. Existing templates will continue to work.
diff --git a/releasenotes/notes/fix-autoscalinggroup-reference-id-caf8b80c9288ad0f.yaml b/releasenotes/notes/fix-autoscalinggroup-reference-id-caf8b80c9288ad0f.yaml
new file mode 100644
index 000000000..1021aa528
--- /dev/null
+++ b/releasenotes/notes/fix-autoscalinggroup-reference-id-caf8b80c9288ad0f.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+ - |
+ The behavior of ``get_resource`` on an ``OS::Heat::InstanceGroup``
+ resource has changed. Previously it returned the physical resource name
+ (i.e. the name of the nested Heat stack which implemented the group). It
+ will now return the UUID of the nested stack if available. This will also
+ apply to any resource type that inherits from
+ ``OS::Heat::AutoScalingGroup``, ``OS::Heat::InstanceGroup``, and
+ ``AWS::AutoScaling::AutoScalingGroup``.
diff --git a/releasenotes/notes/granular-action-policy-b8c143bb5f203b68.yaml b/releasenotes/notes/granular-action-policy-b8c143bb5f203b68.yaml
new file mode 100644
index 000000000..63b1cbcb5
--- /dev/null
+++ b/releasenotes/notes/granular-action-policy-b8c143bb5f203b68.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Operators can now apply different authorization policies to each action
+ supported by the action API (``actions:suspend`` for suspend,
+ ``actions:resume`` for resume, ``actions:check`` for check,
+ ``actions:cancel_update`` for cancel operation and roll back, and
+ ``actions:cancel_without_rollback`` for cancel operation without rolling
+ back). The default for each is to use the existing ``actions:action`` rule
+ that was previously the only way to specify policy for actions.
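For illustration only, a policy.yaml fragment using the new rules might look like the sketch below; the rule targets on the right-hand side are assumptions, not defaults stated in this note:

    # policy.yaml (illustrative values)
    "actions:suspend": "rule:deny_stack_user"
    "actions:resume": "rule:deny_stack_user"
    "actions:check": "rule:deny_stack_user"
    "actions:cancel_update": "rule:deny_stack_user"
    "actions:cancel_without_rollback": "rule:context_is_admin"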
diff --git a/releasenotes/notes/heat-template-support-trove-cluster-996efba5dfb6f02d.yaml b/releasenotes/notes/heat-template-support-trove-cluster-996efba5dfb6f02d.yaml
new file mode 100644
index 000000000..96ab6b943
--- /dev/null
+++ b/releasenotes/notes/heat-template-support-trove-cluster-996efba5dfb6f02d.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ The ``OS::Trove::Cluster`` resource type now supports specifying an
+ availability zone.
diff --git a/releasenotes/notes/hidden-multiattach-c761af6165c9571f.yaml b/releasenotes/notes/hidden-multiattach-c761af6165c9571f.yaml
new file mode 100644
index 000000000..d029bcee7
--- /dev/null
+++ b/releasenotes/notes/hidden-multiattach-c761af6165c9571f.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ ``multiattach`` property in ``OS::Cinder::Volume`` is now hidden.
+ Please use ``multiattach`` key in ``metadata`` property of
+ ``OS::Cinder::VolumeType`` instead.
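A hedged sketch of the replacement pattern; the extra-spec value format shown is the one Cinder commonly documents for multiattach volume types and is an assumption here, not taken from this note:

    heat_template_version: rocky
    resources:
      multiattach_type:
        type: OS::Cinder::VolumeType
        properties:
          name: multiattach
          metadata:
            # Cinder extra spec enabling multiattach for this volume type
            multiattach: '<is> True'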
diff --git a/releasenotes/notes/honoring_oslo_db_config-bf32711bf99a2e47.yaml b/releasenotes/notes/honoring_oslo_db_config-bf32711bf99a2e47.yaml
new file mode 100644
index 000000000..8de9ef7d9
--- /dev/null
+++ b/releasenotes/notes/honoring_oslo_db_config-bf32711bf99a2e47.yaml
@@ -0,0 +1,13 @@
+---
+fixes:
+ - |
+ The oslo.db configuration now controls the wrap_db_retry call in heat.
+ We removed the hard-coded settings for wrap_db_retry and use the
+ following options from oslo_db instead.
+ * database.db_max_retries
+ * database.db_retry_interval
+ * database.db_inc_retry_interval
+ * database.db_max_retry_interval
+ So the database config can now control db retries.
+ Please refer to [1] for what each config option does.
+ [1] https://opendev.org/openstack/oslo.db/src/branch/master/oslo_db/options.py
diff --git a/releasenotes/notes/if-macro-optional-properties-40647f036903731b.yaml b/releasenotes/notes/if-macro-optional-properties-40647f036903731b.yaml
new file mode 100644
index 000000000..971c417f1
--- /dev/null
+++ b/releasenotes/notes/if-macro-optional-properties-40647f036903731b.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ The ``wallaby`` template version introduces a new 2-argument form of the
+ ``if`` function. This allows users to specify optional property values, so
+ that when the condition is false Heat treats it the same as if no value
+ were specified for the property at all. The behaviour of existing templates
+ is unchanged, even after updating the template version to ``wallaby``.
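A minimal sketch of the new two-argument form; the resource, parameter and condition names are illustrative, not taken from the release note:

    heat_template_version: wallaby
    parameters:
      value_override:
        type: string
        default: ''
    conditions:
      has_override: {not: {equals: [{get_param: value_override}, '']}}
    resources:
      test:
        type: OS::Heat::TestResource
        properties:
          # When has_override is false, "value" is treated as though it
          # had not been specified at all.
          value: {if: [has_override, {get_param: value_override}]}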
diff --git a/releasenotes/notes/ike-properties-updateable-for-vpnaas-c42af7a4631e5dd3.yaml b/releasenotes/notes/ike-properties-updateable-for-vpnaas-c42af7a4631e5dd3.yaml
new file mode 100644
index 000000000..160232152
--- /dev/null
+++ b/releasenotes/notes/ike-properties-updateable-for-vpnaas-c42af7a4631e5dd3.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Properties of the VPNaaS ``OS::Neutron::IKEPolicy`` resource can now be
+ updated in place.
diff --git a/releasenotes/notes/ip-version-n-attribute-deprecation-bea1c6e4ca3678f1.yaml b/releasenotes/notes/ip-version-n-attribute-deprecation-bea1c6e4ca3678f1.yaml
new file mode 100644
index 000000000..a0e36f722
--- /dev/null
+++ b/releasenotes/notes/ip-version-n-attribute-deprecation-bea1c6e4ca3678f1.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - |
+ The ``accessIPv4`` and ``accessIPv6`` attributes of the
+ ``OS::Nova::Server`` resource are now deprecated, since Nova returns empty
+ values for them. Use the ``addresses`` attribute instead to get IP
+ addresses.
diff --git a/releasenotes/notes/neutron-extrarouteset-379c5354e1ac7795.yaml b/releasenotes/notes/neutron-extrarouteset-379c5354e1ac7795.yaml
new file mode 100644
index 000000000..cb2c8f54c
--- /dev/null
+++ b/releasenotes/notes/neutron-extrarouteset-379c5354e1ac7795.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ New resource ``OS::Neutron::ExtraRouteSet`` is added to manage extra
+ routes of a Neutron router.
+deprecations:
+ - |
+ Unsupported contrib resource ``OS::Neutron::ExtraRoute`` is deprecated
+ in favor of ``OS::Neutron::ExtraRouteSet`` on all OpenStack clouds where
+ Neutron extension ``extraroute-atomic`` is available.
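A minimal sketch of the new resource (the router reference and the CIDR/nexthop values are assumed for illustration)::

    resources:
      extra_routes:
        type: OS::Neutron::ExtraRouteSet
        properties:
          router: {get_resource: my_router}
          routes:
            - destination: 10.0.10.0/24
              nexthop: 10.0.0.10
            - destination: 10.0.11.0/24
              nexthop: 10.0.0.11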
diff --git a/releasenotes/notes/neutron-qos-minimum-bandwidth-rule-cb38db4ebc27688e.yaml b/releasenotes/notes/neutron-qos-minimum-bandwidth-rule-cb38db4ebc27688e.yaml
new file mode 100644
index 000000000..e82fd803e
--- /dev/null
+++ b/releasenotes/notes/neutron-qos-minimum-bandwidth-rule-cb38db4ebc27688e.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ New resource ``OS::Neutron::QoSMinimumBandwidthRule`` to support
+ ``minimum_bandwidth_rules`` in Neutron QoS. This resource depends
+ on the Neutron API extension ``qos-bw-minimum-ingress`` and, under the
+ default policy, it is admin-only.
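A minimal sketch (the QoS policy reference and the bandwidth value are illustrative assumptions)::

    resources:
      min_bw_rule:
        type: OS::Neutron::QoSMinimumBandwidthRule
        properties:
          policy: {get_resource: qos_policy}
          min_kbps: 1000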
diff --git a/releasenotes/notes/octavia-pool-tls-enabled-373a8c74f7c7664b.yaml b/releasenotes/notes/octavia-pool-tls-enabled-373a8c74f7c7664b.yaml
new file mode 100644
index 000000000..8593ad688
--- /dev/null
+++ b/releasenotes/notes/octavia-pool-tls-enabled-373a8c74f7c7664b.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Added the ``tls_enabled`` property to the ``OS::Octavia::Pool`` resource;
+ the property can also be updated in place. The property was introduced in
+ Octavia in the Stein release. It defaults to ``False`` if it is not
+ specified in the Heat template.
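A minimal sketch of a TLS-enabled pool (the listener reference, protocol and algorithm values are illustrative assumptions)::

    resources:
      pool:
        type: OS::Octavia::Pool
        properties:
          listener: {get_resource: listener}
          lb_algorithm: ROUND_ROBIN
          protocol: HTTPS
          tls_enabled: true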
diff --git a/releasenotes/notes/octavia-quota-resource-52c1ea86f16d9513.yaml b/releasenotes/notes/octavia-quota-resource-52c1ea86f16d9513.yaml
new file mode 100644
index 000000000..57e1dad18
--- /dev/null
+++ b/releasenotes/notes/octavia-quota-resource-52c1ea86f16d9513.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - New resource ``OS::Octavia::Quota`` is added to enable an admin to manage
+ Octavia service quotas for a specific project.
diff --git a/releasenotes/notes/os-neutron-net-segments-attribute-semi-predictable-b40a869317d053cc.yaml b/releasenotes/notes/os-neutron-net-segments-attribute-semi-predictable-b40a869317d053cc.yaml
new file mode 100644
index 000000000..f398d8928
--- /dev/null
+++ b/releasenotes/notes/os-neutron-net-segments-attribute-semi-predictable-b40a869317d053cc.yaml
@@ -0,0 +1,16 @@
+---
+fixes:
+ - |
+ The ordering in the list of segments returned by ``OS::Neutron::Net``
+ resources is not predictable. Stack updates changing attributes
+ of the network can cause the list of segments to shift.
+
+ The ordering is now slightly more predictable: segments with name=``None``
+ are now placed first in the list. This does not guarantee the order, but
+ typically only the segment implicitly created by neutron has no name
+ attribute set. The template author should ensure other segments on the
+ network do have a name set, so that the implicit segment will always be
+ at index 0. Attributes of the implicitly created segment on the network
+ resource can then be resolved predictably using index 0. See `bug:
+ 1894920 <https://bugs.launchpad.net/tripleo/+bug/1894920>`_.
+
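To make the reliance on index 0 concrete, an attribute lookup might look like this sketch (the network resource name and the per-segment key are assumptions, not defined by this change)::

    outputs:
      implicit_segment_id:
        description: ID of the implicitly created (unnamed) segment
        value: {get_attr: [my_net, segments, 0, id]}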
diff --git a/releasenotes/notes/port-mac-address-update-b377d23434e7b48a.yaml b/releasenotes/notes/port-mac-address-update-b377d23434e7b48a.yaml
new file mode 100644
index 000000000..781fffde3
--- /dev/null
+++ b/releasenotes/notes/port-mac-address-update-b377d23434e7b48a.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ ``OS::Neutron::Port`` resources will now be replaced when the
+ ``mac_address`` property is modified. Neutron is unable to update the MAC
+ address of a port once the port is in use.
diff --git a/releasenotes/notes/providernet-segments-attribute-cc20b22bf3a25e96.yaml b/releasenotes/notes/providernet-segments-attribute-cc20b22bf3a25e96.yaml
new file mode 100644
index 000000000..ab76b9ecb
--- /dev/null
+++ b/releasenotes/notes/providernet-segments-attribute-cc20b22bf3a25e96.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Adds a new ``segments`` attribute to the ``OS::Neutron::ProviderNet``
+ resource. The attribute resolves the segments of the network. \ No newline at end of file
diff --git a/releasenotes/notes/python2-7125a4d5b441e7a6.yaml b/releasenotes/notes/python2-7125a4d5b441e7a6.yaml
new file mode 100644
index 000000000..f12fb2e76
--- /dev/null
+++ b/releasenotes/notes/python2-7125a4d5b441e7a6.yaml
@@ -0,0 +1,5 @@
+---
+critical:
+ - |
+ Python 2 is no longer supported. This release runs only on Python 3 and is
+ tested only on Python 3.6 and 3.7.
diff --git a/releasenotes/notes/remove-default-domain-from-templates-b5965242bfb78145.yaml b/releasenotes/notes/remove-default-domain-from-templates-b5965242bfb78145.yaml
new file mode 100644
index 000000000..ca46f199f
--- /dev/null
+++ b/releasenotes/notes/remove-default-domain-from-templates-b5965242bfb78145.yaml
@@ -0,0 +1,13 @@
+---
+prelude: >
+ There was a mismatch between the way role creation behaved in heat
+ templates and in the openstack CLI with respect to the default domain.
+ When no domain was specified, the CLI did not assign one to the newly
+ created role, but heat templates assigned the "default" domain.
+critical:
+ - |
+ Templates that create roles but do not specify the domain will no longer
+ get a "default" domain. To have a domain added to your new role, it
+ needs to be assigned in the template.
+
diff --git a/releasenotes/notes/remove-designate-v1-support-107de4784f8da2a6.yaml b/releasenotes/notes/remove-designate-v1-support-107de4784f8da2a6.yaml
new file mode 100644
index 000000000..07a06aa85
--- /dev/null
+++ b/releasenotes/notes/remove-designate-v1-support-107de4784f8da2a6.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ The Designate project removed v1 API support in stable/queens.
+ Heat has now removed support for the v1 resources ``OS::Designate::Domain``
+ and ``OS::Designate::Record`` completely and replaced them with
+ placeholders for existing templates that use those resources. The
+ ``designate.domain`` custom constraint has also been removed.
diff --git a/releasenotes/notes/remove-nova-api-extension-934f8389ea42e9e4.yaml b/releasenotes/notes/remove-nova-api-extension-934f8389ea42e9e4.yaml
new file mode 100644
index 000000000..609edc4ea
--- /dev/null
+++ b/releasenotes/notes/remove-nova-api-extension-934f8389ea42e9e4.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Nova has removed API extension support and its API bindings.
+ Heat has now removed support for extensions from the nova client
+ plugin and from the resource plugins that use it.
diff --git a/releasenotes/notes/support-allowed-cidrs-for-octavia-listener-d563a759d34da8b0.yaml b/releasenotes/notes/support-allowed-cidrs-for-octavia-listener-d563a759d34da8b0.yaml
new file mode 100644
index 000000000..f115bb351
--- /dev/null
+++ b/releasenotes/notes/support-allowed-cidrs-for-octavia-listener-d563a759d34da8b0.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - Added the ``allowed_cidrs`` property to the resource
+ ``OS::Octavia::Listener``; the property can also be updated in place.
+ The property was introduced in Octavia in the Train release. It defaults
+ to an empty list if it is not specified in the Heat template.
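A minimal sketch of a listener restricting client CIDRs (the load balancer reference, protocol and CIDR values are illustrative assumptions)::

    resources:
      listener:
        type: OS::Octavia::Listener
        properties:
          loadbalancer: {get_resource: lb}
          protocol: HTTPS
          protocol_port: 443
          allowed_cidrs:
            - 192.0.2.0/24
            - 198.51.100.0/24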
diff --git a/releasenotes/notes/support-cephx-access-type-in-manila-share-71a416bf55aea214.yaml b/releasenotes/notes/support-cephx-access-type-in-manila-share-71a416bf55aea214.yaml
new file mode 100644
index 000000000..9ccb63370
--- /dev/null
+++ b/releasenotes/notes/support-cephx-access-type-in-manila-share-71a416bf55aea214.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ Manila resources now use the 'sharev2' endpoint and API version '2.13'.
+fixes:
+ - |
+ ``OS::Manila::Share`` now properly supports ``cephx`` as a value for the
+ ``access_type`` key of the ``access_rules`` property.
diff --git a/releasenotes/notes/support-domain-in-keystone-lookups-f657da8322f17938.yaml b/releasenotes/notes/support-domain-in-keystone-lookups-f657da8322f17938.yaml
new file mode 100644
index 000000000..a464386ea
--- /dev/null
+++ b/releasenotes/notes/support-domain-in-keystone-lookups-f657da8322f17938.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Supports user, group, role and project lookup across domains. A domain
+ parameter has been added to the keystone lookup functions. Heat templates
+ now support user{domain}, group{domain}, role{domain} and project{domain}
+ for cross-domain lookup. Keystone constraints will also work across domains.
diff --git a/releasenotes/notes/support-handling-empty-string-for-volume-az-22ad78eb0f931954.yaml b/releasenotes/notes/support-handling-empty-string-for-volume-az-22ad78eb0f931954.yaml
new file mode 100644
index 000000000..e1b71be43
--- /dev/null
+++ b/releasenotes/notes/support-handling-empty-string-for-volume-az-22ad78eb0f931954.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ An empty string passed in for the volume availability_zone is now
+ handled correctly. It is treated the same as no AZ being set, so the
+ default AZ in cinder.conf will be used.
diff --git a/releasenotes/notes/support-ignition-93daac40f43a2cfe.yaml b/releasenotes/notes/support-ignition-93daac40f43a2cfe.yaml
new file mode 100644
index 000000000..52d468f35
--- /dev/null
+++ b/releasenotes/notes/support-ignition-93daac40f43a2cfe.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Heat can now support software deployments with CoreOS by passing
+ a CoreOS Ignition config in the ``user_data`` property for an
+ ``OS::Nova::Server`` resource when the ``user_data_format`` is
+ set to ``SOFTWARE_CONFIG``.
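A minimal sketch (the image and flavor names, and the ``config.ign`` file, are illustrative assumptions)::

    resources:
      server:
        type: OS::Nova::Server
        properties:
          image: fedora-coreos
          flavor: m1.small
          user_data_format: SOFTWARE_CONFIG
          # config.ign is an Ignition JSON document stored alongside the template.
          user_data: {get_file: config.ign}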
diff --git a/releasenotes/notes/support-ironic-client-plugin-b7b91b7090579c81.yaml b/releasenotes/notes/support-ironic-client-plugin-b7b91b7090579c81.yaml
new file mode 100644
index 000000000..258317496
--- /dev/null
+++ b/releasenotes/notes/support-ironic-client-plugin-b7b91b7090579c81.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Introduced an Ironic client plugin module that will be used by the
+ Ironic resources.
+ Only ironicclient versions >=2.8.0 are supported, in order to get
+ allocation functionality.
diff --git a/releasenotes/notes/support-ironic-port-resource-type-304284a7c508d5d5.yaml b/releasenotes/notes/support-ironic-port-resource-type-304284a7c508d5d5.yaml
new file mode 100644
index 000000000..169082aa3
--- /dev/null
+++ b/releasenotes/notes/support-ironic-port-resource-type-304284a7c508d5d5.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ New resource type ``OS::Ironic::Port`` is now supported in the Orchestration service.
diff --git a/releasenotes/notes/support-rbac-824a2d02c8746d3d.yaml b/releasenotes/notes/support-rbac-824a2d02c8746d3d.yaml
new file mode 100644
index 000000000..faaa3283c
--- /dev/null
+++ b/releasenotes/notes/support-rbac-824a2d02c8746d3d.yaml
@@ -0,0 +1,15 @@
+---
+features:
+ - |
+ The default policies provided by heat api have been updated to add support
+ for default roles and system scope. This is part of a broader community
+ effort to support read-only roles and implement secure, consistent default
+ policies.
+
+ Refer to `the Keystone documentation`__ for more information on the reason
+ for these changes.
+
+ __ https://docs.openstack.org/keystone/latest/admin/service-api-protection.html
+deprecations:
+ - |
+ The old default policy rules have been deprecated for removal in the Xena cycle.
diff --git a/releasenotes/notes/support-shared-servies-multi-region-mode-d9f167fb52d9c0a8.yaml b/releasenotes/notes/support-shared-servies-multi-region-mode-d9f167fb52d9c0a8.yaml
new file mode 100644
index 000000000..756730f9c
--- /dev/null
+++ b/releasenotes/notes/support-shared-servies-multi-region-mode-d9f167fb52d9c0a8.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Support shared services in multi-region mode. The services are declared
+ as a list in the configuration, for example
+ shared_services_types=image, volume, volumev2.
diff --git a/releasenotes/notes/support_case_insensitive_user_name_search-92d6126d8be2ce4f.yaml b/releasenotes/notes/support_case_insensitive_user_name_search-92d6126d8be2ce4f.yaml
new file mode 100644
index 000000000..ba37145ad
--- /dev/null
+++ b/releasenotes/notes/support_case_insensitive_user_name_search-92d6126d8be2ce4f.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ On clouds where Keystone usernames are `case-insensitive
+ <https://docs.openstack.org/keystone/latest/admin/case-insensitive.html>`_,
+ Heat will now allow usernames with any case as property and
+ parameter values where a Keystone user is expected
+ (i.e. a ``keystone.user`` custom constraint applies).
+ Previously the case had to match the case with which the name
+ was stored in Keystone, even if Keystone itself was
+ case-insensitive.
diff --git a/releasenotes/notes/support_set_group_for_multipart-79b5819b9b3a82ad.yaml b/releasenotes/notes/support_set_group_for_multipart-79b5819b9b3a82ad.yaml
new file mode 100644
index 000000000..718bca212
--- /dev/null
+++ b/releasenotes/notes/support_set_group_for_multipart-79b5819b9b3a82ad.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Added a ``group`` property to ``OS::Heat::MultipartMime``. This allows you
+ to set the group for the entire multipart config resource, like the
+ ``group`` property in ``OS::Heat::SoftwareConfig``. Be aware that you must
+ make sure all configs in the MultipartMime resource work with that
+ ``group``. The default value is ``Heat::Ungrouped``.
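A minimal sketch combining the new property with a software config (resource names and the script body are illustrative assumptions)::

    resources:
      boot_script:
        type: OS::Heat::SoftwareConfig
        properties:
          group: ungrouped
          config: |
            #!/bin/sh
            echo "hello from multipart"

      userdata:
        type: OS::Heat::MultipartMime
        properties:
          group: Heat::Ungrouped
          parts:
            - config: {get_resource: boot_script}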
diff --git a/releasenotes/notes/update-firwallpolicy-ruls-90a8904e899b2365.yaml b/releasenotes/notes/update-firwallpolicy-ruls-90a8904e899b2365.yaml
new file mode 100644
index 000000000..69049c923
--- /dev/null
+++ b/releasenotes/notes/update-firwallpolicy-ruls-90a8904e899b2365.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+- |
+ The ``firewall_rules`` property of the ``OS::Neutron::FirewallPolicy``
+ resource type is now optional. \ No newline at end of file
diff --git a/releasenotes/notes/update-webimage-resource-properties-c3e06b2c98b7d127.yaml b/releasenotes/notes/update-webimage-resource-properties-c3e06b2c98b7d127.yaml
new file mode 100644
index 000000000..6fb9a7e9d
--- /dev/null
+++ b/releasenotes/notes/update-webimage-resource-properties-c3e06b2c98b7d127.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ The ``OS::Glance::WebImage`` resource type now supports an
+ ``active`` property to allow administrators to deactivate
+ and reactivate the Image. Images remain active by default.
+ - |
+ The ``OS::Glance::WebImage`` resource type now supports a
+ ``members`` property for managing a list of other tenants
+ with access to the Image. \ No newline at end of file
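A minimal sketch using both new properties (the image URL, format values and the member project ID are illustrative assumptions)::

    resources:
      image:
        type: OS::Glance::WebImage
        properties:
          name: cirros
          container_format: bare
          disk_format: qcow2
          location: http://example.com/cirros.qcow2
          active: true
          members:
            - 0123456789abcdef0123456789abcdef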
diff --git a/releasenotes/notes/vitrage-template-resource-8869a8e34418b22f.yaml b/releasenotes/notes/vitrage-template-resource-8869a8e34418b22f.yaml
new file mode 100644
index 000000000..012720c3b
--- /dev/null
+++ b/releasenotes/notes/vitrage-template-resource-8869a8e34418b22f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - A new ``OS::Vitrage::Template`` resource is added to configure and create
+ a Vitrage template. The Vitrage template can be used, for example, for
+ executing a Mistral healing workflow in case there is an alarm on an
+ instance.
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index d0db3f0d7..b60748bb9 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -43,9 +43,8 @@ extensions = [
]
# openstackdocstheme options
-repository_name = 'openstack/heat'
-bug_project = 'heat'
-bug_tag = ''
+openstackdocs_repo_name = 'openstack/heat'
+openstackdocs_use_storyboard = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -60,7 +59,6 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'Heat Release Notes'
copyright = u'2015, Heat Developers'
# Release notes are version independent, no need to set version and release
@@ -97,7 +95,7 @@ exclude_patterns = []
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
@@ -146,10 +144,6 @@ html_static_path = ['_static']
# directly to the root of the documentation.
# html_extra_path = []
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
-
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
@@ -193,17 +187,6 @@ htmlhelp_basename = 'HeatReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- # 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- # 'preamble': '',
-}
-
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 74e28d847..699c572c1 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,9 @@
:maxdepth: 1
unreleased
+ victoria
+ ussuri
+ train
stein
rocky
queens
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index c4e490a42..52f728ed8 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -1,14 +1,16 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
+# Andi Chandler <andi@gowling.com>, 2019. #zanata
+# Andi Chandler <andi@gowling.com>, 2020. #zanata
msgid ""
msgstr ""
"Project-Id-Version: openstack-heat\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2019-01-24 04:16+0000\n"
+"POT-Creation-Date: 2020-12-11 07:28+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2018-12-04 05:27+0000\n"
+"PO-Revision-Date: 2020-12-13 12:33+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -43,6 +45,9 @@ msgstr "10.0.1"
msgid "10.0.2"
msgstr "10.0.2"
+msgid "10.0.3-31"
+msgstr "10.0.3-31"
+
msgid "11.0.0"
msgstr "11.0.0"
@@ -55,6 +60,45 @@ msgstr "11.0.0.0b3"
msgid "11.0.0.0rc1"
msgstr "11.0.0.0rc1"
+msgid "11.0.2"
+msgstr "11.0.2"
+
+msgid "11.0.3"
+msgstr "11.0.3"
+
+msgid "12.0.0"
+msgstr "12.0.0"
+
+msgid "12.0.0.0rc1"
+msgstr "12.0.0.0rc1"
+
+msgid "12.1.0"
+msgstr "12.1.0"
+
+msgid "13.0.0"
+msgstr "13.0.0"
+
+msgid "13.0.0.0rc1"
+msgstr "13.0.0.0rc1"
+
+msgid "13.0.1"
+msgstr "13.0.1"
+
+msgid "13.1.0-4"
+msgstr "13.1.0-4"
+
+msgid "14.0.0"
+msgstr "14.0.0"
+
+msgid "14.1.0-6"
+msgstr "14.1.0-6"
+
+msgid "15.0.0"
+msgstr "15.0.0"
+
+msgid "15.0.0-39"
+msgstr "15.0.0-39"
+
msgid "5.0.1"
msgstr "5.0.1"
@@ -115,6 +159,9 @@ msgstr "9.0.4"
msgid "9.0.5"
msgstr "9.0.5"
+msgid "9.0.7-17"
+msgstr "9.0.7-17"
+
msgid ""
"A new 'parameter_merge_strategies' section can be added to the environment "
"file, where 'default' and/or parameter specific merge strategies can be "
@@ -163,6 +210,20 @@ msgstr ""
"secrets that are relevant to certificates."
msgid ""
+"A new ``OS::Blazar::Host`` resource is added to manage compute hosts for the "
+"lease/reservation in OpenStack."
+msgstr ""
+"A new ``OS::Blazar::Host`` resource is added to manage compute hosts for the "
+"lease/reservation in OpenStack."
+
+msgid ""
+"A new ``OS::Blazar::Lease`` resource is added to manage reservations for "
+"specific type/amount of cloud resources in OpenStack."
+msgstr ""
+"A new ``OS::Blazar::Lease`` resource is added to manage reservations for "
+"specific type/amount of cloud resources in OpenStack."
+
+msgid ""
"A new ``OS::Keystone::Region`` resource that helps in managing the lifecycle "
"of keystone region."
msgstr ""
@@ -170,6 +231,20 @@ msgstr ""
"of Keystone region."
msgid ""
+"A new ``OS::Neutron::TaaS::TapFlow`` resource is added to support a Tap Flow "
+"in the Neutron Tap-as-a-service plugin."
+msgstr ""
+"A new ``OS::Neutron::TaaS::TapFlow`` resource is added to support a Tap Flow "
+"in the Neutron Tap-as-a-service plugin."
+
+msgid ""
+"A new ``OS::Neutron::TaaS::TapService`` resource is added to support a Tap "
+"Service in the Neutron Tap-as-a-service plugin."
+msgstr ""
+"A new ``OS::Neutron::TaaS::TapService`` resource is added to support a Tap "
+"Service in the Neutron Tap-as-a-service plugin."
+
+msgid ""
"A new ``OS::Neutron:AddressScope`` resource that helps in managing the "
"lifecycle of neutron address scope. Availability of this resource depends on "
"availability of neutron ``address-scope`` API extension. This resource can "
@@ -249,6 +324,28 @@ msgid "Add UDP to supported protocols for Octavia."
msgstr "Add UDP to supported protocols for Octavia."
msgid ""
+"Add ``ca_cert`` and ``insecure`` properties for ``OS::Heat::Stack`` resource "
+"type. The ``ca_cert`` is the contents of a CA Certificate file that can be "
+"used to verify a remote cloud or region's server certificate. ``insecure`` "
+"is boolean option, CA cert will be use if we didn't setup insecure flag."
+msgstr ""
+"Add ``ca_cert`` and ``insecure`` properties for ``OS::Heat::Stack`` resource "
+"type. The ``ca_cert`` is the contents of a CA Certificate file that can be "
+"used to verify a remote cloud or region's server certificate. ``insecure`` "
+"is boolean option, CA cert will be use if we didn't setup insecure flag."
+
+msgid ""
+"Add ``group`` property to ``OS::Heat::MultipartMime``. This allow you to set "
+"group for entire multipart cofig resource like ``group`` property in ``OS::"
+"Heat::SoftwareConfig``. Aware that, you must make sure all configs in "
+"MultipartMime works with ``group``. Default value is ``Heat::Ungrouped``."
+msgstr ""
+"Add ``group`` property to ``OS::Heat::MultipartMime``. This allow you to set "
+"group for entire multipart config resource like ``group`` property in ``OS::"
+"Heat::SoftwareConfig``. Aware that, you must make sure all configs in "
+"MultipartMime works with ``group``. Default value is ``Heat::Ungrouped``."
+
+msgid ""
"Add ``map_replace`` function, that takes 2 arguments an input map and a map "
"containing a ``keys`` and/or ``values`` map. key/value substitutions on the "
"input map are performed based on the mappings passed in ``keys`` and "
@@ -260,6 +357,13 @@ msgstr ""
"``values``."
msgid ""
+"Add ``tty`` property to ``OS::Zun::Container``. This property allows users "
+"to open the TTY of the container."
+msgstr ""
+"Add ``tty`` property to ``OS::Zun::Container``. This property allows users "
+"to open the TTY of the container."
+
+msgid ""
"Add ``yaql`` function, that takes 2 arguments ``expression`` of type string "
"and ``data`` of type map and evaluates ``expression`` on a given ``data``."
msgstr ""
@@ -305,6 +409,13 @@ msgstr ""
"template environment."
msgid ""
+"Add a new OS::Glance::WebImage resource supporting the web-download import "
+"of Glance v2."
+msgstr ""
+"Add a new OS::Glance::WebImage resource supporting the web-download import "
+"of Glance v2."
+
+msgid ""
"Add a new property ``networks`` to resource OS::Zun::Container. This "
"property is an ordered list of nics to be added to this container, with "
"information about connected networks, fixed ips, and port. This property can "
@@ -323,6 +434,19 @@ msgstr ""
"function can work with project resource."
msgid ""
+"Add multiple OpenStack orchestration support - User can now use ``OS::Heat::"
+"Stack`` to create stack in another OpenStack cloud. Must provide properties "
+"``credential_secret_id`` in ``context``. Remote stack resource will get "
+"authentication information from cloud credential to refresh context before "
+"calling stack create."
+msgstr ""
+"Add multiple OpenStack orchestration support - User can now use ``OS::Heat::"
+"Stack`` to create stack in another OpenStack cloud. Must provide properties "
+"``credential_secret_id`` in ``context``. Remote stack resource will get "
+"authentication information from cloud credential to refresh context before "
+"calling stack create."
+
+msgid ""
"Add new ``OS::Barbican::GenericContainer`` resource for storing arbitrary "
"barbican secrets."
msgstr ""
@@ -355,6 +479,31 @@ msgstr ""
"the existing Heat autoscaling behaviour, we manually create the Monasca "
"notification resource in Heat with a default interval value of 60."
+msgid "Add rbac_policy and subnetpool support for OS::Neutron::Quota resource."
+msgstr ""
+"Add rbac_policy and subnetpool support for OS::Neutron::Quota resource."
+
+msgid ""
+"Add support for ``OS::Octavia::Flavor`` and ``OS::Octavia::FlavorProfile`` "
+"resources and add ``flavor`` parameter in ``OS::Octavia::LoadBalancer``, "
+"allowing users to configure Load Balancer capabilities."
+msgstr ""
+"Add support for ``OS::Octavia::Flavor`` and ``OS::Octavia::FlavorProfile`` "
+"resources and add ``flavor`` parameter in ``OS::Octavia::LoadBalancer``, "
+"allowing users to configure Load Balancer capabilities."
+
+msgid "Add tags support for ProviderNet resource"
+msgstr "Add tags support for ProviderNet resource"
+
+msgid ""
+"Added ``dns_domain`` property to resource type ``OS::Neutron::ProviderNet``. "
+"This specifies the DNS domain to use when publishing DNS records for ports "
+"on this network."
+msgstr ""
+"Added ``dns_domain`` property to resource type ``OS::Neutron::ProviderNet``. "
+"This specifies the DNS domain to use when publishing DNS records for ports "
+"on this network."
+
msgid ""
"Added ``hostname``, ``hints``, ``security_groups``, and ``mounts`` "
"properties to Zun Container resources."
@@ -376,6 +525,17 @@ msgstr ""
"l2_adjacency property)::"
msgid ""
+"Added ``propagate_uplink_status`` property to resource type ``OS::Neutron::"
+"Port``. This resource depends on Neutron API extension ``uplink-status-"
+"propagation`` and the default is ``False``. If this property is set to "
+"``True``, the VF link state can follow that of PF."
+msgstr ""
+"Added ``propagate_uplink_status`` property to resource type ``OS::Neutron::"
+"Port``. This resource depends on Neutron API extension ``uplink-status-"
+"propagation`` and the default is ``False``. If this property is set to "
+"``True``, the VF link state can follow that of PF."
+
+msgid ""
"Added a new ``event-sinks`` element to the environment which allows "
"specifying a target where events from the stack are sent. It supports the "
"``zaqar-queue`` element for now."
@@ -385,6 +545,15 @@ msgstr ""
"``zaqar-queue`` element for now."
msgid ""
+"Added a new config option server_keystone_endpoint_type to specify the "
+"keystone authentication endpoint (public/internal/admin) to pass into cloud-"
+"init data. If left unset the original behavior should remain unchanged."
+msgstr ""
+"Added a new config option server_keystone_endpoint_type to specify the "
+"keystone authentication endpoint (public/internal/admin) to pass into cloud-"
+"init data. If left unset the original behavior should remain unchanged."
+
+msgid ""
"Added a new schema property tags, to parameters, to categorize parameters "
"based on features."
msgstr ""
@@ -399,6 +568,19 @@ msgstr ""
"and ``/stack/outputs/output_key``."
msgid ""
+"Added new config option ``[DEFAULT]allow_trusts_redelegation`` (``False`` by "
+"default). When enabled and ``reauthentication_auth_method`` is set to "
+"``trusts``, Heat will always create trusts with enabled redelegation, for "
+"both trusts used for long running stacks and for trusts used for deferred "
+"authentication."
+msgstr ""
+"Added new config option ``[DEFAULT]allow_trusts_redelegation`` (``False`` by "
+"default). When enabled and ``reauthentication_auth_method`` is set to "
+"``trusts``, Heat will always create trusts with enabled redelegation, for "
+"both trusts used for long running stacks and for trusts used for deferred "
+"authentication."
+
+msgid ""
"Added new functionality for showing and listing stack outputs without "
"resolving all outputs during stack initialisation."
msgstr ""
@@ -605,6 +787,13 @@ msgstr "Allow to set or update the tags for OS::Neutron::Subnet resource."
msgid "Allow to set or update the tags for OS::Neutron::SubnetPool resource."
msgstr "Allow to set or update the tags for OS::Neutron::SubnetPool resource."
+msgid ""
+"Behaviour has been adjusted to bring this in line with the CLI and GUI, in "
+"which it is optional."
+msgstr ""
+"Behaviour has been adjusted to bring this in line with the CLI and GUI, in "
+"which it is optional."
+
msgid "Bug Fixes"
msgstr "Bug Fixes"
@@ -636,6 +825,19 @@ msgid "Deprecation Notes"
msgstr "Deprecation Notes"
msgid ""
+"Designate project had removed v1 api support since stable/queens. Heat has "
+"now removed support for v1 resources ``OS::Designate::Domain`` and ``OS::"
+"Designate::Record`` completely and replaced them with placeholders for "
+"existing templates with those resources. The ``designate.domain`` custom "
+"constraint has also been removed."
+msgstr ""
+"Designate project had removed v1 api support since stable/queens. Heat has "
+"now removed support for v1 resources ``OS::Designate::Domain`` and ``OS::"
+"Designate::Record`` completely and replaced them with placeholders for "
+"existing templates with those resources. The ``designate.domain`` custom "
+"constraint has also been removed."
+
+msgid ""
"Designate v1 resource plugins OS::Designate::Domain and OS::Designate::"
"Record are deprecated."
msgstr ""
@@ -650,6 +852,29 @@ msgstr ""
"RecordSet are newly added."
msgid ""
+"Empty string passing in for volume availability_zone can be correctly "
+"handled now. For this case, it's same as no AZ set, so the default AZ in "
+"cinder.conf will be used."
+msgstr ""
+"Empty string passing in for volume availability_zone can be correctly "
+"handled now. For this case, it's same as no AZ set, so the default AZ in "
+"cinder.conf will be used."
+
+msgid ""
+"Erroneously, availability_zone for host aggregate resource types was "
+"considered mandatory in heat templates."
+msgstr ""
+"Erroneously, availability_zone for host aggregate resource types was "
+"considered mandatory in heat templates."
+
+msgid ""
+"For more information see http://lists.openstack.org/pipermail/openstack-"
+"discuss/2019-February/002925.html"
+msgstr ""
+"For more information see http://lists.openstack.org/pipermail/openstack-"
+"discuss/2019-February/002925.html"
+
+msgid ""
"Force delete the nova instance. If a resource is related with a nova "
"instance which is in 'SOFT_DELETED' status, the resource can't be deleted, "
"when nova config 'reclaim_instance_interval'. so, force-delete the nova "
@@ -666,6 +891,34 @@ msgid "Heat Release Notes"
msgstr "Heat Release Notes"
msgid ""
+"Heat can now perform a stack update to roll back to a previous version of a "
+"resource after a previous attempt to create a replacement for it failed "
+"(provided that convergence is enabled). This allows the user to recover a "
+"stack where a resource has been inadvertantly replaced with a definition "
+"than can never succeed because it conflicts with the original. Previously "
+"this required automatic rollback to be enabled, or the user had to update "
+"the stack with a non-conflicting definition before rolling back to the "
+"original."
+msgstr ""
+"Heat can now perform a stack update to roll back to a previous version of a "
+"resource after a previous attempt to create a replacement for it failed "
+"(provided that convergence is enabled). This allows the user to recover a "
+"stack where a resource has been inadvertantly replaced with a definition "
+"than can never succeed because it conflicts with the original. Previously "
+"this required automatic rollback to be enabled, or the user had to update "
+"the stack with a non-conflicting definition before rolling back to the "
+"original."
+
+msgid ""
+"Heat can now support software deployments with CoreOS by passing a CoreOS "
+"Ignition config in the ``user_data`` property for an ``OS::Nova::Server`` "
+"resource when the ``user_data_format`` is set to ``SOFTWARE_CONFIG``."
+msgstr ""
+"Heat can now support software deployments with CoreOS by passing a CoreOS "
+"Ignition config in the ``user_data`` property for an ``OS::Nova::Server`` "
+"resource when the ``user_data_format`` is set to ``SOFTWARE_CONFIG``."
+
+msgid ""
"Heat current bug/blueprint reports have migrated from Launchpad to "
"`storyboard`_. If you would like to create a new story (a bug or a "
"blueprint), please file it under the `Heat project`_. This change applies to "
@@ -760,6 +1013,22 @@ msgstr ""
"resources."
msgid ""
+"Introduce a Ironic client plugin module that will be used by the Ironic's "
+"resources. Support only ironicclient version >=2.8.0 to get allocation "
+"functionality support."
+msgstr ""
+"Introduce a Ironic client plugin module that will be used by the Ironic's "
+"resources. Support only ironicclient version >=2.8.0 to get allocation "
+"functionality support."
+
+msgid ""
+"Introduce a Vitrage client plugin module that will be used by the Vitrage "
+"resources."
+msgstr ""
+"Introduce a Vitrage client plugin module that will be used by the Vitrage "
+"resources."
+
+msgid ""
"Introduce a Zun client plugin module that will be used by the Zun's "
"resources that are under development."
msgstr ""
@@ -794,6 +1063,10 @@ msgstr ""
"deprecated, should use `OS::Magnum::ClusterTemplate` instead Deprecation "
"warnings are printed for old usages."
+msgid "Manila resources now use the 'sharev2' endpoint and API version '2.13'."
+msgstr ""
+"Manila resources now use the 'sharev2' endpoint and API version '2.13'."
+
msgid "Mitaka Series Release Notes"
msgstr "Mitaka Series Release Notes"
@@ -838,6 +1111,17 @@ msgstr ""
"particular."
msgid ""
+"New config ``max_nova_api_microversion`` to set the maximum nova API "
+"microversion for nova client plugin. If``max_nova_api_microversion`` is set, "
+"any nova features supported with microversion number above "
+"max_nova_api_microversion will not be available."
+msgstr ""
+"New config ``max_nova_api_microversion`` to set the maximum nova API "
+"microversion for nova client plugin. If``max_nova_api_microversion`` is set, "
+"any nova features supported with microversion number above "
+"max_nova_api_microversion will not be available."
+
+msgid ""
"New config section ``volumes`` with new config option "
"``[volumes]backups_enabled`` (defaults to ``True``). Operators that do not "
"have Cinder backup service deployed in their cloud are encouraged to set "
@@ -849,6 +1133,15 @@ msgstr ""
"this option to ``False``."
msgid ""
+"New document is out for ``multi-clouds support``, check out https://docs."
+"openstack.org/heat/latest/template_guide/multi-clouds.html for more "
+"information."
+msgstr ""
+"New document is out for ``multi-clouds support``, check out https://docs."
+"openstack.org/heat/latest/template_guide/multi-clouds.html for more "
+"information."
+
+msgid ""
"New framework for ``heat-status upgrade check`` command is added. This "
"framework allows adding various checks which can be run before a Heat "
"upgrade to ensure if the upgrade can be performed safely."
@@ -884,6 +1177,35 @@ msgstr ""
"These include gigabytes, snapshots, and volumes."
msgid ""
+"New resource ``OS::Neutron::ExtraRouteSet`` is added to manage extra routes "
+"of a Neutron router."
+msgstr ""
+"New resource ``OS::Neutron::ExtraRouteSet`` is added to manage extra routes "
+"of a Neutron router."
+
+msgid ""
+"New resource ``OS::Neutron::L2GatewayConnection`` to allow management of "
+"Neutron Layer2 Gateway Connection. This resource provides capability to "
+"connect a Neutron network to a Layer2 Gateway. The resource depends on the "
+"Neutron ``l2-gateway`` extension."
+msgstr ""
+"New resource ``OS::Neutron::L2GatewayConnection`` to allow management of "
+"Neutron Layer2 Gateway Connection. This resource provides capability to "
+"connect a Neutron network to a Layer2 Gateway. The resource depends on the "
+"Neutron ``l2-gateway`` extension."
+
+msgid ""
+"New resource ``OS::Neutron::L2Gateway`` to allow management of Neutron "
+"Layer2 Gateway. This resource provides life-cycle management of layer2 "
+"gateway instances. The resource depends on the Neutron ``l2-gateway`` "
+"extension."
+msgstr ""
+"New resource ``OS::Neutron::L2Gateway`` to allow management of Neutron "
+"Layer2 Gateway. This resource provides life-cycle management of layer2 "
+"gateway instances. The resource depends on the Neutron ``l2-gateway`` "
+"extension."
+
+msgid ""
"New resource ``OS::Neutron::LBaaS::HealthMonitor`` is added to create and "
"manage Health Monitors which watch status of the Load Balanced servers."
msgstr ""
@@ -920,6 +1242,17 @@ msgstr ""
"Pools which represent a group of nodes. Pools define the subnet where nodes "
"reside, the balancing algorithm, and the nodes themselves."
+msgid ""
+"New resource ``OS::Neutron::QoSMinimumBandwidthRule`` to support "
+"``minimum_bandwidth_rules`` in Neutron QoS. This resource depends on Neutron "
+"API extension ``qos-bw-minimum-ingress`` and according to the default policy "
+"it is admin-only."
+msgstr ""
+"New resource ``OS::Neutron::QoSMinimumBandwidthRule`` to support "
+"``minimum_bandwidth_rules`` in Neutron QoS. This resource depends on Neutron "
+"API extension ``qos-bw-minimum-ingress`` and according to the default policy "
+"it is admin-only."
+
msgid "New resource ``OS::Neutron::Quota`` is added to manage neutron quotas."
msgstr "New resource ``OS::Neutron::Quota`` is added to manage Neutron quotas."
@@ -983,6 +1316,13 @@ msgstr ""
"balancing algorithm, and the nodes themselves."
msgid ""
+"New resource ``OS::Octavia::Quota`` is added to enable an admin to manage "
+"Octavia service quotas for a specific project."
+msgstr ""
+"New resource ``OS::Octavia::Quota`` is added to enable an admin to manage "
+"Octavia service quotas for a specific project."
+
+msgid ""
"New resource ``OS::Senlin::Cluster`` is added to create a cluster in senlin. "
"A cluster is a group of homogeneous nodes."
msgstr ""
@@ -1022,6 +1362,13 @@ msgstr ""
"sources."
msgid ""
+"New resource type ``OS::Ironic::Port`` is now supported in orchestration "
+"service."
+msgstr ""
+"New resource type ``OS::Ironic::Port`` is now supported in orchestration "
+"service."
+
+msgid ""
"New resources for Neutron Load Balancer version 2. These are unique for "
"version 2 and do not support or mix with existing version 1 resources."
msgstr ""
@@ -1032,6 +1379,17 @@ msgid "Newton Series Release Notes"
msgstr "Newton Series Release Notes"
msgid ""
+"Non-ASCII text that appears in parameter constraints (e.g. in the "
+"description of a constraint, or a list of allowed values) will now be "
+"handled correctly when generating error messages if the constraint is not "
+"met."
+msgstr ""
+"Non-ASCII text that appears in parameter constraints (e.g. in the "
+"description of a constraint, or a list of allowed values) will now be "
+"handled correctly when generating error messages if the constraint is not "
+"met."
+
+msgid ""
"Note that Heat is compatible with OpenStack Identity federation, even when "
"using Keystone trusts. It should work after you enable Federation and build "
"the `auto-provisioning map`_ with the heat service user in Keystone. Auto-"
@@ -1043,6 +1401,15 @@ msgstr ""
"provisioning has been available in Keystone since the Ocata release."
msgid ""
+"Nova has removed api extension support and its api bindings. Heat has now "
+"removed support for extensions from nova client plugin and the resource "
+"plugins using it."
+msgstr ""
+"Nova has removed API extension support and its API bindings. Heat has now "
+"removed support for extensions from Nova client plugin and the resource "
+"plugins using it."
+
+msgid ""
"Now heat keystone user name charaters limit increased from 64 to 255. Any "
"extra charaters will lost when truncate the name to the last 255 charaters."
msgstr ""
@@ -1059,6 +1426,13 @@ msgstr ""
"in Newton release."
msgid ""
+"OS::Aodh::LBMemberHealthAlarm resource plugin is added to manage Aodh "
+"loadbalancer_member_health alarm."
+msgstr ""
+"OS::Aodh::LBMemberHealthAlarm resource plugin is added to manage Aodh "
+"loadbalancer_member_health alarm."
+
+msgid ""
"OS::Cinder::QoSAssociation resource plugin is added to support cinder QoS "
"Specs Association with Volume Types, which is provided by cinder ``qos-"
"specs`` API extension."
@@ -1105,6 +1479,13 @@ msgstr ""
"template feature, which is provided by Magnum ``clustertemplates`` API."
msgid ""
+"OS::Manila::Share now properly supports 'cephx' as a value for property "
+"'{\"access_rules\": [{\"access_type\": \"\"}]}'."
+msgstr ""
+"OS::Manila::Share now properly supports 'cephx' as a value for property "
+"'{\"access_rules\": [{\"access_type\": \"\"}]}'."
+
+msgid ""
"OS::Monasca::AlarmDefinition and OS::Monasca::Notification resource plug-ins "
"are now supported by heat community as monasca became offcial OpenStack "
"project."
@@ -1166,12 +1547,44 @@ msgid "Ocata Series Release Notes"
msgstr "Ocata Series Release Notes"
msgid ""
+"On clouds where Keystone usernames are `case-insensitive <https://docs."
+"openstack.org/keystone/latest/admin/case-insensitive.html>`_, Heat will now "
+"allow usernames with any case as property and parameter values where a "
+"Keystone user is expected (i.e. a ``keystone.user`` custom constraint "
+"applies). Previously the case had to match the case with which the name was "
+"stored in Keystone, even if Keystone itself was case-insensitive."
+msgstr ""
+"On clouds where Keystone usernames are `case-insensitive <https://docs."
+"openstack.org/keystone/latest/admin/case-insensitive.html>`_, Heat will now "
+"allow usernames with any case as property and parameter values where a "
+"Keystone user is expected (i.e. a ``keystone.user`` custom constraint "
+"applies). Previously the case had to match the case with which the name was "
+"stored in Keystone, even if Keystone itself was case-insensitive."
+
+msgid ""
"Operator can now use new CLI tool ``heat-status upgrade check`` to check if "
"Heat deployment can be safely upgraded from N-1 to N release."
msgstr ""
"Operator can now use new CLI tool ``heat-status upgrade check`` to check if "
"Heat deployment can be safely upgraded from N-1 to N release."
+msgid ""
+"Operators can now apply different authorization policies to each action "
+"supported by the action API (``actions:suspend`` for suspend, ``actions:"
+"resume`` for resume, ``actions:check`` for check, ``actions:cancel_update`` "
+"for cancel operation and roll back, and ``actions:cancel_without_rollback`` "
+"for cancel operation without rolling back). The default for each is to use "
+"the existing ``actions:action`` rule that was previously the only way to "
+"specify policy for actions."
+msgstr ""
+"Operators can now apply different authorisation policies to each action "
+"supported by the action API (``actions:suspend`` for suspend, ``actions:"
+"resume`` for resume, ``actions:check`` for check, ``actions:cancel_update`` "
+"for cancel operation and roll back, and ``actions:cancel_without_rollback`` "
+"for cancel operation without rolling back). The default for each is to use "
+"the existing ``actions:action`` rule that was previously the only way to "
+"specify policy for actions."
+
msgid "Other Notes"
msgstr "Other Notes"
@@ -1268,6 +1681,20 @@ msgstr ""
"the delete API call returns, so any subsequent polling will reflect up-to-"
"date information."
+msgid ""
+"Properties of the VPNaaS ``OS::Neutron::IKEPolicy`` resource can now be "
+"updated in place."
+msgstr ""
+"Properties of the VPNaaS ``OS::Neutron::IKEPolicy`` resource can now be "
+"updated in place."
+
+msgid ""
+"Python 2 is no longer supported. This release runs only on Python 3 and is "
+"tested only on Python 3.6 and 3.7."
+msgstr ""
+"Python 2 is no longer supported. This release runs only on Python 3 and is "
+"tested only on Python 3.6 and 3.7."
+
msgid "Queens Series Release Notes"
msgstr "Queens Series Release Notes"
@@ -1374,10 +1801,53 @@ msgstr ""
msgid "Start using reno to manage release notes."
msgstr "Start using Reno to manage release notes."
+msgid "Stein Series Release Notes"
+msgstr "Stein Series Release Notes"
+
+msgid ""
+"Support ``allowed_cidrs`` property for the resource ``OS::Octavia::"
+"Listener``, the property is allowed to be updated as well. The property "
+"'allowed_cidrs' was introduced in Octavia since Train release. The default "
+"value is empty list if it is not specified in Heat template."
+msgstr ""
+"Support ``allowed_cidrs`` property for the resource ``OS::Octavia::"
+"Listener``, the property is allowed to be updated as well. The property "
+"'allowed_cidrs' was introduced in Octavia since Train release. The default "
+"value is empty list if it is not specified in Heat template."
+
+msgid ""
+"Support ``tags`` property for the resource ``OS::Octavia::PoolMember``, the "
+"property is allowed to be updated as well. The resource tag was introduced "
+"in Octavia since Stein release, do not specify tags in Heat template if you "
+"are using the previous versions."
+msgstr ""
+"Support ``tags`` property for the resource ``OS::Octavia::PoolMember``, the "
+"property is allowed to be updated as well. The resource tag was introduced "
+"in Octavia since Stein release, do not specify tags in Heat template if you "
+"are using the previous versions."
+
+msgid ""
+"Support ``tls_enabled`` property for the resource ``OS::Octavia::Pool``, the "
+"property is allowed to be updated as well. The property 'tls_enabled' was "
+"introduced in Octavia since Stein release. The default value is False if it "
+"is not specified in Heat template."
+msgstr ""
+"Support ``tls_enabled`` property for the resource ``OS::Octavia::Pool``, the "
+"property is allowed to be updated as well. The property 'tls_enabled' was "
+"introduced in Octavia since Stein release. The default value is False if it "
+"is not specified in Heat template."
+
msgid "Support external resource reference in template."
msgstr "Support external resource reference in template."
msgid ""
+"Support shared services in multi region mode. The services are declared in a "
+"list in config. shared_services_types=image, volume, volumev2."
+msgstr ""
+"Support shared services in multi region mode. The services are declared in a "
+"list in config. shared_services_types=image, volume, volumev2."
+
+msgid ""
"Support to managing rbac policy for 'qos_policy' resource, which allows to "
"share Neutron qos policy to subsets of tenants."
msgstr ""
@@ -1401,6 +1871,17 @@ msgstr ""
"this requires Nova api version equal or greater than 2.8."
msgid ""
+"Supports user, group, role and project lookup across domains. Added domain "
+"parameter to keystone lookup functions. Heat templates now support "
+"user{domain}, group{domain}, role{domain} and project{domain} to support "
+"cross domain lookup. Keystone constrains will also work across domain."
+msgstr ""
+"Supports user, group, role and project lookup across domains. Added domain "
+"parameter to keystone lookup functions. Heat templates now support "
+"user{domain}, group{domain}, role{domain} and project{domain} to support "
+"cross domain lookup. Keystone constrains will also work across domain."
+
+msgid ""
"Template validation is improved to ignore the given set of error codes. For "
"example, heat will report template as invalid one, if it does not find any "
"required OpenStack services in the cloud deployment and while authoring the "
@@ -1416,6 +1897,15 @@ msgstr ""
"refer the API documentation of validate template for more details."
msgid ""
+"Templates that creates roles but does not specify the domain will not get a "
+"\"default\" domain from now on. To have a domain added to your new role it "
+"needs to be assigned in the template."
+msgstr ""
+"Templates that creates roles but does not specify the domain will not get a "
+"\"default\" domain from now on. To have a domain added to your new role it "
+"needs to be assigned in the template."
+
+msgid ""
"The 'attachments' attribute of OS::Cinder::Volume has been deprecated in "
"favor of 'attachments_list', which has the correct type of LIST. This makes "
"this data easier for end users to process."
@@ -1492,6 +1982,32 @@ msgstr ""
"instead."
msgid ""
+"The ``OS::Designate::Zone`` resource type's ``masters`` property is now "
+"known as ``primaries``. Existing templates will continue to work."
+msgstr ""
+"The ``OS::Designate::Zone`` resource type's ``masters`` property is now "
+"known as ``primaries``. Existing templates will continue to work."
+
+msgid ""
+"The ``OS::Glance::WebImage`` resource type now supports a ``members`` "
+"property for managing a list of other tenants with access to the Image."
+msgstr ""
+"The ``OS::Glance::WebImage`` resource type now supports a ``members`` "
+"property for managing a list of other tenants with access to the Image."
+
+msgid ""
+"The ``OS::Glance::WebImage`` resource type now supports an ``active`` "
+"property to allow administrators to deactivate and reactivate the Image. "
+"Images remain active by default."
+msgstr ""
+"The ``OS::Glance::WebImage`` resource type now supports an ``active`` "
+"property to allow administrators to deactivate and reactivate the Image. "
+"Images remain active by default."
+
+msgid "The ``OS::Heat::Delay`` resource type is now usable."
+msgstr "The ``OS::Heat::Delay`` resource type is now usable."
+
+msgid ""
"The ``OS::Heat::HARestarter`` resource type is no longer supported. This "
"resource type is now hidden from the documentation. HARestarter resources in "
"stacks, including pre-existing ones, are now only placeholders and will no "
@@ -1513,6 +2029,38 @@ msgstr ""
"heat-templates/tree/hot/autohealing"
msgid ""
+"The ``OS::Neutron::QosBandwidthLimitRule`` resource type now supports an "
+"optional ``direction`` property, allowing users to set the ingress bandwidth "
+"limit in a QoS rule. Previously only the egress bandwidth limit could be set."
+msgstr ""
+"The ``OS::Neutron::QosBandwidthLimitRule`` resource type now supports an "
+"optional ``direction`` property, allowing users to set the ingress bandwidth "
+"limit in a QoS rule. Previously only the egress bandwidth limit could be set."
+
+msgid ""
+"The ``OS::Trove::Cluster`` resource type now supports specifying an "
+"availability zone."
+msgstr ""
+"The ``OS::Trove::Cluster`` resource type now supports specifying an "
+"availability zone."
+
+msgid ""
+"The ``accessIPv4`` and ``accessIPv6`` attributes of the ``OS::Nova::Server`` "
+"resource are now deprecated, since Nova returns empty values for them. Use "
+"the ``addresses`` attribute instead to get IP addresses."
+msgstr ""
+"The ``accessIPv4`` and ``accessIPv6`` attributes of the ``OS::Nova::Server`` "
+"resource are now deprecated, since Nova returns empty values for them. Use "
+"the ``addresses`` attribute instead to get IP addresses."
+
+msgid ""
+"The ``lb_algorithm`` property of ``OS::Octavia::Pool`` resource now supports "
+"SOURCE_IP_PORT option required for Octavia OVN provider driver."
+msgstr ""
+"The ``lb_algorithm`` property of ``OS::Octavia::Pool`` resource now supports "
+"SOURCE_IP_PORT option required for Octavia OVN provider driver."
+
+msgid ""
"The ``resource mark unhealthy`` command now accepts either a logical "
"resource name (as it did previously) or a physical resource ID to identify "
"the resource to be marked unhealthy."
@@ -1522,6 +2070,39 @@ msgstr ""
"the resource to be marked unhealthy."
msgid ""
+"The ``wallaby`` template version introduces a new 2-argument form of the "
+"``if`` function. This allows users to specify optional property values, so "
+"that when the condition is false Heat treats it the same as if no value were "
+"specified for the property at all. The behaviour of existing templates is "
+"unchanged, even after updating the template version to ``wallaby``."
+msgstr ""
+"The ``wallaby`` template version introduces a new 2-argument form of the "
+"``if`` function. This allows users to specify optional property values, so "
+"that when the condition is false Heat treats it the same as if no value were "
+"specified for the property at all. The behaviour of existing templates is "
+"unchanged, even after updating the template version to ``wallaby``."
+
+msgid ""
+"The `firewall_rules` property of the `OS::Neutron::FirewallPolicy` resource "
+"type is now optional."
+msgstr ""
+"The `firewall_rules` property of the `OS::Neutron::FirewallPolicy` resource "
+"type is now optional."
+
+msgid ""
+"The behavior of ``get_resource`` on an ``OS::Heat::AutoScalingGroup`` "
+"resource has changed. Previously it returned the physical resource name (i."
+"e. the name of the nested Heat stack which implemented the group). It will "
+"now return the UUID of the nested stack if available. This will also apply "
+"to any resource type that inherits from ``OS::Heat::AutoScalingGroup``."
+msgstr ""
+"The behavior of ``get_resource`` on an ``OS::Heat::AutoScalingGroup`` "
+"resource has changed. Previously it returned the physical resource name (i."
+"e. the name of the nested Heat stack which implemented the group). It will "
+"now return the UUID of the nested stack if available. This will also apply "
+"to any resource type that inherits from ``OS::Heat::AutoScalingGroup``."
+
+msgid ""
"The ceilometer client plugin is no longer provided, due to the Ceilometer "
"API no longer being available from Queens and the python-ceilometerclient "
"library being unmaintained."
@@ -1596,6 +2177,34 @@ msgstr ""
"repository."
msgid ""
+"The ordering in the list of segments returned by ``OS::Neutron::Net`` "
+"resources is not predictable. Stack updates changeing attributes of the "
+"network can cause the list of segments to shift."
+msgstr ""
+"The ordering in the list of segments returned by ``OS::Neutron::Net`` "
+"resources is not predictable. Stack updates changing attributes of the "
+"network can cause the list of segments to shift."
+
+msgid ""
+"The ordering is now slightly more predictable, segments with name=``None`` "
+"are now placed first in the list. This doesn't guarantee the order, but "
+"typically only the segment implicitly created by neutron has no name "
+"attribute set. The template author should ensure other segments on the "
+"network does have a name set, so that the implicit segment will always be "
+"index 0. Resolving attributes of the implcitly created segment on the "
+"network resource can then predictibly happen using index 0. See `bug: "
+"1894920 <https://bugs.launchpad.net/tripleo/+bug/1894920>`_."
+msgstr ""
+"The ordering is now slightly more predictable, segments with name=``None`` "
+"are now placed first in the list. This doesn't guarantee the order, but "
+"typically only the segment implicitly created by neutron has no name "
+"attribute set. The template author should ensure other segments on the "
+"network does have a name set, so that the implicit segment will always be "
+"index 0. Resolving attributes of the implicitly created segment on the "
+"network resource can then predictably happen using index 0. See `bug: "
+"1894920 <https://bugs.launchpad.net/tripleo/+bug/1894920>`_."
+
+msgid ""
"The template validate API call now returns the Environment calculated by "
"heat - this enables preview of the merged environment when using "
"parameter_merge_strategy prior to creating the stack"
@@ -1604,6 +2213,30 @@ msgstr ""
"heat - this enables preview of the merged environment when using "
"parameter_merge_strategy prior to creating the stack"
+msgid ""
+"There was a mismatch between the way heat create role behaved with the "
+"templates or with the openstack CLI on what relates to the default domain if "
+"the domain is not specified on both cases the CLI will not assign on to the "
+"created new role but the heat templates will assign the \"default\" domain"
+msgstr ""
+"There was a mismatch between the way heat create role behaved with the "
+"templates or with the OpenStack CLI on what relates to the default domain if "
+"the domain is not specified on both cases the CLI will not assign on to the "
+"created new role but the heat templates will assign the \"default\" domain"
+
+msgid ""
+"This feature allows the deployer to unambiguously specify the keystone "
+"endpoint passed to user provisioned servers, and is particularly useful "
+"where the deployment network architecture requires the heat service to "
+"interact with the internal endpoint, but user provisioned servers only have "
+"access to the external network."
+msgstr ""
+"This feature allows the deployer to unambiguously specify the keystone "
+"endpoint passed to user provisioned servers, and is particularly useful "
+"where the deployment network architecture requires the heat service to "
+"interact with the internal endpoint, but user provisioned servers only have "
+"access to the external network."
+
msgid "This feature only supports templates with version over `2016-10-14`."
msgstr "This feature only supports templates with version over `2016-10-14`."
@@ -1616,6 +2249,9 @@ msgstr ""
"Ocata. Please use ``OS::Aodh::GnocchiAggregationByResourcesAlarm`` in place "
"of ``OS::Aodh::Alarm``."
+msgid "Train Series Release Notes"
+msgstr "Train Series Release Notes"
+
msgid ""
"Two new policies soft-affinity and soft-anti-affinity have been supported "
"for the OS::Nova::ServerGroup resource."
@@ -1623,9 +2259,42 @@ msgstr ""
"Two new policies soft-affinity and soft-anti-affinity have been supported "
"for the OS::Nova::ServerGroup resource."
+msgid ""
+"Unsupported contrib resource ``OS::Neutron::ExtraRoute`` is deprecated in "
+"favor of ``OS::Neutron::ExtraRouteSet`` on all OpenStack clouds where "
+"Neutron extension ``extraroute-atomic`` is available."
+msgstr ""
+"Unsupported contrib resource ``OS::Neutron::ExtraRoute`` is deprecated in "
+"favor of ``OS::Neutron::ExtraRouteSet`` on all OpenStack clouds where "
+"Neutron extension ``extraroute-atomic`` is available."
+
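A minimal sketch of the suggested replacement (the router reference and CIDRs are placeholders), assuming the Neutron ``extraroute-atomic`` extension is enabled::

  resources:
    extra_routes:
      type: OS::Neutron::ExtraRouteSet
      properties:
        router: {get_resource: router}      # hypothetical router resource
        routes:
          - destination: 10.0.10.0/24
            nexthop: 192.168.0.254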
msgid "Upgrade Notes"
msgstr "Upgrade Notes"
+msgid "Ussuri Series Release Notes"
+msgstr "Ussuri Series Release Notes"
+
+msgid "Victoria Series Release Notes"
+msgstr "Victoria Series Release Notes"
+
+msgid ""
+"We have change some log pathes as below * Migrate ``heat.engine.clients."
+"keystoneclient`` to ``heat.engine.clients.os.keystone.heat_keystoneclient`` "
+"* remove ``heat.all`` * remove ``heat.api`` * remove ``heat.api.cfn`` * "
+"remove ``heat.engine``"
+msgstr ""
+"We have change some log pathes as below * Migrate ``heat.engine.clients."
+"keystoneclient`` to ``heat.engine.clients.os.keystone.heat_keystoneclient`` "
+"* remove ``heat.all`` * remove ``heat.api`` * remove ``heat.api.cfn`` * "
+"remove ``heat.engine``"
+
+msgid ""
+"We now allowed global admins to operate software deployment and software "
+"config resources from other projects."
+msgstr ""
+"We now allowed global admins to operate software deployment and software "
+"config resources from other projects."
+
msgid ""
"When 'nested_depth' is set the response also includes an extra entry in the "
"'links' list with 'rel' set to 'root_stack'. This can be used by client side "
@@ -1638,11 +2307,77 @@ msgstr ""
"side recursive event fetching."
msgid ""
+"When loading a Resource plugin, the attribute schema is now validated in the "
+"same way that the properties schema is. Third-party resource plugins should "
+"be tested to check that they still comply."
+msgstr ""
+"When loading a Resource plugin, the attribute schema is now validated in the "
+"same way that the properties schema is. Third-party resource plugins should "
+"be tested to check that they still comply."
+
+msgid ""
+"With both ``reauthentication_auth_method`` set to ``trusts`` and "
+"``allow_trusts_redelegation`` set to ``True`` (new config option, ``False`` "
+"by default), Heat will always create trusts with enabled redelegation, for "
+"both trusts used for long running stacks and for trusts used for deferred "
+"authentication. This have security implications and is only recommended when "
+"Heat is set to use trust and you experience problems with other services "
+"Heat consumes that also require to create trusts from token being passed by "
+"Heat (examples are Aodh and Heat running in another region)."
+msgstr ""
+"With both ``reauthentication_auth_method`` set to ``trusts`` and "
+"``allow_trusts_redelegation`` set to ``True`` (new config option, ``False`` "
+"by default), Heat will always create trusts with enabled redelegation, for "
+"both trusts used for long running stacks and for trusts used for deferred "
+"authentication. This have security implications and is only recommended when "
+"Heat is set to use trust and you experience problems with other services "
+"Heat consumes that also require to create trusts from token being passed by "
+"Heat (examples are Aodh and Heat running in another region)."
+
+msgid ""
+"``OS::Neutron::Port`` resources will now be replaced when the "
+"``mac_address`` property is modified. Neutron is unable to update the MAC "
+"address of a port once the port is in use."
+msgstr ""
+"``OS::Neutron::Port`` resources will now be replaced when the "
+"``mac_address`` property is modified. Neutron is unable to update the MAC "
+"address of a port once the port is in use."
+
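For example, in a sketch like the one below (network name and MAC address are placeholders), updating ``mac_address`` now causes the port to be replaced rather than updated in place::

  resources:
    app_port:
      type: OS::Neutron::Port
      properties:
        network: private                  # assumed network name
        mac_address: fa:16:3e:01:02:03    # changing this on stack update replaces the port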
+msgid ""
+"``multiattach``` property in ``OS::Cinder::Volume`` is now hidden. Please "
+"use ``multiattach`` key in ``metadata`` property of ``OS::Cinder::"
+"VolumeType`` instead."
+msgstr ""
+"``multiattach``` property in ``OS::Cinder::Volume`` is now hidden. Please "
+"use ``multiattach`` key in ``metadata`` property of ``OS::Cinder::"
+"VolumeType`` instead."
+
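A hedged sketch of the replacement pattern; the ``<is> True`` value follows the usual Cinder extra-spec syntax and is an assumption to verify against your Cinder release::

  resources:
    multiattach_type:
      type: OS::Cinder::VolumeType
      properties:
        name: multiattach-type
        metadata:
          multiattach: "<is> True"   # Cinder extra-spec syntax; verify for your release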
+msgid ""
+"``personality`` property of ``OS::Nova::Server`` is now deprecated, please "
+"use ``user_data`` or ``metadata`` instead. If that property really required, "
+"use config ``max_nova_api_microversion`` to set the maximum nova API "
+"microversion <2.57 for nova client plugin to support personality property."
+msgstr ""
+"``personality`` property of ``OS::Nova::Server`` is now deprecated, please "
+"use ``user_data`` or ``metadata`` instead. If that property really required, "
+"use config ``max_nova_api_microversion`` to set the maximum nova API "
+"microversion <2.57 for Nova client plugin to support personality property."
+
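A minimal sketch of the recommended replacement (image and flavor names are assumptions), delivering a file through ``user_data`` rather than ``personality``::

  resources:
    server:
      type: OS::Nova::Server
      properties:
        image: cirros                      # assumed image name
        flavor: m1.small                   # assumed flavor name
        user_data_format: RAW
        user_data: |
          #cloud-config
          write_files:
            - path: /etc/motd
              content: "delivered via user_data instead of personality"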
+msgid ""
"cinder.qos_specs constraint added to support to validate QoS Specs attribute."
msgstr ""
"cinder.qos_specs constraint added to support to validate QoS Specs attribute."
msgid ""
+"file injection is deprecated in compute api. Deprecating injected_files, "
+"injected_file_content_bites, and injected_file_path_bytes properties "
+"accordingly in OS::Nova::Quota resource."
+msgstr ""
+"file injection is deprecated in compute API. Deprecating injected_files, "
+"injected_file_content_bites, and injected_file_path_bytes properties "
+"accordingly in OS::Nova::Quota resource."
+
+msgid ""
"nova-network is no longer supported in OpenStack. Please use OS::Neutron::"
"FloatingIPAssociation and OS::Neutron::FloatingIP in place of OS::Nova::"
"FloatingIPAssociation and OS::Nova::FloatingIP"
diff --git a/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po
index d035fc5a1..549ff2616 100644
--- a/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po
@@ -1,19 +1,75 @@
# minwook-shin <minwook0106@gmail.com>, 2017. #zanata
+# Hongjae Kim <neo415ha@gmail.com>, 2019. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: heat\n"
+"Project-Id-Version: openstack-heat\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2018-09-10 05:09+0000\n"
+"POT-Creation-Date: 2019-12-20 05:35+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2017-08-05 01:49+0000\n"
-"Last-Translator: minwook-shin <minwook0106@gmail.com>\n"
+"PO-Revision-Date: 2019-11-02 08:14+0000\n"
+"Last-Translator: Hongjae Kim <neo415ha@gmail.com>\n"
"Language-Team: Korean (South Korea)\n"
"Language: ko_KR\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=1; plural=0\n"
+msgid ""
+"'CEPHFS' can be used as a share protocol when using OS::Manila::Share "
+"resource."
+msgstr ""
+"OS 사용 시 'CEPHFS'를 공유 프로토콜로 사용할 수 있음:Manila::리소스를 공유하"
+"십시오."
+
+msgid "10.0.0"
+msgstr "10.0.0"
+
+msgid "10.0.0.0b1"
+msgstr "10.0.0.0b1"
+
+msgid "10.0.0.0b2"
+msgstr "10.0.0.0b2"
+
+msgid "10.0.0.0b3"
+msgstr "10.0.0.0b3"
+
+msgid "10.0.0.0rc1"
+msgstr "10.0.0.0rc1"
+
+msgid "10.0.1"
+msgstr "10.0.1"
+
+msgid "10.0.2"
+msgstr "10.0.2"
+
+msgid "11.0.0"
+msgstr "11.0.0"
+
+msgid "11.0.0.0b1"
+msgstr "11.0.0.0b1"
+
+msgid "11.0.0.0b3"
+msgstr "11.0.0.0b3"
+
+msgid "11.0.0.0rc1"
+msgstr "11.0.0.0rc1"
+
+msgid "11.0.2"
+msgstr "11.0.2"
+
+msgid "12.0.0"
+msgstr "12.0.0"
+
+msgid "12.0.0.0rc1"
+msgstr "12.0.0.0rc1"
+
+msgid "13.0.0"
+msgstr "13.0.0"
+
+msgid "13.0.0.0rc1"
+msgstr "13.0.0.0rc1"
+
msgid "5.0.1"
msgstr "5.0.1"
@@ -23,6 +79,15 @@ msgstr "6.0.0"
msgid "7.0.0"
msgstr "7.0.0"
+msgid "7.0.0.0b1"
+msgstr "7.0.0.0b1"
+
+msgid "7.0.0.0b2"
+msgstr "7.0.0.0b2"
+
+msgid "7.0.0.0b3"
+msgstr "7.0.0.0b3"
+
msgid "8.0.0"
msgstr "8.0.0"
diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst
new file mode 100644
index 000000000..583900393
--- /dev/null
+++ b/releasenotes/source/train.rst
@@ -0,0 +1,6 @@
+==========================
+Train Series Release Notes
+==========================
+
+.. release-notes::
+ :branch: stable/train
diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst
new file mode 100644
index 000000000..e21e50e0c
--- /dev/null
+++ b/releasenotes/source/ussuri.rst
@@ -0,0 +1,6 @@
+===========================
+Ussuri Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/ussuri
diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst
new file mode 100644
index 000000000..4efc7b6f3
--- /dev/null
+++ b/releasenotes/source/victoria.rst
@@ -0,0 +1,6 @@
+=============================
+Victoria Series Release Notes
+=============================
+
+.. release-notes::
+ :branch: stable/victoria
diff --git a/requirements.txt b/requirements.txt
index 27e5e2bd8..46ef80646 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,64 +2,68 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr!=2.1.0,>=2.0.0 # Apache-2.0
+pbr>=3.1.1 # Apache-2.0
Babel!=2.4.0,>=2.3.4 # BSD
+ddt>=1.4.1 # MIT
croniter>=0.3.4 # MIT License
-cryptography>=2.1 # BSD/Apache-2.0
-eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT
-keystoneauth1>=3.4.0 # Apache-2.0
-keystonemiddleware>=4.17.0 # Apache-2.0
-lxml!=3.7.0,>=3.4.1 # BSD
+cryptography>=2.5 # BSD/Apache-2.0
+debtcollector>=1.19.0 # Apache-2.0
+eventlet!=0.18.3,!=0.20.1,!=0.21.0,!=0.23.0,!=0.25.0,>=0.18.2 # MIT
+keystoneauth1>=3.18.0 # Apache-2.0
+keystonemiddleware>=5.1.0 # Apache-2.0
+lxml!=3.7.0,>=4.5.0 # BSD
netaddr>=0.7.18 # BSD
neutron-lib>=1.14.0 # Apache-2.0
openstacksdk>=0.11.2 # Apache-2.0
oslo.cache>=1.26.0 # Apache-2.0
-oslo.config>=5.2.0 # Apache-2.0
+oslo.config>=6.0.0 # Apache-2.0
oslo.concurrency>=3.26.0 # Apache-2.0
-oslo.context>=2.19.2 # Apache-2.0
-oslo.db>=4.27.0 # Apache-2.0
-oslo.i18n>=3.15.3 # Apache-2.0
-oslo.log>=3.36.0 # Apache-2.0
+oslo.context>=2.22.0 # Apache-2.0
+oslo.db>=6.0.0 # Apache-2.0
+oslo.i18n>=3.20.0 # Apache-2.0
+oslo.log>=4.3.0 # Apache-2.0
oslo.messaging>=5.29.0 # Apache-2.0
oslo.middleware>=3.31.0 # Apache-2.0
-oslo.policy>=1.30.0 # Apache-2.0
+oslo.policy>=3.7.0 # Apache-2.0
oslo.reports>=1.18.0 # Apache-2.0
-oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
+oslo.serialization>=2.25.0 # Apache-2.0
oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
oslo.upgradecheck>=0.1.0 # Apache-2.0
-oslo.utils>=3.37.0 # Apache-2.0
+oslo.utils>=3.40.0 # Apache-2.0
osprofiler>=1.4.0 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
PasteDeploy>=1.5.0 # MIT
aodhclient>=0.9.0 # Apache-2.0
python-barbicanclient>=4.5.2 # Apache-2.0
python-blazarclient>=1.0.1 # Apache-2.0
+python-ceilometerclient>=2.5.0
python-cinderclient>=3.3.0 # Apache-2.0
python-designateclient>=2.7.0 # Apache-2.0
python-glanceclient>=2.8.0 # Apache-2.0
python-heatclient>=1.10.0 # Apache-2.0
+python-ironicclient>=2.8.0 # Apache-2.0
python-keystoneclient>=3.8.0 # Apache-2.0
python-magnumclient>=2.3.0 # Apache-2.0
python-manilaclient>=1.16.0 # Apache-2.0
python-mistralclient!=3.2.0,>=3.1.0 # Apache-2.0
python-monascaclient>=1.12.0 # Apache-2.0
-python-neutronclient>=6.7.0 # Apache-2.0
+python-neutronclient>=6.14.0 # Apache-2.0
python-novaclient>=9.1.0 # Apache-2.0
-python-octaviaclient>=1.3.0 # Apache-2.0
+python-octaviaclient>=1.8.0 # Apache-2.0
python-openstackclient>=3.12.0 # Apache-2.0
python-saharaclient>=1.4.0 # Apache-2.0
python-swiftclient>=3.2.0 # Apache-2.0
python-troveclient>=2.2.0 # Apache-2.0
+python-vitrageclient>=2.7.0 # Apache-2.0
python-zaqarclient>=1.3.0 # Apache-2.0
-python-zunclient>=2.0.0 # Apache-2.0
+python-zunclient>=3.4.0 # Apache-2.0
pytz>=2013.6 # MIT
-PyYAML>=3.12 # MIT
+PyYAML>=5.1 # MIT
requests>=2.14.2 # Apache-2.0
-tenacity>=4.4.0 # Apache-2.0
+tenacity>=6.1.0 # Apache-2.0
Routes>=2.3.1 # MIT
-six>=1.10.0 # MIT
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
-sqlalchemy-migrate>=0.11.0 # Apache-2.0
-stevedore>=1.20.0 # Apache-2.0
+sqlalchemy-migrate>=0.13.0 # Apache-2.0
+stevedore>=3.1.0 # Apache-2.0
WebOb>=1.7.1 # MIT
yaql>=1.1.3 # Apache 2.0 License
diff --git a/roles/run-heat-tests/defaults/main.yaml b/roles/run-heat-tests/defaults/main.yaml
new file mode 100644
index 000000000..b601d49d9
--- /dev/null
+++ b/roles/run-heat-tests/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+tempest_test_timeout: ''
diff --git a/roles/run-heat-tests/tasks/main.yaml b/roles/run-heat-tests/tasks/main.yaml
new file mode 100644
index 000000000..75122f2a1
--- /dev/null
+++ b/roles/run-heat-tests/tasks/main.yaml
@@ -0,0 +1,9 @@
+- name: Run heat tests
+ command: tox -evenv-tempest -- stestr --test-path={{devstack_base_dir}}/heat/heat_integrationtests \
+ --top-dir={{devstack_base_dir}}/heat \
+ --group_regex='heat_tempest_plugin\.tests\.api\.test_heat_api[._]([^_]+)' run
+ args:
+ chdir: "{{devstack_base_dir}}/tempest"
+ become: true
+ become_user: tempest
+ environment: '{{ {"OS_TEST_TIMEOUT": tempest_test_timeout} if tempest_test_timeout else {} }}'
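A minimal sketch of how a job playbook might consume this role (the host group and timeout value are assumptions)::

  - hosts: tempest                      # assumed host group
    roles:
      - role: run-heat-tests
        devstack_base_dir: /opt/stack
        tempest_test_timeout: '1200'    # seconds; empty string disables the override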
diff --git a/setup.cfg b/setup.cfg
index 0568c8e62..f66ec65dc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,6 +6,7 @@ description-file =
author = OpenStack
author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/heat/latest/
+python-requires = >=3.6
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -13,11 +14,10 @@ classifier =
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
- Programming Language :: Python :: 2
- Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
[files]
data_files =
@@ -75,6 +75,7 @@ heat.clients =
designate = heat.engine.clients.os.designate:DesignateClientPlugin
glance = heat.engine.clients.os.glance:GlanceClientPlugin
heat = heat.engine.clients.os.heat_plugin:HeatClientPlugin
+ ironic = heat.engine.clients.os.ironic:IronicClientPlugin
keystone = heat.engine.clients.os.keystone:KeystoneClientPlugin
magnum = heat.engine.clients.os.magnum:MagnumClientPlugin
manila = heat.engine.clients.os.manila:ManilaClientPlugin
@@ -88,6 +89,7 @@ heat.clients =
senlin = heat.engine.clients.os.senlin:SenlinClientPlugin
swift = heat.engine.clients.os.swift:SwiftClientPlugin
trove = heat.engine.clients.os.trove:TroveClientPlugin
+ vitrage = heat.engine.clients.os.vitrage:VitrageClientPlugin
zaqar = heat.engine.clients.os.zaqar:ZaqarClientPlugin
zun = heat.engine.clients.os.zun:ZunClientPlugin
@@ -108,12 +110,12 @@ heat.constraints =
# service constraints
barbican.container = heat.engine.clients.os.barbican:ContainerConstraint
barbican.secret = heat.engine.clients.os.barbican:SecretConstraint
+ blazar.reservation = heat.engine.clients.os.blazar:ReservationConstraint
cinder.backup = heat.engine.clients.os.cinder:VolumeBackupConstraint
cinder.qos_specs = heat.engine.clients.os.cinder:QoSSpecsConstraint
cinder.snapshot = heat.engine.clients.os.cinder:VolumeSnapshotConstraint
cinder.volume = heat.engine.clients.os.cinder:VolumeConstraint
cinder.vtype = heat.engine.clients.os.cinder:VolumeTypeConstraint
- designate.domain = heat.engine.clients.os.designate:DesignateDomainConstraint
designate.zone = heat.engine.clients.os.designate:DesignateZoneConstraint
glance.image = heat.engine.clients.os.glance:ImageConstraint
keystone.domain = heat.engine.clients.os.keystone.keystone_constraints:KeystoneDomainConstraint
@@ -158,6 +160,8 @@ heat.constraints =
octavia.loadbalancer = heat.engine.clients.os.octavia:LoadbalancerConstraint
octavia.l7policy = heat.engine.clients.os.octavia:L7PolicyConstraint
octavia.pool = heat.engine.clients.os.octavia:PoolConstraint
+ octavia.flavor = heat.engine.clients.os.octavia:FlavorConstraint
+ octavia.flavorprofile = heat.engine.clients.os.octavia:FlavorProfileConstraint
sahara.cluster = heat.engine.clients.os.sahara:ClusterConstraint
sahara.cluster_template = heat.engine.clients.os.sahara:ClusterTemplateConstraint
sahara.data_source = heat.engine.clients.os.sahara:DataSourceConstraint
@@ -172,6 +176,9 @@ heat.constraints =
senlin.profile_type = heat.engine.clients.os.senlin:ProfileTypeConstraint
trove.flavor = heat.engine.clients.os.trove:FlavorConstraint
zaqar.queue = heat.engine.clients.os.zaqar:QueueConstraint
+ #ironic
+ ironic.portgroup = heat.engine.clients.os.ironic:PortGroupConstraint
+ ironic.node = heat.engine.clients.os.ironic:NodeConstraint
heat.stack_lifecycle_plugins =
@@ -196,10 +203,8 @@ heat.templates =
heat_template_version.queens = heat.engine.hot.template:HOTemplate20180302
heat_template_version.2018-08-31 = heat.engine.hot.template:HOTemplate20180831
heat_template_version.rocky = heat.engine.hot.template:HOTemplate20180831
-
-[global]
-setup-hooks =
- pbr.hooks.setup_hook
+ heat_template_version.2021-04-16 = heat.engine.hot.template:HOTemplate20210416
+ heat_template_version.wallaby = heat.engine.hot.template:HOTemplate20210416
[compile_catalog]
directory = heat/locale
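With the new entry points registered above, templates can opt into the 2021-04-16 HOT version under either name; a minimal sketch::

  heat_template_version: wallaby

  description: Minimal template exercising the 2021-04-16 / wallaby HOT version.

  resources: {}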
diff --git a/setup.py b/setup.py
index 566d84432..f63cc23c5 100644
--- a/setup.py
+++ b/setup.py
@@ -16,14 +16,6 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index d7bd8f6f8..bd3032139 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,20 +3,23 @@
# process, which may cause wedges in the gate later.
# Hacking already pins down pep8, pyflakes and flake8
-hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
+hacking>=3.0.1,<3.1.0 # Apache-2.0
+# remove this pyflakes from here once you bump the
+# hacking to 3.2.0 or above. hacking 3.2.0 takes
+# care of pyflakes version compatibility.
+pyflakes>=2.1.1
+
bandit!=1.6.0,>=1.1.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
-kombu!=4.0.2,>=4.0.0 # BSD
-mock>=2.0.0 # BSD
-PyMySQL>=0.7.6 # MIT License
+kombu!=4.0.2,>=5.0.1 # BSD
+PyMySQL>=0.8.0 # MIT License
oslotest>=3.2.0 # Apache-2.0
-psycopg2>=2.7 # LGPL/ZPL
stestr>=2.0.0 # Apache-2.0
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
-doc8>=0.6.0 # Apache-2.0
+doc8>=0.8.1 # Apache-2.0
Pygments>=2.2.0 # BSD license
# Next are used in integration tests only
tempest>=17.1.0 # Apache-2.0
diff --git a/tools/README.rst b/tools/README.rst
index 990f1d5a6..ee8d7db45 100644
--- a/tools/README.rst
+++ b/tools/README.rst
@@ -35,4 +35,4 @@ command under `gerrit-dash-creator` repo
The sample of heat.dash can be found under ./dashboards/
Get the output URL and add it to your gerrit menu
-(at ``https://review.openstack.org/#/settings/preferences``).
+(at ``https://review.opendev.org/#/settings/preferences``).
diff --git a/tools/custom_guidelines.py b/tools/custom_guidelines.py
index ce0defc80..53306d0cc 100644
--- a/tools/custom_guidelines.py
+++ b/tools/custom_guidelines.py
@@ -16,7 +16,6 @@ import re
import sys
from oslo_log import log
-import six
from heat.common.i18n import _
from heat.engine import constraints
@@ -101,7 +100,7 @@ class HeatCustomGuidelines(object):
def _check_resource_schemas(self, resource, schema, schema_name,
error_path=None):
- for key, value in six.iteritems(schema):
+ for key, value in schema.items():
if error_path is None:
error_path = [resource.__name__, key]
else:
@@ -129,7 +128,7 @@ class HeatCustomGuidelines(object):
error_path.pop()
def _check_resource_methods(self, resource):
- for method in six.itervalues(resource.__dict__):
+ for method in resource.__dict__.values():
# need to skip non-functions attributes
if not callable(method):
continue
@@ -159,7 +158,7 @@ class HeatCustomGuidelines(object):
cls_file = open(cls.__module__.replace('.', '/') + '.py')
except IOError as ex:
LOG.warning('Cannot perform trailing spaces check on '
- 'resource module: %s', six.text_type(ex))
+ 'resource module: %s', str(ex))
continue
lines = [line.strip() for line in cls_file.readlines()]
idx = 0
@@ -199,7 +198,7 @@ class HeatCustomGuidelines(object):
'with uppercase letter') % error_key.title(),
'snippet': description})
self.print_guideline_error(**error_kwargs)
- if not description.endswith('.'):
+ if not (description.endswith('.') or description.endswith('.)')):
error_kwargs.update(
{'message': _('%s description summary omitted '
'terminator at the end') % error_key.title(),
@@ -250,7 +249,8 @@ class HeatCustomGuidelines(object):
if re.search("^(:param|:type|:returns|:rtype|:raises)",
line):
params = True
- if not params and not doclines[-2].endswith('.'):
+ if not params and not (doclines[-2].endswith('.') or
+ doclines[-2].endswith('.)')):
error_kwargs.update(
{'message': _('%s description omitted '
'terminator at the end') % error_key.title(),
diff --git a/tools/dashboards/heat.dash b/tools/dashboards/heat.dash
index a272b987d..644f86702 100644
--- a/tools/dashboards/heat.dash
+++ b/tools/dashboards/heat.dash
@@ -11,6 +11,9 @@ foreach = (project:openstack/heat OR project:openstack/heat-agents OR
[section "Heat Specs"]
query = project:openstack/heat-specs
+[section "Stable branches"]
+query = branch:"^stable/.*"
+
[section "Stories & Bug Fixes"]
query = (intopic: ^story/.* OR intopic: ^bug/.* OR intopic: ^bp/.*)
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index 07a078547..505a58cb7 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -23,8 +23,8 @@ sudo -H mysqladmin -u root password $DB_ROOT_PW
sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
DELETE FROM mysql.user WHERE User='';
FLUSH PRIVILEGES;
- GRANT ALL PRIVILEGES ON *.*
- TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;"
+ CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW';
+ GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;"
# Now create our database.
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
diff --git a/tox.ini b/tox.ini
index 9ad149e0f..6df056455 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,33 +1,25 @@
[tox]
-envlist = py36,py37,py27,pep8
-minversion = 1.6
+envlist = py36,py37,py38,pep8
+ignore_basepython_conflict = True
+minversion = 3.1.0
skipsdist = True
[testenv]
+basepython = python3
setenv = VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
OS_TEST_PATH=heat/tests
usedevelop = True
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
-deps = -r{toxinidir}/requirements.txt
+deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
- find {toxinidir} -type f -not -path '{toxinidir}/.tox/*' -not -path '*/__pycache__/*' -name '*.py[c|o]' -delete
stestr run {posargs}
stestr slowest
-whitelist_externals =
- bash
- find
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
-[testenv:py27log]
-commands =
- find {toxinidir} -type f -not -path '{toxinidir}/.tox/*' -not -path '*/__pycache__/*' -name '*.py[c|o]' -delete
- stestr run '^(?!heat_integrationtests){posargs}'
-
[testenv:pep8]
-basepython = python3
commands =
flake8 heat bin/heat-api bin/heat-api-cfn bin/heat-engine bin/heat-manage contrib heat_integrationtests doc/source
python tools/custom_guidelines.py --exclude heat/engine/resources/aws
@@ -48,16 +40,13 @@ commands =
doc8 {posargs}
[testenv:venv]
-basepython = python3
commands = {posargs}
[testenv:cover]
-basepython = python3
setenv =
PYTHON=coverage run --source heat --parallel-mode
commands =
coverage erase
- find {toxinidir} -not -path '{toxinidir}/.tox/*' -not -path '*/__pycache__/*' -name '*.py[c|o]' -delete
stestr run {posargs}
coverage combine
coverage html -d cover
@@ -65,37 +54,44 @@ commands =
coverage report
[testenv:docs]
-basepython = python3
-deps = -r{toxinidir}/doc/requirements.txt
+whitelist_externals =
+ rm
+deps =
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/doc/requirements.txt
commands =
rm -rf doc/build
- sphinx-build -W -b html doc/source doc/build/html
+ sphinx-build -W --keep-going -b html doc/source doc/build/html
+
+[testenv:pdf-docs]
+deps = {[testenv:docs]deps}
+whitelist_externals =
+ make
+commands =
+ sphinx-build -W -b latex doc/source doc/build/pdf
+ make -C doc/build/pdf
[testenv:api-ref]
-basepython = python3
# This environment is called from CI scripts to test and publish
-# the API Ref to developer.openstack.org.
+# the API Ref to docs.openstack.org.
deps = -r{toxinidir}/doc/requirements.txt
-whitelist_externals = bash
- rm
+whitelist_externals = rm
commands =
rm -rf api-ref/build
- sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html
+ sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html
[testenv:genconfig]
-basepython = python3
commands =
oslo-config-generator --config-file=config-generator.conf
- oslo-config-generator --config-file=heat_integrationtests/config-generator.conf
[testenv:genpolicy]
-basepython = python3
commands =
oslopolicy-sample-generator --config-file etc/heat/heat-policy-generator.conf
[testenv:bandit]
-basepython = python3
-deps = -r{toxinidir}/test-requirements.txt
+deps =
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/test-requirements.txt
# The following bandit tests are being skipped:
# B101: Test for use of assert
# B104: Test for binding to all interfaces
@@ -113,8 +109,12 @@ commands = bandit -r heat -x tests --skip B101,B104,B107,B110,B310,B311,B404,B41
[flake8]
show-source = true
+# E123 closing bracket does not match indentation of opening bracket's line
+# W503 line break before binary operator
+# W504 line break after binary operator
+ignore = E123,W503,W504
exclude=.*,dist,*lib/python*,*egg,build,*convergence/scenarios/*
-max-complexity=20
+max-complexity=23
[doc8]
ignore = D001
@@ -122,19 +122,27 @@ ignore-path = .venv,.git,.tox,.tmp,*heat/locale*,*lib/python*,openstack_heat.egg
[hacking]
import_exceptions = heat.common.i18n
-local-check-factory = heat.hacking.checks.factory
+
+[flake8:local-plugins]
+extension =
+ Heat301 = checks:no_log_warn
+ Heat302 = checks:check_python3_no_iteritems
+ Heat303 = checks:check_python3_no_iterkeys
+ Heat304 = checks:check_python3_no_itervalues
+paths = ./heat/hacking
[testenv:debug]
-basepython = python3
commands = oslo_debug_helper {posargs}
[testenv:releasenotes]
-basepython = python3
+whitelist_externals =
+ rm
deps = -r{toxinidir}/doc/requirements.txt
-commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+commands =
+ rm -rf releasenotes/build
+ sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html
[testenv:bindep]
-basepython = python3
# Do not install any requirements. We want this to be fast and work even if
# system dependencies are missing, since it's used to tell you what system
# dependencies are missing! This also means that bindep must be installed
@@ -146,7 +154,6 @@ commands = bindep test
usedevelop = False
[testenv:lower-constraints]
-basepython = python3
install_command = pip install {opts} {packages}
deps =
-c{toxinidir}/lower-constraints.txt