summaryrefslogtreecommitdiff
path: root/lib/ansible/module_utils
diff options
context:
space:
mode:
Diffstat (limited to 'lib/ansible/module_utils')
-rw-r--r--lib/ansible/module_utils/alicloud_ecs.py158
-rw-r--r--lib/ansible/module_utils/cloud.py217
-rw-r--r--lib/ansible/module_utils/cloudscale.py132
-rw-r--r--lib/ansible/module_utils/cloudstack.py664
-rw-r--r--lib/ansible/module_utils/database.py142
-rw-r--r--lib/ansible/module_utils/digital_ocean.py147
-rw-r--r--lib/ansible/module_utils/dimensiondata.py338
-rw-r--r--lib/ansible/module_utils/docker/__init__.py0
-rw-r--r--lib/ansible/module_utils/docker/common.py1022
-rw-r--r--lib/ansible/module_utils/docker/swarm.py280
-rw-r--r--lib/ansible/module_utils/exoscale.py139
-rw-r--r--lib/ansible/module_utils/f5_utils.py383
-rw-r--r--lib/ansible/module_utils/firewalld.py316
-rw-r--r--lib/ansible/module_utils/gcdns.py55
-rw-r--r--lib/ansible/module_utils/gce.py54
-rw-r--r--lib/ansible/module_utils/gcp.py815
-rw-r--r--lib/ansible/module_utils/gitlab.py104
-rw-r--r--lib/ansible/module_utils/heroku.py41
-rw-r--r--lib/ansible/module_utils/hetzner.py171
-rw-r--r--lib/ansible/module_utils/hwc_utils.py438
-rw-r--r--lib/ansible/module_utils/ibm_sa_utils.py94
-rw-r--r--lib/ansible/module_utils/identity/__init__.py0
-rw-r--r--lib/ansible/module_utils/identity/keycloak/__init__.py0
-rw-r--r--lib/ansible/module_utils/identity/keycloak/keycloak.py480
-rw-r--r--lib/ansible/module_utils/infinibox.py93
-rw-r--r--lib/ansible/module_utils/influxdb.py88
-rw-r--r--lib/ansible/module_utils/ipa.py226
-rw-r--r--lib/ansible/module_utils/known_hosts.py195
-rw-r--r--lib/ansible/module_utils/kubevirt.py462
-rw-r--r--lib/ansible/module_utils/ldap.py78
-rw-r--r--lib/ansible/module_utils/linode.py37
-rw-r--r--lib/ansible/module_utils/lxd.py142
-rw-r--r--lib/ansible/module_utils/manageiq.py170
-rw-r--r--lib/ansible/module_utils/memset.py151
-rw-r--r--lib/ansible/module_utils/mysql.py106
-rw-r--r--lib/ansible/module_utils/net_tools/__init__.py0
-rw-r--r--lib/ansible/module_utils/net_tools/netbox/__init__.py0
-rw-r--r--lib/ansible/module_utils/net_tools/nios/__init__.py0
-rw-r--r--lib/ansible/module_utils/net_tools/nios/api.py601
-rw-r--r--lib/ansible/module_utils/network/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/a10/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/a10/a10.py153
-rw-r--r--lib/ansible/module_utils/network/aci/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/aireos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/aireos/aireos.py129
-rw-r--r--lib/ansible/module_utils/network/aos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/aos/aos.py180
-rw-r--r--lib/ansible/module_utils/network/apconos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/apconos/apconos.py113
-rw-r--r--lib/ansible/module_utils/network/aruba/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/aruba/aruba.py131
-rw-r--r--lib/ansible/module_utils/network/avi/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/avi/ansible_utils.py572
-rw-r--r--lib/ansible/module_utils/network/avi/avi.py38
-rw-r--r--lib/ansible/module_utils/network/avi/avi_api.py972
-rw-r--r--lib/ansible/module_utils/network/bigswitch/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/bigswitch/bigswitch.py91
-rw-r--r--lib/ansible/module_utils/network/checkpoint/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/cloudengine/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/cloudengine/ce.py421
-rw-r--r--lib/ansible/module_utils/network/cnos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/cnos/cnos.py660
-rw-r--r--lib/ansible/module_utils/network/cnos/cnos_devicerules.py1921
-rw-r--r--lib/ansible/module_utils/network/cnos/cnos_errorcodes.py256
-rw-r--r--lib/ansible/module_utils/network/edgeos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/edgeos/edgeos.py132
-rw-r--r--lib/ansible/module_utils/network/edgeswitch/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/edgeswitch/edgeswitch.py168
-rw-r--r--lib/ansible/module_utils/network/edgeswitch/edgeswitch_interface.py91
-rw-r--r--lib/ansible/module_utils/network/enos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/enos/enos.py172
-rw-r--r--lib/ansible/module_utils/network/eric_eccli/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/eric_eccli/eric_eccli.py49
-rw-r--r--lib/ansible/module_utils/network/exos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/facts/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/facts/facts.py23
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/l2_interfaces/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/l2_interfaces/l2_interfaces.py48
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/lldp_global/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/lldp_global/lldp_global.py57
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/lldp_interfaces/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/lldp_interfaces/lldp_interfaces.py49
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/vlans/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/argspec/vlans/vlans.py53
-rw-r--r--lib/ansible/module_utils/network/exos/config/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/config/l2_interfaces/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/config/l2_interfaces/l2_interfaces.py294
-rw-r--r--lib/ansible/module_utils/network/exos/config/lldp_global/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/config/lldp_global/lldp_global.py199
-rw-r--r--lib/ansible/module_utils/network/exos/config/lldp_interfaces/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/config/lldp_interfaces/lldp_interfaces.py243
-rw-r--r--lib/ansible/module_utils/network/exos/config/vlans/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/config/vlans/vlans.py277
-rw-r--r--lib/ansible/module_utils/network/exos/exos.py219
-rw-r--r--lib/ansible/module_utils/network/exos/facts/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/facts/facts.py61
-rw-r--r--lib/ansible/module_utils/network/exos/facts/l2_interfaces/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/facts/l2_interfaces/l2_interfaces.py92
-rw-r--r--lib/ansible/module_utils/network/exos/facts/legacy/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/facts/legacy/base.py263
-rw-r--r--lib/ansible/module_utils/network/exos/facts/lldp_global/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/facts/lldp_global/lldp_global.py97
-rw-r--r--lib/ansible/module_utils/network/exos/facts/lldp_interfaces/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/facts/lldp_interfaces/lldp_interfaces.py88
-rw-r--r--lib/ansible/module_utils/network/exos/facts/vlans/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/facts/vlans/vlans.py89
-rw-r--r--lib/ansible/module_utils/network/exos/utils/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/exos/utils/utils.py9
-rw-r--r--lib/ansible/module_utils/network/f5/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/f5/iworkflow.py57
-rw-r--r--lib/ansible/module_utils/network/f5/legacy.py121
-rw-r--r--lib/ansible/module_utils/network/f5/urls.py122
-rw-r--r--lib/ansible/module_utils/network/fortianalyzer/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/fortianalyzer/common.py292
-rw-r--r--lib/ansible/module_utils/network/fortianalyzer/fortianalyzer.py477
-rw-r--r--lib/ansible/module_utils/network/ftd/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/ftd/common.py238
-rw-r--r--lib/ansible/module_utils/network/ftd/configuration.py565
-rw-r--r--lib/ansible/module_utils/network/ftd/device.py138
-rw-r--r--lib/ansible/module_utils/network/ftd/fdm_swagger_client.py638
-rw-r--r--lib/ansible/module_utils/network/ftd/operation.py41
-rw-r--r--lib/ansible/module_utils/network/icx/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/icx/icx.py69
-rw-r--r--lib/ansible/module_utils/network/ingate/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/ingate/common.py69
-rw-r--r--lib/ansible/module_utils/network/ironware/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/ironware/ironware.py113
-rw-r--r--lib/ansible/module_utils/network/netscaler/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/netscaler/netscaler.py322
-rw-r--r--lib/ansible/module_utils/network/netvisor/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/netvisor/netvisor.py59
-rw-r--r--lib/ansible/module_utils/network/netvisor/pn_nvos.py66
-rw-r--r--lib/ansible/module_utils/network/nos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/nos/nos.py160
-rw-r--r--lib/ansible/module_utils/network/nso/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/nso/nso.py822
-rw-r--r--lib/ansible/module_utils/network/onyx/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/onyx/onyx.py261
-rw-r--r--lib/ansible/module_utils/network/ordnance/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/ordnance/ordnance.py19
-rw-r--r--lib/ansible/module_utils/network/panos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/panos/panos.py418
-rw-r--r--lib/ansible/module_utils/network/routeros/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/routeros/routeros.py156
-rw-r--r--lib/ansible/module_utils/network/slxos/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/slxos/slxos.py148
-rw-r--r--lib/ansible/module_utils/network/sros/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/sros/sros.py111
-rw-r--r--lib/ansible/module_utils/network/voss/__init__.py0
-rw-r--r--lib/ansible/module_utils/network/voss/voss.py219
-rw-r--r--lib/ansible/module_utils/oneandone.py277
-rw-r--r--lib/ansible/module_utils/oneview.py502
-rw-r--r--lib/ansible/module_utils/online.py121
-rw-r--r--lib/ansible/module_utils/opennebula.py306
-rw-r--r--lib/ansible/module_utils/oracle/__init__.py0
-rw-r--r--lib/ansible/module_utils/oracle/oci_utils.py1961
-rw-r--r--lib/ansible/module_utils/postgres.py330
-rw-r--r--lib/ansible/module_utils/pure.py128
-rw-r--r--lib/ansible/module_utils/rabbitmq.py220
-rw-r--r--lib/ansible/module_utils/rax.py331
-rw-r--r--lib/ansible/module_utils/redfish_utils.py2458
-rw-r--r--lib/ansible/module_utils/redhat.py284
-rw-r--r--lib/ansible/module_utils/remote_management/__init__.py0
-rw-r--r--lib/ansible/module_utils/remote_management/dellemc/__init__.py0
-rw-r--r--lib/ansible/module_utils/remote_management/dellemc/dellemc_idrac.py57
-rw-r--r--lib/ansible/module_utils/remote_management/dellemc/ome.py181
-rw-r--r--lib/ansible/module_utils/remote_management/lxca/__init__.py0
-rw-r--r--lib/ansible/module_utils/remote_management/lxca/common.py95
-rw-r--r--lib/ansible/module_utils/scaleway.py183
-rw-r--r--lib/ansible/module_utils/source_control/__init__.py0
-rw-r--r--lib/ansible/module_utils/source_control/bitbucket.py95
-rw-r--r--lib/ansible/module_utils/storage/__init__.py0
-rw-r--r--lib/ansible/module_utils/storage/emc/__init__.py0
-rw-r--r--lib/ansible/module_utils/storage/emc/emc_vnx.py34
-rw-r--r--lib/ansible/module_utils/storage/hpe3par/__init__.py0
-rw-r--r--lib/ansible/module_utils/storage/hpe3par/hpe3par.py90
-rw-r--r--lib/ansible/module_utils/univention_umc.py293
-rw-r--r--lib/ansible/module_utils/utm_utils.py234
-rw-r--r--lib/ansible/module_utils/vexata.py94
-rw-r--r--lib/ansible/module_utils/vultr.py333
-rw-r--r--lib/ansible/module_utils/xenserver.py862
182 files changed, 0 insertions, 32119 deletions
diff --git a/lib/ansible/module_utils/alicloud_ecs.py b/lib/ansible/module_utils/alicloud_ecs.py
deleted file mode 100644
index 31b4694c0b..0000000000
--- a/lib/ansible/module_utils/alicloud_ecs.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2017 Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-from ansible.module_utils.basic import env_fallback
-
-try:
- import footmark
- import footmark.ecs
- import footmark.slb
- import footmark.vpc
- import footmark.rds
- import footmark.ess
- HAS_FOOTMARK = True
-except ImportError:
- HAS_FOOTMARK = False
-
-
-class AnsibleACSError(Exception):
- pass
-
-
-def acs_common_argument_spec():
- return dict(
- alicloud_access_key=dict(required=True, aliases=['access_key_id', 'access_key'], no_log=True,
- fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])),
- alicloud_secret_key=dict(required=True, aliases=['secret_access_key', 'secret_key'], no_log=True,
- fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])),
- alicloud_security_token=dict(aliases=['security_token'], no_log=True,
- fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])),
- )
-
-
-def ecs_argument_spec():
- spec = acs_common_argument_spec()
- spec.update(
- dict(
- alicloud_region=dict(required=True, aliases=['region', 'region_id'],
- fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])),
- )
- )
- return spec
-
-
-def get_acs_connection_info(module):
-
- ecs_params = dict(acs_access_key_id=module.params.get('alicloud_access_key'),
- acs_secret_access_key=module.params.get('alicloud_secret_key'),
- security_token=module.params.get('alicloud_security_token'),
- user_agent='Ansible-Provider-Alicloud')
-
- return module.params.get('alicloud_region'), ecs_params
-
-
-def connect_to_acs(acs_module, region, **params):
- conn = acs_module.connect_to_region(region, **params)
- if not conn:
- if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]:
- raise AnsibleACSError(
- "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__))
- else:
- raise AnsibleACSError(
- "Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__))
- return conn
-
-
-def ecs_connect(module):
- """ Return an ecs connection"""
-
- region, ecs_params = get_acs_connection_info(module)
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- ecs = connect_to_acs(footmark.ecs, region, **ecs_params)
- except AnsibleACSError as e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- return ecs
-
-
-def slb_connect(module):
- """ Return an slb connection"""
-
- region, slb_params = get_acs_connection_info(module)
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- slb = connect_to_acs(footmark.slb, region, **slb_params)
- except AnsibleACSError as e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- return slb
-
-
-def vpc_connect(module):
- """ Return an vpc connection"""
-
- region, vpc_params = get_acs_connection_info(module)
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- vpc = connect_to_acs(footmark.vpc, region, **vpc_params)
- except AnsibleACSError as e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- return vpc
-
-
-def rds_connect(module):
- """ Return an rds connection"""
-
- region, rds_params = get_acs_connection_info(module)
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- rds = connect_to_acs(footmark.rds, region, **rds_params)
- except AnsibleACSError as e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- return rds
-
-
-def ess_connect(module):
- """ Return an ess connection"""
-
- region, ess_params = get_acs_connection_info(module)
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- ess = connect_to_acs(footmark.ess, region, **ess_params)
- except AnsibleACSError as e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- return ess
diff --git a/lib/ansible/module_utils/cloud.py b/lib/ansible/module_utils/cloud.py
deleted file mode 100644
index 0d29071fe1..0000000000
--- a/lib/ansible/module_utils/cloud.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#
-# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-"""
-This module adds shared support for generic cloud modules
-
-In order to use this module, include it as part of a custom
-module as shown below.
-
-from ansible.module_utils.cloud import CloudRetry
-
-The 'cloud' module provides the following common classes:
-
- * CloudRetry
- - The base class to be used by other cloud providers, in order to
- provide a backoff/retry decorator based on status codes.
-
- - Example using the AWSRetry class which inherits from CloudRetry.
-
- @AWSRetry.exponential_backoff(retries=10, delay=3)
- get_ec2_security_group_ids_from_names()
-
- @AWSRetry.jittered_backoff()
- get_ec2_security_group_ids_from_names()
-
-"""
-import random
-from functools import wraps
-import syslog
-import time
-
-
-def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
- """ Customizable exponential backoff strategy.
- Args:
- retries (int): Maximum number of times to retry a request.
- delay (float): Initial (base) delay.
- backoff (float): base of the exponent to use for exponential
- backoff.
- max_delay (int): Optional. If provided each delay generated is capped
- at this amount. Defaults to 60 seconds.
- Returns:
- Callable that returns a generator. This generator yields durations in
- seconds to be used as delays for an exponential backoff strategy.
- Usage:
- >>> backoff = _exponential_backoff()
- >>> backoff
- <function backoff_backoff at 0x7f0d939facf8>
- >>> list(backoff())
- [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
- """
- def backoff_gen():
- for retry in range(0, retries):
- sleep = delay * backoff ** retry
- yield sleep if max_delay is None else min(sleep, max_delay)
- return backoff_gen
-
-
-def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
- """ Implements the "Full Jitter" backoff strategy described here
- https://www.awsarchitectureblog.com/2015/03/backoff.html
- Args:
- retries (int): Maximum number of times to retry a request.
- delay (float): Approximate number of seconds to sleep for the first
- retry.
- max_delay (int): The maximum number of seconds to sleep for any retry.
- _random (random.Random or None): Makes this generator testable by
- allowing developers to explicitly pass in the a seeded Random.
- Returns:
- Callable that returns a generator. This generator yields durations in
- seconds to be used as delays for a full jitter backoff strategy.
- Usage:
- >>> backoff = _full_jitter_backoff(retries=5)
- >>> backoff
- <function backoff_backoff at 0x7f0d939facf8>
- >>> list(backoff())
- [3, 6, 5, 23, 38]
- >>> list(backoff())
- [2, 1, 6, 6, 31]
- """
- def backoff_gen():
- for retry in range(0, retries):
- yield _random.randint(0, min(max_delay, delay * 2 ** retry))
- return backoff_gen
-
-
-class CloudRetry(object):
- """ CloudRetry can be used by any cloud provider, in order to implement a
- backoff algorithm/retry effect based on Status Code from Exceptions.
- """
- # This is the base class of the exception.
- # AWS Example botocore.exceptions.ClientError
- base_class = None
-
- @staticmethod
- def status_code_from_exception(error):
- """ Return the status code from the exception object
- Args:
- error (object): The exception itself.
- """
- pass
-
- @staticmethod
- def found(response_code, catch_extra_error_codes=None):
- """ Return True if the Response Code to retry on was found.
- Args:
- response_code (str): This is the Response Code that is being matched against.
- """
- pass
-
- @classmethod
- def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
- """ Retry calling the Cloud decorated function using the provided
- backoff strategy.
- Args:
- backoff_strategy (callable): Callable that returns a generator. The
- generator should yield sleep times for each retry of the decorated
- function.
- """
- def deco(f):
- @wraps(f)
- def retry_func(*args, **kwargs):
- for delay in backoff_strategy():
- try:
- return f(*args, **kwargs)
- except Exception as e:
- if isinstance(e, cls.base_class):
- response_code = cls.status_code_from_exception(e)
- if cls.found(response_code, catch_extra_error_codes):
- msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
- syslog.syslog(syslog.LOG_INFO, msg)
- time.sleep(delay)
- else:
- # Return original exception if exception is not a ClientError
- raise e
- else:
- # Return original exception if exception is not a ClientError
- raise e
- return f(*args, **kwargs)
-
- return retry_func # true decorator
-
- return deco
-
- @classmethod
- def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
- """
- Retry calling the Cloud decorated function using an exponential backoff.
-
- Kwargs:
- retries (int): Number of times to retry a failed request before giving up
- default=10
- delay (int or float): Initial delay between retries in seconds
- default=3
- backoff (int or float): backoff multiplier e.g. value of 2 will
- double the delay each retry
- default=1.1
- max_delay (int or None): maximum amount of time to wait between retries.
- default=60
- """
- return cls._backoff(_exponential_backoff(
- retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
-
- @classmethod
- def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
- """
- Retry calling the Cloud decorated function using a jittered backoff
- strategy. More on this strategy here:
-
- https://www.awsarchitectureblog.com/2015/03/backoff.html
-
- Kwargs:
- retries (int): Number of times to retry a failed request before giving up
- default=10
- delay (int): Initial delay between retries in seconds
- default=3
- max_delay (int): maximum amount of time to wait between retries.
- default=60
- """
- return cls._backoff(_full_jitter_backoff(
- retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
-
- @classmethod
- def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
- """
- Retry calling the Cloud decorated function using an exponential backoff.
-
- Compatibility for the original implementation of CloudRetry.backoff that
- did not provide configurable backoff strategies. Developers should use
- CloudRetry.exponential_backoff instead.
-
- Kwargs:
- tries (int): Number of times to try (not retry) before giving up
- default=10
- delay (int or float): Initial delay between retries in seconds
- default=3
- backoff (int or float): backoff multiplier e.g. value of 2 will
- double the delay each retry
- default=1.1
- """
- return cls.exponential_backoff(
- retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
diff --git a/lib/ansible/module_utils/cloudscale.py b/lib/ansible/module_utils/cloudscale.py
deleted file mode 100644
index 01f72bd21f..0000000000
--- a/lib/ansible/module_utils/cloudscale.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from copy import deepcopy
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.urls import fetch_url
-from ansible.module_utils._text import to_text
-
-API_URL = 'https://api.cloudscale.ch/v1/'
-
-
-def cloudscale_argument_spec():
- return dict(
- api_token=dict(fallback=(env_fallback, ['CLOUDSCALE_API_TOKEN']),
- no_log=True,
- required=True,
- type='str'),
- api_timeout=dict(default=30, type='int'),
- )
-
-
-class AnsibleCloudscaleBase(object):
-
- def __init__(self, module):
- self._module = module
- self._auth_header = {'Authorization': 'Bearer %s' % module.params['api_token']}
- self._result = {
- 'changed': False,
- 'diff': dict(before=dict(), after=dict()),
- }
-
- def _get(self, api_call):
- resp, info = fetch_url(self._module, API_URL + api_call,
- headers=self._auth_header,
- timeout=self._module.params['api_timeout'])
-
- if info['status'] == 200:
- return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
- elif info['status'] == 404:
- return None
- else:
- self._module.fail_json(msg='Failure while calling the cloudscale.ch API with GET for '
- '"%s".' % api_call, fetch_url_info=info)
-
- def _post_or_patch(self, api_call, method, data):
- # This helps with tags when we have the full API resource href to update.
- if API_URL not in api_call:
- api_endpoint = API_URL + api_call
- else:
- api_endpoint = api_call
-
- headers = self._auth_header.copy()
- if data is not None:
- # Sanitize data dictionary
- # Deepcopy: Duplicate the data object for iteration, because
- # iterating an object and changing it at the same time is insecure
- for k, v in deepcopy(data).items():
- if v is None:
- del data[k]
-
- data = self._module.jsonify(data)
- headers['Content-type'] = 'application/json'
-
- resp, info = fetch_url(self._module,
- api_endpoint,
- headers=headers,
- method=method,
- data=data,
- timeout=self._module.params['api_timeout'])
-
- if info['status'] in (200, 201):
- return self._module.from_json(to_text(resp.read(), errors='surrogate_or_strict'))
- elif info['status'] == 204:
- return None
- else:
- self._module.fail_json(msg='Failure while calling the cloudscale.ch API with %s for '
- '"%s".' % (method, api_call), fetch_url_info=info)
-
- def _post(self, api_call, data=None):
- return self._post_or_patch(api_call, 'POST', data)
-
- def _patch(self, api_call, data=None):
- return self._post_or_patch(api_call, 'PATCH', data)
-
- def _delete(self, api_call):
- resp, info = fetch_url(self._module,
- API_URL + api_call,
- headers=self._auth_header,
- method='DELETE',
- timeout=self._module.params['api_timeout'])
-
- if info['status'] == 204:
- return None
- else:
- self._module.fail_json(msg='Failure while calling the cloudscale.ch API with DELETE for '
- '"%s".' % api_call, fetch_url_info=info)
-
- def _param_updated(self, key, resource):
- param = self._module.params.get(key)
- if param is None:
- return False
-
- if resource and key in resource:
- if param != resource[key]:
- self._result['changed'] = True
-
- patch_data = {
- key: param
- }
-
- self._result['diff']['before'].update({key: resource[key]})
- self._result['diff']['after'].update(patch_data)
-
- if not self._module.check_mode:
- href = resource.get('href')
- if not href:
- self._module.fail_json(msg='Unable to update %s, no href found.' % key)
-
- self._patch(href, patch_data)
- return True
- return False
-
- def get_result(self, resource):
- if resource:
- for k, v in resource.items():
- self._result[k] = v
- return self._result
diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py
deleted file mode 100644
index 85a53b6b6e..0000000000
--- a/lib/ansible/module_utils/cloudstack.py
+++ /dev/null
@@ -1,664 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2015, René Moser <mail@renemoser.net>
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-import os
-import sys
-import time
-import traceback
-
-from ansible.module_utils._text import to_text, to_native
-from ansible.module_utils.basic import missing_required_lib
-
-CS_IMP_ERR = None
-try:
- from cs import CloudStack, CloudStackException, read_config
- HAS_LIB_CS = True
-except ImportError:
- CS_IMP_ERR = traceback.format_exc()
- HAS_LIB_CS = False
-
-
-if sys.version_info > (3,):
- long = int
-
-
-def cs_argument_spec():
- return dict(
- api_key=dict(default=os.environ.get('CLOUDSTACK_KEY')),
- api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True),
- api_url=dict(default=os.environ.get('CLOUDSTACK_ENDPOINT')),
- api_http_method=dict(choices=['get', 'post'], default=os.environ.get('CLOUDSTACK_METHOD')),
- api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT')),
- api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'),
- )
-
-
-def cs_required_together():
- return [['api_key', 'api_secret']]
-
-
-class AnsibleCloudStack:
-
- def __init__(self, module):
- if not HAS_LIB_CS:
- module.fail_json(msg=missing_required_lib('cs'), exception=CS_IMP_ERR)
-
- self.result = {
- 'changed': False,
- 'diff': {
- 'before': dict(),
- 'after': dict()
- }
- }
-
- # Common returns, will be merged with self.returns
- # search_for_key: replace_with_key
- self.common_returns = {
- 'id': 'id',
- 'name': 'name',
- 'created': 'created',
- 'zonename': 'zone',
- 'state': 'state',
- 'project': 'project',
- 'account': 'account',
- 'domain': 'domain',
- 'displaytext': 'display_text',
- 'displayname': 'display_name',
- 'description': 'description',
- }
-
- # Init returns dict for use in subclasses
- self.returns = {}
- # these values will be casted to int
- self.returns_to_int = {}
- # these keys will be compared case sensitive in self.has_changed()
- self.case_sensitive_keys = [
- 'id',
- 'displaytext',
- 'displayname',
- 'description',
- ]
-
- self.module = module
- self._cs = None
-
- # Helper for VPCs
- self._vpc_networks_ids = None
-
- self.domain = None
- self.account = None
- self.project = None
- self.ip_address = None
- self.network = None
- self.physical_network = None
- self.vpc = None
- self.zone = None
- self.vm = None
- self.vm_default_nic = None
- self.os_type = None
- self.hypervisor = None
- self.capabilities = None
- self.network_acl = None
-
- @property
- def cs(self):
- if self._cs is None:
- api_config = self.get_api_config()
- self._cs = CloudStack(**api_config)
- return self._cs
-
- def get_api_config(self):
- api_region = self.module.params.get('api_region') or os.environ.get('CLOUDSTACK_REGION')
- try:
- config = read_config(api_region)
- except KeyError:
- config = {}
-
- api_config = {
- 'endpoint': self.module.params.get('api_url') or config.get('endpoint'),
- 'key': self.module.params.get('api_key') or config.get('key'),
- 'secret': self.module.params.get('api_secret') or config.get('secret'),
- 'timeout': self.module.params.get('api_timeout') or config.get('timeout') or 10,
- 'method': self.module.params.get('api_http_method') or config.get('method') or 'get',
- }
- self.result.update({
- 'api_region': api_region,
- 'api_url': api_config['endpoint'],
- 'api_key': api_config['key'],
- 'api_timeout': int(api_config['timeout']),
- 'api_http_method': api_config['method'],
- })
- if not all([api_config['endpoint'], api_config['key'], api_config['secret']]):
- self.fail_json(msg="Missing api credentials: can not authenticate")
- return api_config
-
- def fail_json(self, **kwargs):
- self.result.update(kwargs)
- self.module.fail_json(**self.result)
-
- def get_or_fallback(self, key=None, fallback_key=None):
- value = self.module.params.get(key)
- if not value:
- value = self.module.params.get(fallback_key)
- return value
-
- def has_changed(self, want_dict, current_dict, only_keys=None, skip_diff_for_keys=None):
- result = False
- for key, value in want_dict.items():
-
- # Optionally limit by a list of keys
- if only_keys and key not in only_keys:
- continue
-
- # Skip None values
- if value is None:
- continue
-
- if key in current_dict:
- if isinstance(value, (int, float, long, complex)):
-
- # ensure we compare the same type
- if isinstance(value, int):
- current_dict[key] = int(current_dict[key])
- elif isinstance(value, float):
- current_dict[key] = float(current_dict[key])
- elif isinstance(value, long):
- current_dict[key] = long(current_dict[key])
- elif isinstance(value, complex):
- current_dict[key] = complex(current_dict[key])
-
- if value != current_dict[key]:
- if skip_diff_for_keys and key not in skip_diff_for_keys:
- self.result['diff']['before'][key] = current_dict[key]
- self.result['diff']['after'][key] = value
- result = True
- else:
- before_value = to_text(current_dict[key])
- after_value = to_text(value)
-
- if self.case_sensitive_keys and key in self.case_sensitive_keys:
- if before_value != after_value:
- if skip_diff_for_keys and key not in skip_diff_for_keys:
- self.result['diff']['before'][key] = before_value
- self.result['diff']['after'][key] = after_value
- result = True
-
- # Test for diff in case insensitive way
- elif before_value.lower() != after_value.lower():
- if skip_diff_for_keys and key not in skip_diff_for_keys:
- self.result['diff']['before'][key] = before_value
- self.result['diff']['after'][key] = after_value
- result = True
- else:
- if skip_diff_for_keys and key not in skip_diff_for_keys:
- self.result['diff']['before'][key] = None
- self.result['diff']['after'][key] = to_text(value)
- result = True
- return result
-
- def _get_by_key(self, key=None, my_dict=None):
- if my_dict is None:
- my_dict = {}
- if key:
- if key in my_dict:
- return my_dict[key]
- self.fail_json(msg="Something went wrong: %s not found" % key)
- return my_dict
-
- def query_api(self, command, **args):
- try:
- res = getattr(self.cs, command)(**args)
-
- if 'errortext' in res:
- self.fail_json(msg="Failed: '%s'" % res['errortext'])
-
- except CloudStackException as e:
- self.fail_json(msg='CloudStackException: %s' % to_native(e))
-
- except Exception as e:
- self.fail_json(msg=to_native(e))
-
- return res
-
- def get_network_acl(self, key=None):
- if self.network_acl is None:
- args = {
- 'name': self.module.params.get('network_acl'),
- 'vpcid': self.get_vpc(key='id'),
- }
- network_acls = self.query_api('listNetworkACLLists', **args)
- if network_acls:
- self.network_acl = network_acls['networkacllist'][0]
- self.result['network_acl'] = self.network_acl['name']
- if self.network_acl:
- return self._get_by_key(key, self.network_acl)
- else:
- self.fail_json(msg="Network ACL %s not found" % self.module.params.get('network_acl'))
-
- def get_vpc(self, key=None):
- """Return a VPC dictionary or the value of given key of."""
- if self.vpc:
- return self._get_by_key(key, self.vpc)
-
- vpc = self.module.params.get('vpc')
- if not vpc:
- vpc = os.environ.get('CLOUDSTACK_VPC')
- if not vpc:
- return None
-
- args = {
- 'account': self.get_account(key='name'),
- 'domainid': self.get_domain(key='id'),
- 'projectid': self.get_project(key='id'),
- 'zoneid': self.get_zone(key='id'),
- }
- vpcs = self.query_api('listVPCs', **args)
- if not vpcs:
- self.fail_json(msg="No VPCs available.")
-
- for v in vpcs['vpc']:
- if vpc in [v['name'], v['displaytext'], v['id']]:
- # Fail if the identifyer matches more than one VPC
- if self.vpc:
- self.fail_json(msg="More than one VPC found with the provided identifyer '%s'" % vpc)
- else:
- self.vpc = v
- self.result['vpc'] = v['name']
- if self.vpc:
- return self._get_by_key(key, self.vpc)
- self.fail_json(msg="VPC '%s' not found" % vpc)
-
- def is_vpc_network(self, network_id):
- """Returns True if network is in VPC."""
- # This is an efficient way to query a lot of networks at a time
- if self._vpc_networks_ids is None:
- args = {
- 'account': self.get_account(key='name'),
- 'domainid': self.get_domain(key='id'),
- 'projectid': self.get_project(key='id'),
- 'zoneid': self.get_zone(key='id'),
- }
- vpcs = self.query_api('listVPCs', **args)
- self._vpc_networks_ids = []
- if vpcs:
- for vpc in vpcs['vpc']:
- for n in vpc.get('network', []):
- self._vpc_networks_ids.append(n['id'])
- return network_id in self._vpc_networks_ids
-
- def get_physical_network(self, key=None):
- if self.physical_network:
- return self._get_by_key(key, self.physical_network)
- physical_network = self.module.params.get('physical_network')
- args = {
- 'zoneid': self.get_zone(key='id')
- }
- physical_networks = self.query_api('listPhysicalNetworks', **args)
- if not physical_networks:
- self.fail_json(msg="No physical networks available.")
-
- for net in physical_networks['physicalnetwork']:
- if physical_network in [net['name'], net['id']]:
- self.physical_network = net
- self.result['physical_network'] = net['name']
- return self._get_by_key(key, self.physical_network)
- self.fail_json(msg="Physical Network '%s' not found" % physical_network)
-
- def get_network(self, key=None):
- """Return a network dictionary or the value of given key of."""
- if self.network:
- return self._get_by_key(key, self.network)
-
- network = self.module.params.get('network')
- if not network:
- vpc_name = self.get_vpc(key='name')
- if vpc_name:
- self.fail_json(msg="Could not find network for VPC '%s' due missing argument: network" % vpc_name)
- return None
-
- args = {
- 'account': self.get_account(key='name'),
- 'domainid': self.get_domain(key='id'),
- 'projectid': self.get_project(key='id'),
- 'zoneid': self.get_zone(key='id'),
- 'vpcid': self.get_vpc(key='id')
- }
- networks = self.query_api('listNetworks', **args)
- if not networks:
- self.fail_json(msg="No networks available.")
-
- for n in networks['network']:
- # ignore any VPC network if vpc param is not given
- if 'vpcid' in n and not self.get_vpc(key='id'):
- continue
- if network in [n['displaytext'], n['name'], n['id']]:
- self.result['network'] = n['name']
- self.network = n
- return self._get_by_key(key, self.network)
- self.fail_json(msg="Network '%s' not found" % network)
-
- def get_project(self, key=None):
- if self.project:
- return self._get_by_key(key, self.project)
-
- project = self.module.params.get('project')
- if not project:
- project = os.environ.get('CLOUDSTACK_PROJECT')
- if not project:
- return None
- args = {
- 'account': self.get_account(key='name'),
- 'domainid': self.get_domain(key='id')
- }
- projects = self.query_api('listProjects', **args)
- if projects:
- for p in projects['project']:
- if project.lower() in [p['name'].lower(), p['id']]:
- self.result['project'] = p['name']
- self.project = p
- return self._get_by_key(key, self.project)
- self.fail_json(msg="project '%s' not found" % project)
-
- def get_ip_address(self, key=None):
- if self.ip_address:
- return self._get_by_key(key, self.ip_address)
-
- ip_address = self.module.params.get('ip_address')
- if not ip_address:
- self.fail_json(msg="IP address param 'ip_address' is required")
-
- args = {
- 'ipaddress': ip_address,
- 'account': self.get_account(key='name'),
- 'domainid': self.get_domain(key='id'),
- 'projectid': self.get_project(key='id'),
- 'vpcid': self.get_vpc(key='id'),
- }
-
- ip_addresses = self.query_api('listPublicIpAddresses', **args)
-
- if not ip_addresses:
- self.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
-
- self.ip_address = ip_addresses['publicipaddress'][0]
- return self._get_by_key(key, self.ip_address)
-
- def get_vm_guest_ip(self):
- vm_guest_ip = self.module.params.get('vm_guest_ip')
- default_nic = self.get_vm_default_nic()
-
- if not vm_guest_ip:
- return default_nic['ipaddress']
-
- for secondary_ip in default_nic['secondaryip']:
- if vm_guest_ip == secondary_ip['ipaddress']:
- return vm_guest_ip
- self.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
-
- def get_vm_default_nic(self):
- if self.vm_default_nic:
- return self.vm_default_nic
-
- nics = self.query_api('listNics', virtualmachineid=self.get_vm(key='id'))
- if nics:
- for n in nics['nic']:
- if n['isdefault']:
- self.vm_default_nic = n
- return self.vm_default_nic
- self.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
-
- def get_vm(self, key=None, filter_zone=True):
- if self.vm:
- return self._get_by_key(key, self.vm)
-
- vm = self.module.params.get('vm')
- if not vm:
- self.fail_json(msg="Virtual machine param 'vm' is required")
-
- args = {
- 'account': self.get_account(key='name'),
- 'domainid': self.get_domain(key='id'),
- 'projectid': self.get_project(key='id'),
- 'zoneid': self.get_zone(key='id') if filter_zone else None,
- 'fetch_list': True,
- }
- vms = self.query_api('listVirtualMachines', **args)
- if vms:
- for v in vms:
- if vm.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
- self.vm = v
- return self._get_by_key(key, self.vm)
- self.fail_json(msg="Virtual machine '%s' not found" % vm)
-
- def get_disk_offering(self, key=None):
- disk_offering = self.module.params.get('disk_offering')
- if not disk_offering:
- return None
-
- # Do not add domain filter for disk offering listing.
- disk_offerings = self.query_api('listDiskOfferings')
- if disk_offerings:
- for d in disk_offerings['diskoffering']:
- if disk_offering in [d['displaytext'], d['name'], d['id']]:
- return self._get_by_key(key, d)
- self.fail_json(msg="Disk offering '%s' not found" % disk_offering)
-
- def get_zone(self, key=None):
- if self.zone:
- return self._get_by_key(key, self.zone)
-
- zone = self.module.params.get('zone')
- if not zone:
- zone = os.environ.get('CLOUDSTACK_ZONE')
- zones = self.query_api('listZones')
-
- if not zones:
- self.fail_json(msg="No zones available. Please create a zone first")
-
- # use the first zone if no zone param given
- if not zone:
- self.zone = zones['zone'][0]
- self.result['zone'] = self.zone['name']
- return self._get_by_key(key, self.zone)
-
- if zones:
- for z in zones['zone']:
- if zone.lower() in [z['name'].lower(), z['id']]:
- self.result['zone'] = z['name']
- self.zone = z
- return self._get_by_key(key, self.zone)
- self.fail_json(msg="zone '%s' not found" % zone)
-
- def get_os_type(self, key=None):
- if self.os_type:
- return self._get_by_key(key, self.zone)
-
- os_type = self.module.params.get('os_type')
- if not os_type:
- return None
-
- os_types = self.query_api('listOsTypes')
- if os_types:
- for o in os_types['ostype']:
- if os_type in [o['description'], o['id']]:
- self.os_type = o
- return self._get_by_key(key, self.os_type)
- self.fail_json(msg="OS type '%s' not found" % os_type)
-
- def get_hypervisor(self):
- if self.hypervisor:
- return self.hypervisor
-
- hypervisor = self.module.params.get('hypervisor')
- hypervisors = self.query_api('listHypervisors')
-
- # use the first hypervisor if no hypervisor param given
- if not hypervisor:
- self.hypervisor = hypervisors['hypervisor'][0]['name']
- return self.hypervisor
-
- for h in hypervisors['hypervisor']:
- if hypervisor.lower() == h['name'].lower():
- self.hypervisor = h['name']
- return self.hypervisor
- self.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
-
- def get_account(self, key=None):
- if self.account:
- return self._get_by_key(key, self.account)
-
- account = self.module.params.get('account')
- if not account:
- account = os.environ.get('CLOUDSTACK_ACCOUNT')
- if not account:
- return None
-
- domain = self.module.params.get('domain')
- if not domain:
- self.fail_json(msg="Account must be specified with Domain")
-
- args = {
- 'name': account,
- 'domainid': self.get_domain(key='id'),
- 'listall': True
- }
- accounts = self.query_api('listAccounts', **args)
- if accounts:
- self.account = accounts['account'][0]
- self.result['account'] = self.account['name']
- return self._get_by_key(key, self.account)
- self.fail_json(msg="Account '%s' not found" % account)
-
- def get_domain(self, key=None):
- if self.domain:
- return self._get_by_key(key, self.domain)
-
- domain = self.module.params.get('domain')
- if not domain:
- domain = os.environ.get('CLOUDSTACK_DOMAIN')
- if not domain:
- return None
-
- args = {
- 'listall': True,
- }
- domains = self.query_api('listDomains', **args)
- if domains:
- for d in domains['domain']:
- if d['path'].lower() in [domain.lower(), "root/" + domain.lower(), "root" + domain.lower()]:
- self.domain = d
- self.result['domain'] = d['path']
- return self._get_by_key(key, self.domain)
- self.fail_json(msg="Domain '%s' not found" % domain)
-
- def query_tags(self, resource, resource_type):
- args = {
- 'resourceid': resource['id'],
- 'resourcetype': resource_type,
- }
- tags = self.query_api('listTags', **args)
- return self.get_tags(resource=tags, key='tag')
-
- def get_tags(self, resource=None, key='tags'):
- existing_tags = []
- for tag in resource.get(key) or []:
- existing_tags.append({'key': tag['key'], 'value': tag['value']})
- return existing_tags
-
- def _process_tags(self, resource, resource_type, tags, operation="create"):
- if tags:
- self.result['changed'] = True
- if not self.module.check_mode:
- args = {
- 'resourceids': resource['id'],
- 'resourcetype': resource_type,
- 'tags': tags,
- }
- if operation == "create":
- response = self.query_api('createTags', **args)
- else:
- response = self.query_api('deleteTags', **args)
- self.poll_job(response)
-
- def _tags_that_should_exist_or_be_updated(self, resource, tags):
- existing_tags = self.get_tags(resource)
- return [tag for tag in tags if tag not in existing_tags]
-
- def _tags_that_should_not_exist(self, resource, tags):
- existing_tags = self.get_tags(resource)
- return [tag for tag in existing_tags if tag not in tags]
-
- def ensure_tags(self, resource, resource_type=None):
- if not resource_type or not resource:
- self.fail_json(msg="Error: Missing resource or resource_type for tags.")
-
- if 'tags' in resource:
- tags = self.module.params.get('tags')
- if tags is not None:
- self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
- self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
- resource['tags'] = self.query_tags(resource=resource, resource_type=resource_type)
- return resource
-
- def get_capabilities(self, key=None):
- if self.capabilities:
- return self._get_by_key(key, self.capabilities)
- capabilities = self.query_api('listCapabilities')
- self.capabilities = capabilities['capability']
- return self._get_by_key(key, self.capabilities)
-
- def poll_job(self, job=None, key=None):
- if 'jobid' in job:
- while True:
- res = self.query_api('queryAsyncJobResult', jobid=job['jobid'])
- if res['jobstatus'] != 0 and 'jobresult' in res:
-
- if 'errortext' in res['jobresult']:
- self.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
-
- if key and key in res['jobresult']:
- job = res['jobresult'][key]
-
- break
- time.sleep(2)
- return job
-
- def update_result(self, resource, result=None):
- if result is None:
- result = dict()
- if resource:
- returns = self.common_returns.copy()
- returns.update(self.returns)
- for search_key, return_key in returns.items():
- if search_key in resource:
- result[return_key] = resource[search_key]
-
- # Bad bad API does not always return int when it should.
- for search_key, return_key in self.returns_to_int.items():
- if search_key in resource:
- result[return_key] = int(resource[search_key])
-
- if 'tags' in resource:
- result['tags'] = resource['tags']
- return result
-
- def get_result(self, resource):
- return self.update_result(resource, self.result)
-
- def get_result_and_facts(self, facts_name, resource):
- result = self.get_result(resource)
-
- ansible_facts = {
- facts_name: result.copy()
- }
- for k in ['diff', 'changed']:
- if k in ansible_facts[facts_name]:
- del ansible_facts[facts_name][k]
-
- result.update(ansible_facts=ansible_facts)
- return result
diff --git a/lib/ansible/module_utils/database.py b/lib/ansible/module_utils/database.py
deleted file mode 100644
index 014939a260..0000000000
--- a/lib/ansible/module_utils/database.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class SQLParseError(Exception):
- pass
-
-
-class UnclosedQuoteError(SQLParseError):
- pass
-
-
-# maps a type of identifier to the maximum number of dot levels that are
-# allowed to specify that identifier. For example, a database column can be
-# specified by up to 4 levels: database.schema.table.column
-_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
- database=1,
- schema=2,
- table=3,
- column=4,
- role=1,
- tablespace=1,
- sequence=3,
- publication=1,
-)
-_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
-
-
-def _find_end_quote(identifier, quote_char):
- accumulate = 0
- while True:
- try:
- quote = identifier.index(quote_char)
- except ValueError:
- raise UnclosedQuoteError
- accumulate = accumulate + quote
- try:
- next_char = identifier[quote + 1]
- except IndexError:
- return accumulate
- if next_char == quote_char:
- try:
- identifier = identifier[quote + 2:]
- accumulate = accumulate + 2
- except IndexError:
- raise UnclosedQuoteError
- else:
- return accumulate
-
-
-def _identifier_parse(identifier, quote_char):
- if not identifier:
- raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
-
- already_quoted = False
- if identifier.startswith(quote_char):
- already_quoted = True
- try:
- end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
- except UnclosedQuoteError:
- already_quoted = False
- else:
- if end_quote < len(identifier) - 1:
- if identifier[end_quote + 1] == '.':
- dot = end_quote + 1
- first_identifier = identifier[:dot]
- next_identifier = identifier[dot + 1:]
- further_identifiers = _identifier_parse(next_identifier, quote_char)
- further_identifiers.insert(0, first_identifier)
- else:
- raise SQLParseError('User escaped identifiers must escape extra quotes')
- else:
- further_identifiers = [identifier]
-
- if not already_quoted:
- try:
- dot = identifier.index('.')
- except ValueError:
- identifier = identifier.replace(quote_char, quote_char * 2)
- identifier = ''.join((quote_char, identifier, quote_char))
- further_identifiers = [identifier]
- else:
- if dot == 0 or dot >= len(identifier) - 1:
- identifier = identifier.replace(quote_char, quote_char * 2)
- identifier = ''.join((quote_char, identifier, quote_char))
- further_identifiers = [identifier]
- else:
- first_identifier = identifier[:dot]
- next_identifier = identifier[dot + 1:]
- further_identifiers = _identifier_parse(next_identifier, quote_char)
- first_identifier = first_identifier.replace(quote_char, quote_char * 2)
- first_identifier = ''.join((quote_char, first_identifier, quote_char))
- further_identifiers.insert(0, first_identifier)
-
- return further_identifiers
-
-
-def pg_quote_identifier(identifier, id_type):
- identifier_fragments = _identifier_parse(identifier, quote_char='"')
- if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
- raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
- return '.'.join(identifier_fragments)
-
-
-def mysql_quote_identifier(identifier, id_type):
- identifier_fragments = _identifier_parse(identifier, quote_char='`')
- if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
- raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
-
- special_cased_fragments = []
- for fragment in identifier_fragments:
- if fragment == '`*`':
- special_cased_fragments.append('*')
- else:
- special_cased_fragments.append(fragment)
-
- return '.'.join(special_cased_fragments)
diff --git a/lib/ansible/module_utils/digital_ocean.py b/lib/ansible/module_utils/digital_ocean.py
deleted file mode 100644
index fc30343e28..0000000000
--- a/lib/ansible/module_utils/digital_ocean.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Ansible Project 2017
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import json
-import os
-from ansible.module_utils.urls import fetch_url
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-
-
-class Response(object):
-
- def __init__(self, resp, info):
- self.body = None
- if resp:
- self.body = resp.read()
- self.info = info
-
- @property
- def json(self):
- if not self.body:
- if "body" in self.info:
- return json.loads(to_text(self.info["body"]))
- return None
- try:
- return json.loads(to_text(self.body))
- except ValueError:
- return None
-
- @property
- def status_code(self):
- return self.info["status"]
-
-
-class DigitalOceanHelper:
-
- def __init__(self, module):
- self.module = module
- self.baseurl = 'https://api.digitalocean.com/v2'
- self.timeout = module.params.get('timeout', 30)
- self.oauth_token = module.params.get('oauth_token')
- self.headers = {'Authorization': 'Bearer {0}'.format(self.oauth_token),
- 'Content-type': 'application/json'}
-
- # Check if api_token is valid or not
- response = self.get('account')
- if response.status_code == 401:
- self.module.fail_json(msg='Failed to login using API token, please verify validity of API token.')
-
- def _url_builder(self, path):
- if path[0] == '/':
- path = path[1:]
- return '%s/%s' % (self.baseurl, path)
-
- def send(self, method, path, data=None):
- url = self._url_builder(path)
- data = self.module.jsonify(data)
-
- resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=self.timeout)
-
- return Response(resp, info)
-
- def get(self, path, data=None):
- return self.send('GET', path, data)
-
- def put(self, path, data=None):
- return self.send('PUT', path, data)
-
- def post(self, path, data=None):
- return self.send('POST', path, data)
-
- def delete(self, path, data=None):
- return self.send('DELETE', path, data)
-
- @staticmethod
- def digital_ocean_argument_spec():
- return dict(
- validate_certs=dict(type='bool', required=False, default=True),
- oauth_token=dict(
- no_log=True,
- # Support environment variable for DigitalOcean OAuth Token
- fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN', 'OAUTH_TOKEN']),
- required=False,
- aliases=['api_token'],
- ),
- timeout=dict(type='int', default=30),
- )
-
- def get_paginated_data(self, base_url=None, data_key_name=None, data_per_page=40, expected_status_code=200):
- """
- Function to get all paginated data from given URL
- Args:
- base_url: Base URL to get data from
- data_key_name: Name of data key value
- data_per_page: Number results per page (Default: 40)
- expected_status_code: Expected returned code from DigitalOcean (Default: 200)
- Returns: List of data
-
- """
- page = 1
- has_next = True
- ret_data = []
- status_code = None
- response = None
- while has_next or status_code != expected_status_code:
- required_url = "{0}page={1}&per_page={2}".format(base_url, page, data_per_page)
- response = self.get(required_url)
- status_code = response.status_code
- # stop if any error during pagination
- if status_code != expected_status_code:
- break
- page += 1
- ret_data.extend(response.json[data_key_name])
- has_next = "pages" in response.json["links"] and "next" in response.json["links"]["pages"]
-
- if status_code != expected_status_code:
- msg = "Failed to fetch %s from %s" % (data_key_name, base_url)
- if response:
- msg += " due to error : %s" % response.json['message']
- self.module.fail_json(msg=msg)
-
- return ret_data
diff --git a/lib/ansible/module_utils/dimensiondata.py b/lib/ansible/module_utils/dimensiondata.py
deleted file mode 100644
index 179c3eff9c..0000000000
--- a/lib/ansible/module_utils/dimensiondata.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2016 Dimension Data
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
-# Authors:
-# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
-# - Mark Maglana <mmaglana@gmail.com>
-# - Adam Friedman <tintoy@tintoy.io>
-#
-# Common functionality to be used by versious module components
-
-import os
-import re
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.six.moves import configparser
-from os.path import expanduser
-from uuid import UUID
-
-LIBCLOUD_IMP_ERR = None
-try:
- from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus
- from libcloud.compute.base import Node, NodeLocation
- from libcloud.compute.providers import get_driver
- from libcloud.compute.types import Provider
-
- import libcloud.security
-
- HAS_LIBCLOUD = True
-except ImportError:
- LIBCLOUD_IMP_ERR = traceback.format_exc()
- HAS_LIBCLOUD = False
-
-# MCP 2.x version patten for location (datacenter) names.
-#
-# Note that this is not a totally reliable way of determining MCP version.
-# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
-# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version
-# by specifying it in the module parameters.
-MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*")
-
-
-class DimensionDataModule(object):
- """
- The base class containing common functionality used by Dimension Data modules for Ansible.
- """
-
- def __init__(self, module):
- """
- Create a new DimensionDataModule.
-
- Will fail if Apache libcloud is not present.
-
- :param module: The underlying Ansible module.
- :type module: AnsibleModule
- """
-
- self.module = module
-
- if not HAS_LIBCLOUD:
- self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR)
-
- # Credentials are common to all Dimension Data modules.
- credentials = self.get_credentials()
- self.user_id = credentials['user_id']
- self.key = credentials['key']
-
- # Region and location are common to all Dimension Data modules.
- region = self.module.params['region']
- self.region = 'dd-{0}'.format(region)
- self.location = self.module.params['location']
-
- libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs']
-
- self.driver = get_driver(Provider.DIMENSIONDATA)(
- self.user_id,
- self.key,
- region=self.region
- )
-
- # Determine the MCP API version (this depends on the target datacenter).
- self.mcp_version = self.get_mcp_version(self.location)
-
- # Optional "wait-for-completion" arguments
- if 'wait' in self.module.params:
- self.wait = self.module.params['wait']
- self.wait_time = self.module.params['wait_time']
- self.wait_poll_interval = self.module.params['wait_poll_interval']
- else:
- self.wait = False
- self.wait_time = 0
- self.wait_poll_interval = 0
-
- def get_credentials(self):
- """
- Get user_id and key from module configuration, environment, or dotfile.
- Order of priority is module, environment, dotfile.
-
- To set in environment:
-
- export MCP_USER='myusername'
- export MCP_PASSWORD='mypassword'
-
- To set in dot file place a file at ~/.dimensiondata with
- the following contents:
-
- [dimensiondatacloud]
- MCP_USER: myusername
- MCP_PASSWORD: mypassword
- """
-
- if not HAS_LIBCLOUD:
- self.module.fail_json(msg='libcloud is required for this module.')
-
- user_id = None
- key = None
-
- # First, try the module configuration
- if 'mcp_user' in self.module.params:
- if 'mcp_password' not in self.module.params:
- self.module.fail_json(
- msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
- )
-
- user_id = self.module.params['mcp_user']
- key = self.module.params['mcp_password']
-
- # Fall back to environment
- if not user_id or not key:
- user_id = os.environ.get('MCP_USER', None)
- key = os.environ.get('MCP_PASSWORD', None)
-
- # Finally, try dotfile (~/.dimensiondata)
- if not user_id or not key:
- home = expanduser('~')
- config = configparser.RawConfigParser()
- config.read("%s/.dimensiondata" % home)
-
- try:
- user_id = config.get("dimensiondatacloud", "MCP_USER")
- key = config.get("dimensiondatacloud", "MCP_PASSWORD")
- except (configparser.NoSectionError, configparser.NoOptionError):
- pass
-
- # One or more credentials not found. Function can't recover from this
- # so it has to raise an error instead of fail silently.
- if not user_id:
- raise MissingCredentialsError("Dimension Data user id not found")
- elif not key:
- raise MissingCredentialsError("Dimension Data key not found")
-
- # Both found, return data
- return dict(user_id=user_id, key=key)
-
- def get_mcp_version(self, location):
- """
- Get the MCP version for the specified location.
- """
-
- location = self.driver.ex_get_location_by_id(location)
- if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
- return '2.0'
-
- return '1.0'
-
- def get_network_domain(self, locator, location):
- """
- Retrieve a network domain by its name or Id.
- """
-
- if is_uuid(locator):
- network_domain = self.driver.ex_get_network_domain(locator)
- else:
- matching_network_domains = [
- network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
- if network_domain.name == locator
- ]
-
- if matching_network_domains:
- network_domain = matching_network_domains[0]
- else:
- network_domain = None
-
- if network_domain:
- return network_domain
-
- raise UnknownNetworkError("Network '%s' could not be found" % locator)
-
- def get_vlan(self, locator, location, network_domain):
- """
- Get a VLAN object by its name or id
- """
- if is_uuid(locator):
- vlan = self.driver.ex_get_vlan(locator)
- else:
- matching_vlans = [
- vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
- if vlan.name == locator
- ]
-
- if matching_vlans:
- vlan = matching_vlans[0]
- else:
- vlan = None
-
- if vlan:
- return vlan
-
- raise UnknownVLANError("VLAN '%s' could not be found" % locator)
-
- @staticmethod
- def argument_spec(**additional_argument_spec):
- """
- Build an argument specification for a Dimension Data module.
- :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
- :return: A dict containing the argument specification.
- """
-
- spec = dict(
- region=dict(type='str', default='na'),
- mcp_user=dict(type='str', required=False),
- mcp_password=dict(type='str', required=False, no_log=True),
- location=dict(type='str', required=True),
- validate_certs=dict(type='bool', required=False, default=True)
- )
-
- if additional_argument_spec:
- spec.update(additional_argument_spec)
-
- return spec
-
- @staticmethod
- def argument_spec_with_wait(**additional_argument_spec):
- """
- Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.
- :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
- :return: A dict containing the argument specification.
- """
-
- spec = DimensionDataModule.argument_spec(
- wait=dict(type='bool', required=False, default=False),
- wait_time=dict(type='int', required=False, default=600),
- wait_poll_interval=dict(type='int', required=False, default=2)
- )
-
- if additional_argument_spec:
- spec.update(additional_argument_spec)
-
- return spec
-
- @staticmethod
- def required_together(*additional_required_together):
- """
- Get the basic argument specification for Dimension Data modules indicating which arguments are must be specified together.
- :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together.
- :return: An array containing the argument specifications.
- """
-
- required_together = [
- ['mcp_user', 'mcp_password']
- ]
-
- if additional_required_together:
- required_together.extend(additional_required_together)
-
- return required_together
-
-
-class LibcloudNotFound(Exception):
- """
- Exception raised when Apache libcloud cannot be found.
- """
-
- pass
-
-
-class MissingCredentialsError(Exception):
- """
- Exception raised when credentials for Dimension Data CloudControl cannot be found.
- """
-
- pass
-
-
-class UnknownNetworkError(Exception):
- """
- Exception raised when a network or network domain cannot be found.
- """
-
- pass
-
-
-class UnknownVLANError(Exception):
- """
- Exception raised when a VLAN cannot be found.
- """
-
- pass
-
-
-def get_dd_regions():
- """
- Get the list of available regions whose vendor is Dimension Data.
- """
-
- # Get endpoints
- all_regions = API_ENDPOINTS.keys()
-
- # Only Dimension Data endpoints (no prefix)
- regions = [region[3:] for region in all_regions if region.startswith('dd-')]
-
- return regions
-
-
-def is_uuid(u, version=4):
- """
- Test if valid v4 UUID
- """
- try:
- uuid_obj = UUID(u, version=version)
-
- return str(uuid_obj) == u
- except ValueError:
- return False
diff --git a/lib/ansible/module_utils/docker/__init__.py b/lib/ansible/module_utils/docker/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/docker/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/docker/common.py b/lib/ansible/module_utils/docker/common.py
deleted file mode 100644
index 03307250d6..0000000000
--- a/lib/ansible/module_utils/docker/common.py
+++ /dev/null
@@ -1,1022 +0,0 @@
-#
-# Copyright 2016 Red Hat | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-import os
-import platform
-import re
-import sys
-from datetime import timedelta
-from distutils.version import LooseVersion
-
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
-from ansible.module_utils.common._collections_compat import Mapping, Sequence
-from ansible.module_utils.six import string_types
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
-
-HAS_DOCKER_PY = True
-HAS_DOCKER_PY_2 = False
-HAS_DOCKER_PY_3 = False
-HAS_DOCKER_ERROR = None
-
-try:
- from requests.exceptions import SSLError
- from docker import __version__ as docker_version
- from docker.errors import APIError, NotFound, TLSParameterError
- from docker.tls import TLSConfig
- from docker import auth
-
- if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
- HAS_DOCKER_PY_3 = True
- from docker import APIClient as Client
- elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
- HAS_DOCKER_PY_2 = True
- from docker import APIClient as Client
- else:
- from docker import Client
-
-except ImportError as exc:
- HAS_DOCKER_ERROR = str(exc)
- HAS_DOCKER_PY = False
-
-
-# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
-# to ensure the user does not have both ``docker`` and ``docker-py`` modules
-# installed, as they utilize the same namespace are are incompatible
-try:
- # docker (Docker SDK for Python >= 2.0.0)
- import docker.models # noqa: F401
- HAS_DOCKER_MODELS = True
-except ImportError:
- HAS_DOCKER_MODELS = False
-
-try:
- # docker-py (Docker SDK for Python < 2.0.0)
- import docker.ssladapter # noqa: F401
- HAS_DOCKER_SSLADAPTER = True
-except ImportError:
- HAS_DOCKER_SSLADAPTER = False
-
-
-try:
- from requests.exceptions import RequestException
-except ImportError:
- # Either docker-py is no longer using requests, or docker-py isn't around either,
- # or docker-py's dependency requests is missing. In any case, define an exception
- # class RequestException so that our code doesn't break.
- class RequestException(Exception):
- pass
-
-
-DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
-DEFAULT_TLS = False
-DEFAULT_TLS_VERIFY = False
-DEFAULT_TLS_HOSTNAME = 'localhost'
-MIN_DOCKER_VERSION = "1.8.0"
-DEFAULT_TIMEOUT_SECONDS = 60
-
-DOCKER_COMMON_ARGS = dict(
- docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
- tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
- api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
- timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
- ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
- client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
- client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
- ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
- tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
- validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
- debug=dict(type='bool', default=False)
-)
-
-DOCKER_MUTUALLY_EXCLUSIVE = []
-
-DOCKER_REQUIRED_TOGETHER = [
- ['client_cert', 'client_key']
-]
-
-DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
-EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
-BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
-
-
-if not HAS_DOCKER_PY:
- docker_version = None
-
- # No Docker SDK for Python. Create a place holder client to allow
- # instantiation of AnsibleModule and proper error handing
- class Client(object): # noqa: F811
- def __init__(self, **kwargs):
- pass
-
- class APIError(Exception): # noqa: F811
- pass
-
- class NotFound(Exception): # noqa: F811
- pass
-
-
-def is_image_name_id(name):
- """Check whether the given image name is in fact an image ID (hash)."""
- if re.match('^sha256:[0-9a-fA-F]{64}$', name):
- return True
- return False
-
-
-def is_valid_tag(tag, allow_empty=False):
- """Check whether the given string is a valid docker tag name."""
- if not tag:
- return allow_empty
- # See here ("Extended description") for a definition what tags can be:
- # https://docs.docker.com/engine/reference/commandline/tag/
- return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
-
-
-def sanitize_result(data):
- """Sanitize data object for return to Ansible.
-
- When the data object contains types such as docker.types.containers.HostConfig,
- Ansible will fail when these are returned via exit_json or fail_json.
- HostConfig is derived from dict, but its constructor requires additional
- arguments. This function sanitizes data structures by recursively converting
- everything derived from dict to dict and everything derived from list (and tuple)
- to a list.
- """
- if isinstance(data, dict):
- return dict((k, sanitize_result(v)) for k, v in data.items())
- elif isinstance(data, (list, tuple)):
- return [sanitize_result(v) for v in data]
- else:
- return data
-
-
-class DockerBaseClass(object):
-
- def __init__(self):
- self.debug = False
-
- def log(self, msg, pretty_print=False):
- pass
- # if self.debug:
- # log_file = open('docker.log', 'a')
- # if pretty_print:
- # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
- # log_file.write(u'\n')
- # else:
- # log_file.write(msg + u'\n')
-
-
-def update_tls_hostname(result):
- if result['tls_hostname'] is None:
- # get default machine name from the url
- parsed_url = urlparse(result['docker_host'])
- if ':' in parsed_url.netloc:
- result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
- else:
- result['tls_hostname'] = parsed_url
-
-
-def _get_tls_config(fail_function, **kwargs):
- try:
- tls_config = TLSConfig(**kwargs)
- return tls_config
- except TLSParameterError as exc:
- fail_function("TLS config error: %s" % exc)
-
-
-def get_connect_params(auth, fail_function):
- if auth['tls'] or auth['tls_verify']:
- auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
-
- if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
- # TLS with certs and host verification
- if auth['cacert_path']:
- tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
- ca_cert=auth['cacert_path'],
- verify=True,
- assert_hostname=auth['tls_hostname'],
- ssl_version=auth['ssl_version'],
- fail_function=fail_function)
- else:
- tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
- verify=True,
- assert_hostname=auth['tls_hostname'],
- ssl_version=auth['ssl_version'],
- fail_function=fail_function)
-
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- if auth['tls_verify'] and auth['cacert_path']:
- # TLS with cacert only
- tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
- assert_hostname=auth['tls_hostname'],
- verify=True,
- ssl_version=auth['ssl_version'],
- fail_function=fail_function)
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- if auth['tls_verify']:
- # TLS with verify and no certs
- tls_config = _get_tls_config(verify=True,
- assert_hostname=auth['tls_hostname'],
- ssl_version=auth['ssl_version'],
- fail_function=fail_function)
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- if auth['tls'] and auth['cert_path'] and auth['key_path']:
- # TLS with certs and no host verification
- tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
- verify=False,
- ssl_version=auth['ssl_version'],
- fail_function=fail_function)
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- if auth['tls']:
- # TLS with no certs and not host verification
- tls_config = _get_tls_config(verify=False,
- ssl_version=auth['ssl_version'],
- fail_function=fail_function)
- return dict(base_url=auth['docker_host'],
- tls=tls_config,
- version=auth['api_version'],
- timeout=auth['timeout'])
-
- # No TLS
- return dict(base_url=auth['docker_host'],
- version=auth['api_version'],
- timeout=auth['timeout'])
-
-
-DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
-DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
-DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
- "Hint: if you do not need Python 2.6 support, try "
- "`pip uninstall docker-py` instead, followed by `pip install docker`.")
-
-
-class AnsibleDockerClient(Client):
-
- def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
- required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
- min_docker_api_version=None, option_minimal_versions=None,
- option_minimal_versions_ignore_params=None, fail_results=None):
-
- # Modules can put information in here which will always be returned
- # in case client.fail() is called.
- self.fail_results = fail_results or {}
-
- merged_arg_spec = dict()
- merged_arg_spec.update(DOCKER_COMMON_ARGS)
- if argument_spec:
- merged_arg_spec.update(argument_spec)
- self.arg_spec = merged_arg_spec
-
- mutually_exclusive_params = []
- mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
- if mutually_exclusive:
- mutually_exclusive_params += mutually_exclusive
-
- required_together_params = []
- required_together_params += DOCKER_REQUIRED_TOGETHER
- if required_together:
- required_together_params += required_together
-
- self.module = AnsibleModule(
- argument_spec=merged_arg_spec,
- supports_check_mode=supports_check_mode,
- mutually_exclusive=mutually_exclusive_params,
- required_together=required_together_params,
- required_if=required_if)
-
- NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
-
- self.docker_py_version = LooseVersion(docker_version)
-
- if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
- self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
- "SDK for Python) installed together as they use the same namespace and cause a corrupt "
- "installation. Please uninstall both packages, and re-install only the docker-py or docker "
- "python module (for %s's Python %s). It is recommended to install the docker module if no "
- "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
- "can leave the other module in a broken state." % (platform.node(), sys.executable))
-
- if not HAS_DOCKER_PY:
- if NEEDS_DOCKER_PY2:
- msg = missing_required_lib("Docker SDK for Python: docker")
- msg = msg + ", for example via `pip install docker`. The error was: %s"
- else:
- msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
- msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
- self.fail(msg % HAS_DOCKER_ERROR)
-
- if self.docker_py_version < LooseVersion(min_docker_version):
- msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
- if not NEEDS_DOCKER_PY2:
- # The minimal required version is < 2.0 (and the current version as well).
- # Advertise docker (instead of docker-py) for non-Python-2.6 users.
- msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
- elif docker_version < LooseVersion('2.0'):
- msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
- else:
- msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
- self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
-
- self.debug = self.module.params.get('debug')
- self.check_mode = self.module.check_mode
- self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
-
- try:
- super(AnsibleDockerClient, self).__init__(**self._connect_params)
- self.docker_api_version_str = self.version()['ApiVersion']
- except APIError as exc:
- self.fail("Docker API error: %s" % exc)
- except Exception as exc:
- self.fail("Error connecting: %s" % exc)
-
- self.docker_api_version = LooseVersion(self.docker_api_version_str)
- if min_docker_api_version is not None:
- if self.docker_api_version < LooseVersion(min_docker_api_version):
- self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
-
- if option_minimal_versions is not None:
- self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
-
- def log(self, msg, pretty_print=False):
- pass
- # if self.debug:
- # log_file = open('docker.log', 'a')
- # if pretty_print:
- # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
- # log_file.write(u'\n')
- # else:
- # log_file.write(msg + u'\n')
-
- def fail(self, msg, **kwargs):
- self.fail_results.update(kwargs)
- self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
-
- @staticmethod
- def _get_value(param_name, param_value, env_variable, default_value):
- if param_value is not None:
- # take module parameter value
- if param_value in BOOLEANS_TRUE:
- return True
- if param_value in BOOLEANS_FALSE:
- return False
- return param_value
-
- if env_variable is not None:
- env_value = os.environ.get(env_variable)
- if env_value is not None:
- # take the env variable value
- if param_name == 'cert_path':
- return os.path.join(env_value, 'cert.pem')
- if param_name == 'cacert_path':
- return os.path.join(env_value, 'ca.pem')
- if param_name == 'key_path':
- return os.path.join(env_value, 'key.pem')
- if env_value in BOOLEANS_TRUE:
- return True
- if env_value in BOOLEANS_FALSE:
- return False
- return env_value
-
- # take the default
- return default_value
-
- @property
- def auth_params(self):
- # Get authentication credentials.
- # Precedence: module parameters-> environment variables-> defaults.
-
- self.log('Getting credentials')
-
- params = dict()
- for key in DOCKER_COMMON_ARGS:
- params[key] = self.module.params.get(key)
-
- if self.module.params.get('use_tls'):
- # support use_tls option in docker_image.py. This will be deprecated.
- use_tls = self.module.params.get('use_tls')
- if use_tls == 'encrypt':
- params['tls'] = True
- if use_tls == 'verify':
- params['validate_certs'] = True
-
- result = dict(
- docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
- DEFAULT_DOCKER_HOST),
- tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
- 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
- api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
- 'auto'),
- cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
- cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
- key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
- ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
- tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
- tls_verify=self._get_value('tls_verfy', params['validate_certs'], 'DOCKER_TLS_VERIFY',
- DEFAULT_TLS_VERIFY),
- timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
- DEFAULT_TIMEOUT_SECONDS),
- )
-
- update_tls_hostname(result)
-
- return result
-
- def _handle_ssl_error(self, error):
- match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
- if match:
- self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
- "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
- "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
- "setting the `tls` parameter to true."
- % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
- self.fail("SSL Exception: %s" % (error))
-
- def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
- self.option_minimal_versions = dict()
- for option in self.module.argument_spec:
- if ignore_params is not None:
- if option in ignore_params:
- continue
- self.option_minimal_versions[option] = dict()
- self.option_minimal_versions.update(option_minimal_versions)
-
- for option, data in self.option_minimal_versions.items():
- # Test whether option is supported, and store result
- support_docker_py = True
- support_docker_api = True
- if 'docker_py_version' in data:
- support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
- if 'docker_api_version' in data:
- support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
- data['supported'] = support_docker_py and support_docker_api
- # Fail if option is not supported but used
- if not data['supported']:
- # Test whether option is specified
- if 'detect_usage' in data:
- used = data['detect_usage'](self)
- else:
- used = self.module.params.get(option) is not None
- if used and 'default' in self.module.argument_spec[option]:
- used = self.module.params[option] != self.module.argument_spec[option]['default']
- if used:
- # If the option is used, compose error message.
- if 'usage_msg' in data:
- usg = data['usage_msg']
- else:
- usg = 'set %s option' % (option, )
- if not support_docker_api:
- msg = 'Docker API version is %s. Minimum version required is %s to %s.'
- msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
- elif not support_docker_py:
- msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
- if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
- msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
- elif self.docker_py_version < LooseVersion('2.0.0'):
- msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
- else:
- msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
- msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
- else:
- # should not happen
- msg = 'Cannot %s with your configuration.' % (usg, )
- self.fail(msg)
-
- def get_container_by_id(self, container_id):
- try:
- self.log("Inspecting container Id %s" % container_id)
- result = self.inspect_container(container=container_id)
- self.log("Completed container inspection")
- return result
- except NotFound as dummy:
- return None
- except Exception as exc:
- self.fail("Error inspecting container: %s" % exc)
-
- def get_container(self, name=None):
- '''
- Lookup a container and return the inspection results.
- '''
- if name is None:
- return None
-
- search_name = name
- if not name.startswith('/'):
- search_name = '/' + name
-
- result = None
- try:
- for container in self.containers(all=True):
- self.log("testing container: %s" % (container['Names']))
- if isinstance(container['Names'], list) and search_name in container['Names']:
- result = container
- break
- if container['Id'].startswith(name):
- result = container
- break
- if container['Id'] == name:
- result = container
- break
- except SSLError as exc:
- self._handle_ssl_error(exc)
- except Exception as exc:
- self.fail("Error retrieving container list: %s" % exc)
-
- if result is None:
- return None
-
- return self.get_container_by_id(result['Id'])
-
- def get_network(self, name=None, network_id=None):
- '''
- Lookup a network and return the inspection results.
- '''
- if name is None and network_id is None:
- return None
-
- result = None
-
- if network_id is None:
- try:
- for network in self.networks():
- self.log("testing network: %s" % (network['Name']))
- if name == network['Name']:
- result = network
- break
- if network['Id'].startswith(name):
- result = network
- break
- except SSLError as exc:
- self._handle_ssl_error(exc)
- except Exception as exc:
- self.fail("Error retrieving network list: %s" % exc)
-
- if result is not None:
- network_id = result['Id']
-
- if network_id is not None:
- try:
- self.log("Inspecting network Id %s" % network_id)
- result = self.inspect_network(network_id)
- self.log("Completed network inspection")
- except NotFound as dummy:
- return None
- except Exception as exc:
- self.fail("Error inspecting network: %s" % exc)
-
- return result
-
- def find_image(self, name, tag):
- '''
- Lookup an image (by name and tag) and return the inspection results.
- '''
- if not name:
- return None
-
- self.log("Find image %s:%s" % (name, tag))
- images = self._image_lookup(name, tag)
- if not images:
- # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
- registry, repo_name = auth.resolve_repository_name(name)
- if registry == 'docker.io':
- # If docker.io is explicitly there in name, the image
- # isn't found in some cases (#41509)
- self.log("Check for docker.io image: %s" % repo_name)
- images = self._image_lookup(repo_name, tag)
- if not images and repo_name.startswith('library/'):
- # Sometimes library/xxx images are not found
- lookup = repo_name[len('library/'):]
- self.log("Check for docker.io image: %s" % lookup)
- images = self._image_lookup(lookup, tag)
- if not images:
- # Last case: if docker.io wasn't there, it can be that
- # the image wasn't found either (#15586)
- lookup = "%s/%s" % (registry, repo_name)
- self.log("Check for docker.io image: %s" % lookup)
- images = self._image_lookup(lookup, tag)
-
- if len(images) > 1:
- self.fail("Registry returned more than one result for %s:%s" % (name, tag))
-
- if len(images) == 1:
- try:
- inspection = self.inspect_image(images[0]['Id'])
- except Exception as exc:
- self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
- return inspection
-
- self.log("Image %s:%s not found." % (name, tag))
- return None
-
- def find_image_by_id(self, image_id):
- '''
- Lookup an image (by ID) and return the inspection results.
- '''
- if not image_id:
- return None
-
- self.log("Find image %s (by ID)" % image_id)
- try:
- inspection = self.inspect_image(image_id)
- except Exception as exc:
- self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
- return inspection
-
- def _image_lookup(self, name, tag):
- '''
- Including a tag in the name parameter sent to the Docker SDK for Python images method
- does not work consistently. Instead, get the result set for name and manually check
- if the tag exists.
- '''
- try:
- response = self.images(name=name)
- except Exception as exc:
- self.fail("Error searching for image %s - %s" % (name, str(exc)))
- images = response
- if tag:
- lookup = "%s:%s" % (name, tag)
- lookup_digest = "%s@%s" % (name, tag)
- images = []
- for image in response:
- tags = image.get('RepoTags')
- digests = image.get('RepoDigests')
- if (tags and lookup in tags) or (digests and lookup_digest in digests):
- images = [image]
- break
- return images
-
- def pull_image(self, name, tag="latest"):
- '''
- Pull an image
- '''
- self.log("Pulling image %s:%s" % (name, tag))
- old_tag = self.find_image(name, tag)
- try:
- for line in self.pull(name, tag=tag, stream=True, decode=True):
- self.log(line, pretty_print=True)
- if line.get('error'):
- if line.get('errorDetail'):
- error_detail = line.get('errorDetail')
- self.fail("Error pulling %s - code: %s message: %s" % (name,
- error_detail.get('code'),
- error_detail.get('message')))
- else:
- self.fail("Error pulling %s - %s" % (name, line.get('error')))
- except Exception as exc:
- self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
-
- new_tag = self.find_image(name, tag)
-
- return new_tag, old_tag == new_tag
-
- def report_warnings(self, result, warnings_key=None):
- '''
- Checks result of client operation for warnings, and if present, outputs them.
-
- warnings_key should be a list of keys used to crawl the result dictionary.
- For example, if warnings_key == ['a', 'b'], the function will consider
- result['a']['b'] if these keys exist. If the result is a non-empty string, it
- will be reported as a warning. If the result is a list, every entry will be
- reported as a warning.
-
- In most cases (if warnings are returned at all), warnings_key should be
- ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
- '''
- if warnings_key is None:
- warnings_key = ['Warnings']
- for key in warnings_key:
- if not isinstance(result, Mapping):
- return
- result = result.get(key)
- if isinstance(result, Sequence):
- for warning in result:
- self.module.warn('Docker warning: {0}'.format(warning))
- elif isinstance(result, string_types) and result:
- self.module.warn('Docker warning: {0}'.format(result))
-
    def inspect_distribution(self, image, **kwargs):
        '''
        Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
        since prior versions did not support accessing private repositories.

        :param image: image reference (may include a registry prefix)
        :return: parsed JSON distribution data for the image
        '''
        if self.docker_py_version < LooseVersion('4.0.0'):
            registry = auth.resolve_repository_name(image)[0]
            # NOTE(review): `self` is passed where the SDK expects a config
            # object; presumably this client exposes a compatible auth config
            # attribute - confirm against docker-py's auth.get_config_header.
            header = auth.get_config_header(self, registry)
            if header:
                # Raw API call with explicit auth header for private registries.
                return self._result(self._get(
                    self._url('/distribution/{0}/json', image),
                    headers={'X-Registry-Auth': header}
                ), json=True)
        # SDK >= 4.0.0 handles private repositories itself.
        return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
-
-
def compare_dict_allow_more_present(av, bv):
    '''
    Return True when every key/value pair of ``av`` is also present in ``bv``
    (``bv`` may contain additional keys).
    '''
    return all(key in bv and bv[key] == value for key, value in av.items())
-
-
def compare_generic(a, b, method, datatype):
    '''
    Compare values a and b as described by method and datatype.

    Returns ``True`` if the values compare equal, and ``False`` if not.

    ``a`` is usually the module's parameter, while ``b`` is a property
    of the current object. ``a`` must not be ``None`` (except for
    ``datatype == 'value'``).

    Valid values for ``method`` are:
      - ``ignore`` (always compare as equal);
      - ``strict`` (only compare if really equal)
      - ``allow_more_present`` (allow b to have elements which a does not have).

    Valid values for ``datatype`` are:
      - ``value``: for simple values (strings, numbers, ...);
      - ``list``: for ``list``s or ``tuple``s where order matters;
      - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
        matter;
      - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
        not matter and which contain ``dict``s; ``allow_more_present`` is used
        for the ``dict``s, and these are assumed to be dictionaries of values;
      - ``dict``: for dictionaries of values.
    '''
    if method == 'ignore':
        return True

    # Handle the cases where at least one side is None.
    if a is None or b is None:
        # Both None: equal.
        if a == b:
            return True
        # For simple values, one-sided None means different.
        if datatype == 'value':
            return False
        # allow_more_present tolerates a missing (None) parameter.
        if method == 'allow_more_present' and a is None:
            return True
        # Otherwise the non-None container must be empty to count as equal.
        return len(b if a is None else a) == 0

    # Both sides are non-None from here on.
    if datatype == 'value':
        return a == b

    if datatype == 'list':
        if method == 'strict':
            return a == b
        # allow_more_present: a must appear in b as an ordered subsequence.
        remaining = iter(b)
        for wanted in a:
            for candidate in remaining:
                if candidate == wanted:
                    break
            else:
                return False
        return True

    if datatype == 'dict':
        if method == 'strict':
            return a == b
        return compare_dict_allow_more_present(a, b)

    if datatype == 'set':
        if method == 'strict':
            return set(a) == set(b)
        return set(b) >= set(a)

    if datatype == 'set(dict)':
        for av in a:
            if not any(compare_dict_allow_more_present(av, bv) for bv in b):
                return False
        if method == 'strict':
            # If we would know that both a and b do not contain duplicates,
            # we could simply compare len(a) to len(b) to finish this test.
            # We can assume that b has no duplicates (as it is returned by
            # docker), but we don't know for a.
            for bv in b:
                if not any(compare_dict_allow_more_present(av, bv) for av in a):
                    return False
        return True
-
-
class DifferenceTracker(object):
    '''
    Collects named differences between requested parameters and the
    currently active configuration of an object.
    '''

    def __init__(self):
        self._diff = []

    def add(self, name, parameter=None, active=None):
        '''
        Record one difference: ``parameter`` is the desired value,
        ``active`` the value currently in effect.
        '''
        entry = dict(name=name, parameter=parameter, active=active)
        self._diff.append(entry)

    def merge(self, other_tracker):
        '''
        Absorb every difference recorded by another tracker.
        '''
        self._diff += other_tracker._diff

    @property
    def empty(self):
        # True when no difference has been recorded.
        return not self._diff

    def get_before_after(self):
        '''
        Return texts ``before`` and ``after``.
        '''
        before = dict((entry['name'], entry['active']) for entry in self._diff)
        after = dict((entry['name'], entry['parameter']) for entry in self._diff)
        return before, after

    def has_difference_for(self, name):
        '''
        Returns a boolean if a difference exists for name
        '''
        return any(entry['name'] == name for entry in self._diff)

    def get_legacy_docker_container_diffs(self):
        '''
        Return differences in the docker_container legacy format.
        '''
        return [
            {entry['name']: dict(parameter=entry['parameter'], container=entry['active'])}
            for entry in self._diff
        ]

    def get_legacy_docker_diffs(self):
        '''
        Return just the names of all recorded differences.
        '''
        return [entry['name'] for entry in self._diff]
-
-
def clean_dict_booleans_for_docker_api(data):
    '''
    Go doesn't like Python booleans 'True' or 'False', while Ansible is just
    fine with them in YAML. As such, they need to be converted in cases where
    we pass dictionaries to the Docker API (e.g. docker_network's
    driver_options and docker_prune's filters).
    '''
    converted = dict()
    if data is None:
        return converted
    for key, value in data.items():
        if value is True:
            converted[str(key)] = 'true'
        elif value is False:
            converted[str(key)] = 'false'
        else:
            converted[str(key)] = str(value)
    return converted
-
-
def convert_duration_to_nanosecond(time_str):
    """
    Convert a duration string such as ``'1h30m'`` or ``'500ms'`` into
    nanoseconds. Raises ValueError for non-string input or unknown units.
    """
    if not isinstance(time_str, str):
        raise ValueError('Missing unit in duration - %s' % time_str)

    pattern = re.compile(
        r'^(((?P<hours>\d+)h)?'
        r'((?P<minutes>\d+)m(?!s))?'
        r'((?P<seconds>\d+)s)?'
        r'((?P<milliseconds>\d+)ms)?'
        r'((?P<microseconds>\d+)us)?)$'
    )
    matched = pattern.match(time_str)
    if not matched:
        raise ValueError('Invalid time duration - %s' % time_str)

    # Keep only the units that actually appeared in the string.
    delta_kwargs = dict(
        (unit, int(amount))
        for unit, amount in matched.groupdict().items()
        if amount
    )
    delta = timedelta(**delta_kwargs)
    total_seconds = delta.seconds + delta.days * 24 * 3600
    return (delta.microseconds + total_seconds * 10 ** 6) * 10 ** 3
-
-
def parse_healthcheck(healthcheck):
    """
    Return dictionary of healthcheck parameters and boolean if
    healthcheck defined in image was requested to be disabled.
    """
    if not healthcheck or not healthcheck.get('test'):
        return None, None

    # All supported healthcheck parameters; durations are converted to
    # nanoseconds for the Docker API.
    supported_options = ('test', 'interval', 'timeout', 'start_period', 'retries')
    duration_options = ('interval', 'timeout', 'start_period')

    result = dict()
    for option in supported_options:
        if option not in healthcheck:
            continue
        if healthcheck.get(option) is None:
            # due to recursive argument_spec, all keys are always present
            # (but have default value None if not specified)
            continue
        if option in duration_options:
            nanoseconds = convert_duration_to_nanosecond(healthcheck.get(option))
            if nanoseconds:
                result[option] = nanoseconds
        elif healthcheck.get(option):
            result[option] = healthcheck.get(option)
            if option == 'test':
                if isinstance(result[option], (tuple, list)):
                    result[option] = [str(e) for e in result[option]]
                else:
                    # A bare string becomes a shell invocation.
                    result[option] = ['CMD-SHELL', str(result[option])]
            elif option == 'retries':
                try:
                    result[option] = int(result[option])
                except ValueError:
                    raise ValueError(
                        'Cannot parse number of retries for healthcheck. '
                        'Expected an integer, got "{0}".'.format(result[option])
                    )

    if result['test'] == ['NONE']:
        # If the user explicitly disables the healthcheck, return None
        # as the healthcheck object, and set disable_healthcheck to True
        return None, True

    return result, False
-
-
def omit_none_from_dict(d):
    """
    Return a copy of the dictionary with all keys with value None omitted.
    """
    return {k: v for k, v in d.items() if v is not None}
diff --git a/lib/ansible/module_utils/docker/swarm.py b/lib/ansible/module_utils/docker/swarm.py
deleted file mode 100644
index 55d94db06b..0000000000
--- a/lib/ansible/module_utils/docker/swarm.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
-# (c) Thierry Bouvet (@tbouvet)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-import json
-from time import sleep
-
-try:
- from docker.errors import APIError, NotFound
-except ImportError:
- # missing Docker SDK for Python handled in ansible.module_utils.docker.common
- pass
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.docker.common import (
- AnsibleDockerClient,
- LooseVersion,
-)
-
-
class AnsibleDockerSwarmClient(AnsibleDockerClient):
    # Docker client specialized for Swarm operations: node and service
    # inspection, role checks, and Swarm-membership queries. Several
    # methods round-trip data through json.dumps/json.loads to normalize
    # the SDK's response objects into plain builtin dicts.

    def __init__(self, **kwargs):
        super(AnsibleDockerSwarmClient, self).__init__(**kwargs)

    def get_swarm_node_id(self):
        """
        Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
        of Docker host the module is executed on
        :return:
            NodeID of host or 'None' if not part of Swarm
        """

        try:
            info = self.info()
        except APIError as exc:
            self.fail("Failed to get node information for %s" % to_native(exc))

        if info:
            # Normalize the SDK response into a plain dict.
            json_str = json.dumps(info, ensure_ascii=False)
            swarm_info = json.loads(json_str)
            if swarm_info['Swarm']['NodeID']:
                return swarm_info['Swarm']['NodeID']
        return None

    def check_if_swarm_node(self, node_id=None):
        """
        Checking if host is part of Docker Swarm. If 'node_id' is not provided it reads the Docker host
        system information looking if specific key in output exists. If 'node_id' is provided then it tries to
        read node information assuming it is run on Swarm manager. The get_node_inspect() method handles exception if
        it is not executed on Swarm manager

        :param node_id: Node identifier
        :return:
            bool: True if node is part of Swarm, False otherwise
        """

        if node_id is None:
            try:
                info = self.info()
            except APIError:
                self.fail("Failed to get host information.")

            if info:
                json_str = json.dumps(info, ensure_ascii=False)
                swarm_info = json.loads(json_str)
                if swarm_info['Swarm']['NodeID']:
                    return True
                if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
                    return True
            return False
        else:
            try:
                node_info = self.get_node_inspect(node_id=node_id)
            except APIError:
                # NOTE(review): bare return yields None (falsy) rather than
                # False, although the docstring promises a bool - confirm
                # callers only use this truthily.
                return

            if node_info['ID'] is not None:
                return True
            return False

    def check_if_swarm_manager(self):
        """
        Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
        is performed. The inspect_swarm() will fail if node is not a manager

        :return: True if node is Swarm Manager, False otherwise
        """

        try:
            self.inspect_swarm()
            return True
        except APIError:
            return False

    def fail_task_if_not_swarm_manager(self):
        """
        If host is not a swarm manager then Ansible task on this host should end with 'failed' state
        """
        if not self.check_if_swarm_manager():
            self.fail("Error running docker swarm module: must run on swarm manager node")

    def check_if_swarm_worker(self):
        """
        Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
        is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()

        :return: True if node is Swarm Worker, False otherwise
        """

        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
            return True
        return False

    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
        """
        Checks if node status on Swarm manager is 'down'. If node_id is provided it query manager about
        node specified in parameter, otherwise it query manager itself. If run on Swarm Worker node or
        host that is not part of Swarm it will fail the playbook

        :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            True if node is part of swarm but its state is down, False otherwise
        """

        if repeat_check < 1:
            repeat_check = 1

        if node_id is None:
            node_id = self.get_swarm_node_id()

        for retry in range(0, repeat_check):
            if retry > 0:
                # Give the node time to change state between attempts.
                sleep(5)
            node_info = self.get_node_inspect(node_id=node_id)
            if node_info['Status']['State'] == 'down':
                return True
        return False

    def get_node_inspect(self, node_id=None, skip_missing=False):
        """
        Returns Swarm node info as in 'docker node inspect' command about single node

        :param skip_missing: if True then function will return None instead of failing the task
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            Single node information structure
        """

        if node_id is None:
            node_id = self.get_swarm_node_id()

        if node_id is None:
            self.fail("Failed to get node information.")

        try:
            node_info = self.inspect_node(node_id=node_id)
        except APIError as exc:
            # 503: host is not a manager; 404: node unknown to the manager.
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            if exc.status_code == 404:
                if skip_missing:
                    return None
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)

        if 'ManagerStatus' in node_info:
            if node_info['ManagerStatus'].get('Leader'):
                # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                # Check moby/moby#35437 for details
                count_colons = node_info['ManagerStatus']['Addr'].count(":")
                if count_colons == 1:
                    swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
                else:
                    swarm_leader_ip = node_info['Status']['Addr']
                node_info['Status']['Addr'] = swarm_leader_ip
        return node_info

    def get_all_nodes_inspect(self):
        """
        Returns Swarm node info as in 'docker node inspect' command about all registered nodes

        :return:
            Structure with information about all nodes
        """
        try:
            node_info = self.nodes()
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    def get_all_nodes_list(self, output='short'):
        """
        Returns list of nodes registered in Swarm

        :param output: Defines format of returned data
        :return:
            If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
            if 'output' is 'long' then returns data is list of dict containing the attributes as in
            output of command 'docker node ls'
        """
        nodes_list = []

        nodes_inspect = self.get_all_nodes_inspect()
        if nodes_inspect is None:
            return None

        if output == 'short':
            for node in nodes_inspect:
                nodes_list.append(node['Description']['Hostname'])
        elif output == 'long':
            for node in nodes_inspect:
                node_property = {}

                node_property.update({'ID': node['ID']})
                node_property.update({'Hostname': node['Description']['Hostname']})
                node_property.update({'Status': node['Status']['State']})
                node_property.update({'Availability': node['Spec']['Availability']})
                if 'ManagerStatus' in node:
                    if node['ManagerStatus']['Leader'] is True:
                        node_property.update({'Leader': True})
                    node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
                node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})

                nodes_list.append(node_property)
        else:
            # Unknown output format.
            return None

        return nodes_list

    def get_node_name_by_id(self, nodeid):
        # Convenience lookup: hostname for a given node ID.
        return self.get_node_inspect(nodeid)['Description']['Hostname']

    def get_unlock_key(self):
        # The unlock-key endpoint is only available in Docker SDK >= 2.7.0.
        if self.docker_py_version < LooseVersion('2.7.0'):
            return None
        return super(AnsibleDockerSwarmClient, self).get_unlock_key()

    def get_service_inspect(self, service_id, skip_missing=False):
        """
        Returns Swarm service info as in 'docker service inspect' command about single service

        :param service_id: service ID or name
        :param skip_missing: if True then function will return None instead of failing the task
        :return:
            Single service information structure
        """
        try:
            service_info = self.inspect_service(service_id)
        except NotFound as exc:
            if skip_missing is False:
                self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
            else:
                return None
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
            self.fail("Error inspecting swarm service: %s" % exc)
        except Exception as exc:
            self.fail("Error inspecting swarm service: %s" % exc)

        json_str = json.dumps(service_info, ensure_ascii=False)
        service_info = json.loads(json_str)
        return service_info
diff --git a/lib/ansible/module_utils/exoscale.py b/lib/ansible/module_utils/exoscale.py
deleted file mode 100644
index e56f27144f..0000000000
--- a/lib/ansible/module_utils/exoscale.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2016, René Moser <mail@renemoser.net>
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import os
-
-from ansible.module_utils.six.moves import configparser
-from ansible.module_utils.six import integer_types, string_types
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.urls import fetch_url
-
-EXO_DNS_BASEURL = "https://api.exoscale.ch/dns/v1"
-
-
def exo_dns_argument_spec():
    """
    Build the shared argument spec for Exoscale DNS modules.

    Defaults are seeded from the CLOUDSTACK_* environment variables so the
    same credentials can be reused by the CloudStack modules.
    """
    env = os.environ
    return dict(
        api_key=dict(default=env.get('CLOUDSTACK_KEY'), no_log=True),
        api_secret=dict(default=env.get('CLOUDSTACK_SECRET'), no_log=True),
        api_timeout=dict(type='int', default=env.get('CLOUDSTACK_TIMEOUT') or 10),
        api_region=dict(default=env.get('CLOUDSTACK_REGION') or 'cloudstack'),
        validate_certs=dict(default=True, type='bool'),
    )
-
-
def exo_dns_required_together():
    """Return the parameter pairs that must be supplied together."""
    return [['api_key', 'api_secret']]
-
-
class ExoDns(object):
    # Thin wrapper around the Exoscale DNS REST API. Credentials are taken
    # from the module parameters, CLOUDSTACK_* environment variables, or a
    # cloudstack.ini-style config file - in that order of precedence.

    def __init__(self, module):
        self.module = module

        self.api_key = self.module.params.get('api_key')
        self.api_secret = self.module.params.get('api_secret')
        if not (self.api_key and self.api_secret):
            # Fall back to environment / ini-file credentials.
            try:
                region = self.module.params.get('api_region')
                config = self.read_config(ini_group=region)
                self.api_key = config['key']
                self.api_secret = config['secret']
            except Exception as e:
                self.module.fail_json(msg="Error while processing config: %s" % to_native(e))

        # Exoscale DNS authenticates via a single combined token header.
        self.headers = {
            'X-DNS-Token': "%s:%s" % (self.api_key, self.api_secret),
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        # Shared result skeleton for check_mode/diff support.
        self.result = {
            'changed': False,
            'diff': {
                'before': {},
                'after': {},
            }
        }

    def read_config(self, ini_group=None):
        # Resolve API credentials the same way the CloudStack modules do:
        # environment variables first, then ini files.
        if not ini_group:
            ini_group = os.environ.get('CLOUDSTACK_REGION', 'cloudstack')

        keys = ['key', 'secret']
        env_conf = {}
        for key in keys:
            if 'CLOUDSTACK_%s' % key.upper() not in os.environ:
                break
            else:
                env_conf[key] = os.environ['CLOUDSTACK_%s' % key.upper()]
        else:
            # for/else: only reached when neither variable was missing,
            # i.e. both CLOUDSTACK_KEY and CLOUDSTACK_SECRET are set.
            return env_conf

        # Config file: $PWD/cloudstack.ini or $HOME/.cloudstack.ini
        # Last read wins in configparser
        paths = (
            os.path.join(os.path.expanduser('~'), '.cloudstack.ini'),
            os.path.join(os.getcwd(), 'cloudstack.ini'),
        )
        # Look at CLOUDSTACK_CONFIG first if present
        if 'CLOUDSTACK_CONFIG' in os.environ:
            paths += (os.path.expanduser(os.environ['CLOUDSTACK_CONFIG']),)
        if not any([os.path.exists(c) for c in paths]):
            self.module.fail_json(msg="Config file not found. Tried : %s" % ", ".join(paths))

        conf = configparser.ConfigParser()
        conf.read(paths)
        return dict(conf.items(ini_group))

    def api_query(self, resource="/domains", method="GET", data=None):
        # Perform one REST call against the Exoscale DNS API and return the
        # parsed JSON body; fails the module on HTTP or parse errors.
        url = EXO_DNS_BASEURL + resource
        if data:
            data = self.module.jsonify(data)

        response, info = fetch_url(
            module=self.module,
            url=url,
            data=data,
            method=method,
            headers=self.headers,
            timeout=self.module.params.get('api_timeout'),
        )

        # 200/201/204 are the only success codes the DNS API returns.
        if info['status'] not in (200, 201, 204):
            self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))

        try:
            return self.module.from_json(to_text(response.read()))

        except Exception as e:
            self.module.fail_json(msg="Could not process response into json: %s" % to_native(e))

    def has_changed(self, want_dict, current_dict, only_keys=None):
        # Compare desired vs. current state, recording per-key diffs in
        # self.result['diff']; string values are compared case-insensitively.
        changed = False
        for key, value in want_dict.items():
            # Optionally limit by a list of keys
            if only_keys and key not in only_keys:
                continue
            # Skip None values
            if value is None:
                continue
            if key in current_dict:
                if isinstance(current_dict[key], integer_types):
                    if value != current_dict[key]:
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        changed = True
                elif isinstance(current_dict[key], string_types):
                    if value.lower() != current_dict[key].lower():
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        changed = True
                else:
                    self.module.fail_json(msg="Unable to determine comparison for key %s" % key)
            else:
                # Key not present on the current object: counts as a change.
                self.result['diff']['after'][key] = value
                changed = True
        return changed
diff --git a/lib/ansible/module_utils/f5_utils.py b/lib/ansible/module_utils/f5_utils.py
deleted file mode 100644
index 4d6533baea..0000000000
--- a/lib/ansible/module_utils/f5_utils.py
+++ /dev/null
@@ -1,383 +0,0 @@
-#
-# Copyright 2016 F5 Networks Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-# Legacy
-
-try:
- import bigsuds
- bigsuds_found = True
-except ImportError:
- bigsuds_found = False
-
-
-from ansible.module_utils.basic import env_fallback
-
-
def f5_argument_spec():
    """
    Return the common argument spec for legacy (bigsuds-based) F5 modules.

    Connection parameters fall back to F5_* environment variables when not
    provided in the task.
    """
    return dict(
        server=dict(
            type='str',
            required=True,
            fallback=(env_fallback, ['F5_SERVER'])
        ),
        user=dict(
            type='str',
            required=True,
            fallback=(env_fallback, ['F5_USER'])
        ),
        password=dict(
            type='str',
            aliases=['pass', 'pwd'],
            required=True,
            no_log=True,
            fallback=(env_fallback, ['F5_PASSWORD'])
        ),
        validate_certs=dict(
            default='yes',
            type='bool',
            fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
        ),
        server_port=dict(
            type='int',
            default=443,
            fallback=(env_fallback, ['F5_SERVER_PORT'])
        ),
        state=dict(
            type='str',
            default='present',
            choices=['present', 'absent']
        ),
        partition=dict(
            type='str',
            default='Common',
            fallback=(env_fallback, ['F5_PARTITION'])
        )
    )
-
-
def f5_parse_arguments(module):
    """
    Validate the legacy F5 module parameters and return them as a tuple of
    (server, user, password, state, partition, validate_certs, server_port).
    """
    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")

    if module.params['validate_certs']:
        # Certificate verification needs ssl.SSLContext (python >= 2.7.9).
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            module.fail_json(
                msg="bigsuds does not support verifying certificates with python < 2.7.9."
                "Either update python or set validate_certs=False on the task'")

    params = module.params
    return (
        params['server'],
        params['user'],
        params['password'],
        params['state'],
        params['partition'],
        params['validate_certs'],
        params['server_port']
    )
-
-
def bigip_api(bigip, user, password, validate_certs, port=443):
    """
    Construct a bigsuds BIGIP API connection, adapting to the installed
    bigsuds version's constructor signature.
    """
    try:
        # bigsuds >= 1.0.4 supports both verify and port keywords;
        # 1.0.3 supports verify only; older versions support neither.
        if bigsuds.__version__ >= '1.0.4':
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port)
        elif bigsuds.__version__ == '1.0.3':
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
        else:
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
    except TypeError:
        # bigsuds < 1.0.3, no verify param
        if validate_certs:
            # Note: verified we have SSLContext when we parsed params
            api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
        else:
            import ssl
            if hasattr(ssl, 'SSLContext'):
                # Really, you should never do this. It disables certificate
                # verification *globally*. But since older bigip libraries
                # don't give us a way to toggle verification we need to
                # disable it at the global level.
                # From https://www.python.org/dev/peps/pep-0476/#id29
                ssl._create_default_https_context = ssl._create_unverified_context
                api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
            # NOTE(review): if SSLContext is absent here, `api` is never
            # assigned and the return below raises UnboundLocalError -
            # presumably unreachable because f5_parse_arguments already
            # checked, but confirm.

    return api
-
-
-# Fully Qualified name (with the partition)
def fq_name(partition, name):
    """Return name prefixed with /partition/ unless it is None or already fully qualified."""
    if name is None or name.startswith('/'):
        return name
    return '/%s/%s' % (partition, name)
-
-
-# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
    """Apply fq_name to every entry of list_names; returns None for None input."""
    if list_names is None:
        return None
    # Keep map() so the (lazy on py3) return type matches the original API.
    return map(lambda entry: fq_name(partition, entry), list_names)
-
-
def to_commands(module, commands):
    """Normalize raw command entries into the structured form run_commands expects."""
    command_spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    return ComplexList(command_spec, module)(commands)
-
-
def run_commands(module, commands, check_rc=True):
    """
    Run each command on the device and collect its output; fail the module
    on a non-zero return code when check_rc is set.
    """
    commands = to_commands(module, to_list(commands))
    responses = []
    for cmd in commands:
        rc, out, err = exec_command(module, module.jsonify(cmd))
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
        responses.append(to_text(out, errors='surrogate_then_replace'))
    return responses
-
-
-# New style
-
-from abc import ABCMeta, abstractproperty
-from collections import defaultdict
-
-try:
- from f5.bigip import ManagementRoot as BigIpMgmt
- from f5.bigip.contexts import TransactionContextManager as BigIpTxContext
-
- from f5.bigiq import ManagementRoot as BigIqMgmt
-
- from f5.iworkflow import ManagementRoot as iWorkflowMgmt
- from icontrol.exceptions import iControlUnexpectedHTTPError
- HAS_F5SDK = True
-except ImportError:
- HAS_F5SDK = False
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import iteritems, with_metaclass
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import exec_command
-from ansible.module_utils._text import to_text
-
-
# Common argument spec shared by the new-style (f5-sdk based) F5 modules;
# connection details fall back to F5_* environment variables when not set
# in the task. Mirrors f5_argument_spec() above for the legacy modules.
F5_COMMON_ARGS = dict(
    server=dict(
        type='str',
        required=True,
        fallback=(env_fallback, ['F5_SERVER'])
    ),
    user=dict(
        type='str',
        required=True,
        fallback=(env_fallback, ['F5_USER'])
    ),
    password=dict(
        type='str',
        aliases=['pass', 'pwd'],
        required=True,
        no_log=True,
        fallback=(env_fallback, ['F5_PASSWORD'])
    ),
    validate_certs=dict(
        default='yes',
        type='bool',
        fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
    ),
    server_port=dict(
        type='int',
        default=443,
        fallback=(env_fallback, ['F5_SERVER_PORT'])
    ),
    state=dict(
        type='str',
        default='present',
        choices=['present', 'absent']
    ),
    partition=dict(
        type='str',
        default='Common',
        fallback=(env_fallback, ['F5_PARTITION'])
    )
)
-
-
class AnsibleF5Client(object):
    """Legacy helper coupling argument parsing with an F5 REST connection.

    Builds an AnsibleModule from F5_COMMON_ARGS merged with the module's own
    argument spec, then (unless the transport is 'cli') opens a management
    connection to the requested F5 product: 'bigip', 'bigiq' or 'iworkflow'.
    """

    def __init__(self, argument_spec=None, supports_check_mode=False,
                 mutually_exclusive=None, required_together=None,
                 required_if=None, required_one_of=None, add_file_common_args=False,
                 f5_product_name='bigip', sans_state=False, sans_partition=False):

        self.f5_product_name = f5_product_name

        # Layer the module-specific spec on top of the common F5 arguments,
        # optionally dropping 'state'/'partition' for modules without them.
        spec = dict(F5_COMMON_ARGS)
        if argument_spec:
            spec.update(argument_spec)
        if sans_state:
            spec.pop('state')
        if sans_partition:
            spec.pop('partition')
        self.arg_spec = spec

        self.module = AnsibleModule(
            argument_spec=spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=list(mutually_exclusive or []),
            required_together=list(required_together or []),
            required_if=required_if,
            required_one_of=required_one_of,
            add_file_common_args=add_file_common_args
        )

        self.check_mode = self.module.check_mode
        self._connect_params = self._get_connect_params()

        # CLI-transport modules manage their own connection; every other
        # transport talks to the REST API through the f5-sdk management root.
        if self.module.params.get('transport') != 'cli':
            try:
                self.api = self._get_mgmt_root(
                    f5_product_name, **self._connect_params
                )
            except iControlUnexpectedHTTPError as exc:
                self.fail(str(exc))

    def fail(self, msg):
        """Abort the module run with the given message."""
        self.module.fail_json(msg=msg)

    def _get_connect_params(self):
        """Collect the connection-related module parameters into a dict."""
        params = self.module.params
        return dict(
            user=params['user'],
            password=params['password'],
            server=params['server'],
            server_port=params['server_port'],
            validate_certs=params['validate_certs']
        )

    def _get_mgmt_root(self, type, **kwargs):
        """Return a ManagementRoot for the given product, or None if unknown."""
        address = (kwargs['server'], kwargs['user'], kwargs['password'])
        port = kwargs['server_port']
        if type == 'bigip':
            return BigIpMgmt(*address, port=port, token='tmos')
        if type == 'iworkflow':
            return iWorkflowMgmt(*address, port=port, token='local')
        if type == 'bigiq':
            return BigIqMgmt(*address, port=port, auth_provider='local')

    def reconnect(self):
        """Attempts to reconnect to a device

        The existing token from a ManagementRoot can become invalid if you,
        for example, upgrade the device (such as is done in the *_software
        module.

        This method can be used to reconnect to a remote device without
        having to re-instantiate the ArgumentSpec and AnsibleF5Client classes
        it will use the same values that were initially provided to those
        classes

        :return:
        :raises iControlUnexpectedHTTPError
        """
        self.api = self._get_mgmt_root(
            self.f5_product_name, **self._connect_params
        )
-
-
class AnsibleF5Parameters(object):
    """Base parameter container backed by a None-defaulting dict.

    Attribute reads for parameters without an explicit property fall back to
    the internal ``_values`` store via ``__getattr__``, so every stored
    parameter remains retrievable.
    """

    def __init__(self, params=None):
        self._values = defaultdict(lambda: None)
        self._values['__warnings'] = []
        if params:
            self.update(params=params)

    def update(self, params=None):
        """Load *params*, translating odd API names through ``api_map``."""
        if not params:
            return
        for name, value in iteritems(params):
            # Weird API parameters (e.g. ``dns.proxy.__iter__``) are mapped
            # to sane attribute names by the module-supplied api_map.
            if self.api_map is not None and name in self.api_map:
                target = self.api_map[name]
            else:
                target = name

            prop = getattr(type(self), target, None)
            if isinstance(prop, property) and prop.fset is not None:
                # A writable property exists; let its setter take the value.
                setattr(self, target, value)
            else:
                # Read-only property or plain value: stash it directly.
                self._values[target] = value

    def __getattr__(self, item):
        # Fallback so parameters without a dedicated property still resolve.
        return self._values[item]

    @property
    def partition(self):
        stored = self._values['partition']
        if stored is None:
            return 'Common'
        return stored.strip('/')

    @partition.setter
    def partition(self, value):
        self._values['partition'] = value

    def _filter_params(self, params):
        """Return *params* without the None-valued entries."""
        return dict((k, v) for k, v in iteritems(params) if v is not None)
-
-
class F5ModuleError(Exception):
    """Base exception raised by the legacy F5 module utilities."""
    pass
diff --git a/lib/ansible/module_utils/firewalld.py b/lib/ansible/module_utils/firewalld.py
deleted file mode 100644
index b44e0316aa..0000000000
--- a/lib/ansible/module_utils/firewalld.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# (c) 2013-2018, Adam Miller (maxamillion@fedoraproject.org)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Imports and info for sanity checking
-from distutils.version import LooseVersion
-
# Module-level state describing what could be imported / connected below.
FW_VERSION = None        # firewalld version string, when firewall.config imports
fw = None                # FirewallClient (online) or offline Firewall handle
fw_offline = False       # True when firewalld is not running (permanent-only ops)
import_failure = True    # True until the firewalld python bindings import cleanly
try:
    import firewall.config
    FW_VERSION = firewall.config.VERSION

    from firewall.client import FirewallClient
    from firewall.client import FirewallClientZoneSettings
    from firewall.errors import FirewallError
    import_failure = False

    try:
        fw = FirewallClient()
        fw.getDefaultZone()

    except (AttributeError, FirewallError):
        # Firewalld is not currently running, permanent-only operations
        fw_offline = True

        # Import other required parts of the firewalld API
        #
        # NOTE:
        # online and offline operations do not share a common firewalld API.
        # The offline fallback must only run in this branch, otherwise it
        # would clobber the connected online client above.
        try:
            from firewall.core.fw_test import Firewall_test
            fw = Firewall_test()
        # BUGFIX: ModuleNotFoundError only exists on Python >= 3.6; catching
        # ImportError (its base class) behaves identically there and keeps
        # this working on Python 2 / older Python 3 as well.
        except ImportError:
            # In firewalld version 0.7.0 this behavior changed
            from firewall.core.fw import Firewall
            fw = Firewall(offline=True)

        fw.start()
except ImportError:
    pass
-
-
class FirewallTransaction(object):
    """
    FirewallTransaction

    This is the base class for all firewalld transactions we might want to have.
    Subclasses implement the six get/set hooks; run() drives them according to
    the permanent/immediate flags and the desired state.
    """

    def __init__(self, module, action_args=(), zone=None, desired_state=None,
                 permanent=False, immediate=False, enabled_values=None, disabled_values=None):
        # type: (AnsibleModule, tuple, str, str, bool, bool, list, list) -> None
        """
        Initialize the transaction.

        :module: AnsibleModule, instance of AnsibleModule
        :action_args: tuple, args to pass for the action to take place
        :zone: str, firewall zone
        :desired_state: str, the desired state (enabled, disabled, etc)
        :permanent: bool, action should be permanent
        :immediate: bool, action should take place immediately
        :enabled_values: str[], acceptable values for enabling something (default: enabled)
        :disabled_values: str[], acceptable values for disabling something (default: disabled)
        """

        self.module = module
        self.fw = fw
        self.action_args = action_args

        if zone:
            self.zone = zone
        else:
            # No zone given: fall back to the system default zone.  The online
            # and offline firewalld APIs spell this lookup differently.
            if fw_offline:
                self.zone = fw.get_default_zone()
            else:
                self.zone = fw.getDefaultZone()

        self.desired_state = desired_state
        self.permanent = permanent
        self.immediate = immediate
        self.fw_offline = fw_offline
        self.enabled_values = enabled_values or ["enabled"]
        self.disabled_values = disabled_values or ["disabled"]

        # List of messages that we'll call module.fail_json or module.exit_json
        # with.
        self.msgs = []

        # Allow for custom messages to be added for certain subclass transaction
        # types
        self.enabled_msg = None
        self.disabled_msg = None

    #####################
    # exception handling
    #
    def action_handler(self, action_func, action_func_args):
        """
        Function to wrap calls to make actions on firewalld in try/except
        logic and emit (hopefully) useful error messages
        """

        try:
            return action_func(*action_func_args)
        except Exception as e:

            # If there are any commonly known errors that we should provide more
            # context for to help the users diagnose what's wrong. Handle that here
            if "INVALID_SERVICE" in "%s" % e:
                self.msgs.append("Services are defined by port/tcp relationship and named as they are in /etc/services (on most systems)")

            if len(self.msgs) > 0:
                self.module.fail_json(
                    msg='ERROR: Exception caught: %s %s' % (e, ', '.join(self.msgs))
                )
            else:
                self.module.fail_json(msg='ERROR: Exception caught: %s' % e)

    def get_fw_zone_settings(self):
        """Return (fw_zone, fw_settings) for self.zone via the active API."""
        if self.fw_offline:
            fw_zone = self.fw.config.get_zone(self.zone)
            fw_settings = FirewallClientZoneSettings(
                list(self.fw.config.get_zone_config(fw_zone))
            )
        else:
            fw_zone = self.fw.config().getZoneByName(self.zone)
            fw_settings = fw_zone.getSettings()

        return (fw_zone, fw_settings)

    def update_fw_settings(self, fw_zone, fw_settings):
        """Persist modified zone settings back through the active API."""
        if self.fw_offline:
            self.fw.config.set_zone_config(fw_zone, fw_settings.settings)
        else:
            fw_zone.update(fw_settings)

    # The six hooks below define the transaction-specific operations and must
    # be provided by each concrete subclass.
    def get_enabled_immediate(self):
        raise NotImplementedError

    def get_enabled_permanent(self):
        raise NotImplementedError

    def set_enabled_immediate(self):
        raise NotImplementedError

    def set_enabled_permanent(self):
        raise NotImplementedError

    def set_disabled_immediate(self):
        raise NotImplementedError

    def set_disabled_permanent(self):
        raise NotImplementedError

    def run(self):
        """
        run

        This function contains the "transaction logic" where as all operations
        follow a similar pattern in order to perform their action but simply
        call different functions to carry that action out.

        Returns (changed, msgs).  In check mode it calls module.exit_json
        directly (which terminates the run) as soon as a change is detected.
        """

        self.changed = False

        # Both permanent and runtime (immediate) state must be reconciled.
        if self.immediate and self.permanent:
            is_enabled_permanent = self.action_handler(
                self.get_enabled_permanent,
                self.action_args
            )
            is_enabled_immediate = self.action_handler(
                self.get_enabled_immediate,
                self.action_args
            )
            self.msgs.append('Permanent and Non-Permanent(immediate) operation')

            if self.desired_state in self.enabled_values:
                if not is_enabled_permanent or not is_enabled_immediate:
                    if self.module.check_mode:
                        self.module.exit_json(changed=True)
                    if not is_enabled_permanent:
                        self.action_handler(
                            self.set_enabled_permanent,
                            self.action_args
                        )
                        self.changed = True
                    if not is_enabled_immediate:
                        self.action_handler(
                            self.set_enabled_immediate,
                            self.action_args
                        )
                        self.changed = True
                    if self.changed and self.enabled_msg:
                        self.msgs.append(self.enabled_msg)

            elif self.desired_state in self.disabled_values:
                if is_enabled_permanent or is_enabled_immediate:
                    if self.module.check_mode:
                        self.module.exit_json(changed=True)
                    if is_enabled_permanent:
                        self.action_handler(
                            self.set_disabled_permanent,
                            self.action_args
                        )
                        self.changed = True
                    if is_enabled_immediate:
                        self.action_handler(
                            self.set_disabled_immediate,
                            self.action_args
                        )
                        self.changed = True
                    if self.changed and self.disabled_msg:
                        self.msgs.append(self.disabled_msg)

        # Permanent configuration only; the running firewall is untouched.
        elif self.permanent and not self.immediate:
            is_enabled = self.action_handler(
                self.get_enabled_permanent,
                self.action_args
            )
            self.msgs.append('Permanent operation')

            if self.desired_state in self.enabled_values:
                if not is_enabled:
                    if self.module.check_mode:
                        self.module.exit_json(changed=True)

                    self.action_handler(
                        self.set_enabled_permanent,
                        self.action_args
                    )
                    self.changed = True
                    if self.changed and self.enabled_msg:
                        self.msgs.append(self.enabled_msg)

            elif self.desired_state in self.disabled_values:
                if is_enabled:
                    if self.module.check_mode:
                        self.module.exit_json(changed=True)

                    self.action_handler(
                        self.set_disabled_permanent,
                        self.action_args
                    )
                    self.changed = True
                    if self.changed and self.disabled_msg:
                        self.msgs.append(self.disabled_msg)

        # Runtime-only change; lost on firewalld reload/reboot.
        elif self.immediate and not self.permanent:
            is_enabled = self.action_handler(
                self.get_enabled_immediate,
                self.action_args
            )
            self.msgs.append('Non-permanent operation')

            if self.desired_state in self.enabled_values:
                if not is_enabled:
                    if self.module.check_mode:
                        self.module.exit_json(changed=True)

                    self.action_handler(
                        self.set_enabled_immediate,
                        self.action_args
                    )
                    self.changed = True
                    if self.changed and self.enabled_msg:
                        self.msgs.append(self.enabled_msg)

            elif self.desired_state in self.disabled_values:
                if is_enabled:
                    if self.module.check_mode:
                        self.module.exit_json(changed=True)

                    self.action_handler(
                        self.set_disabled_immediate,
                        self.action_args
                    )
                    self.changed = True
                    if self.changed and self.disabled_msg:
                        self.msgs.append(self.disabled_msg)

        return (self.changed, self.msgs)

    @staticmethod
    def sanity_check(module):
        """
        Perform sanity checking, version checks, etc

        :module: AnsibleModule instance
        """

        if FW_VERSION and fw_offline:
            # Pre-run version checking
            if LooseVersion(FW_VERSION) < LooseVersion("0.3.9"):
                module.fail_json(msg='unsupported version of firewalld, offline operations require >= 0.3.9 - found: {0}'.format(FW_VERSION))
        elif FW_VERSION and not fw_offline:
            # Pre-run version checking
            if LooseVersion(FW_VERSION) < LooseVersion("0.2.11"):
                module.fail_json(msg='unsupported version of firewalld, requires >= 0.2.11 - found: {0}'.format(FW_VERSION))

            # Check for firewalld running
            try:
                if fw.connected is False:
                    module.fail_json(msg='firewalld service must be running, or try with offline=true')
            except AttributeError:
                module.fail_json(msg="firewalld connection can't be established,\
                        installed version (%s) likely too old. Requires firewalld >= 0.2.11" % FW_VERSION)

        if import_failure:
            module.fail_json(
                msg='Python Module not found: firewalld and its python module are required for this module, \
                        version 0.2.11 or newer required (0.3.9 or newer for offline operations)'
            )
diff --git a/lib/ansible/module_utils/gcdns.py b/lib/ansible/module_utils/gcdns.py
deleted file mode 100644
index fb5c74d1b4..0000000000
--- a/lib/ansible/module_utils/gcdns.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-try:
- from libcloud.dns.types import Provider
- from libcloud.dns.providers import get_driver
- HAS_LIBCLOUD_BASE = True
-except ImportError:
- HAS_LIBCLOUD_BASE = False
-
-from ansible.module_utils.gcp import gcp_connect
-from ansible.module_utils.gcp import unexpected_error_msg as gcp_error
-
-USER_AGENT_PRODUCT = "Ansible-gcdns"
-USER_AGENT_VERSION = "v1"
-
-
def gcdns_connect(module, provider=None):
    """Return a GCP connection for Google Cloud DNS."""
    if not HAS_LIBCLOUD_BASE:
        module.fail_json(msg='libcloud must be installed to use this module')
    return gcp_connect(module, provider or Provider.GOOGLE, get_driver,
                       USER_AGENT_PRODUCT, USER_AGENT_VERSION)
-
-
def unexpected_error_msg(error):
    """Create an error string based on passed in error."""
    # Delegate to the shared GCP helper so the message format stays uniform.
    return gcp_error(error)
diff --git a/lib/ansible/module_utils/gce.py b/lib/ansible/module_utils/gce.py
deleted file mode 100644
index c9d87542c3..0000000000
--- a/lib/ansible/module_utils/gce.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-try:
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- HAS_LIBCLOUD_BASE = True
-except ImportError:
- HAS_LIBCLOUD_BASE = False
-
-from ansible.module_utils.gcp import gcp_connect
-from ansible.module_utils.gcp import unexpected_error_msg as gcp_error
-
-USER_AGENT_PRODUCT = "Ansible-gce"
-USER_AGENT_VERSION = "v1"
-
-
def gce_connect(module, provider=None):
    """Return a GCP connection for Google Compute Engine."""
    if not HAS_LIBCLOUD_BASE:
        module.fail_json(msg='libcloud must be installed to use this module')
    return gcp_connect(module, provider or Provider.GCE, get_driver,
                       USER_AGENT_PRODUCT, USER_AGENT_VERSION)
-
-
def unexpected_error_msg(error):
    """Create an error string based on passed in error."""
    # Delegate to the shared GCP helper so the message format stays uniform.
    return gcp_error(error)
diff --git a/lib/ansible/module_utils/gcp.py b/lib/ansible/module_utils/gcp.py
deleted file mode 100644
index 508df44ab6..0000000000
--- a/lib/ansible/module_utils/gcp.py
+++ /dev/null
@@ -1,815 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-import json
-import os
-import time
-import traceback
-from distutils.version import LooseVersion
-
-# libcloud
-try:
- import libcloud
- HAS_LIBCLOUD_BASE = True
-except ImportError:
- HAS_LIBCLOUD_BASE = False
-
-# google-auth
-try:
- import google.auth
- from google.oauth2 import service_account
- HAS_GOOGLE_AUTH = True
-except ImportError:
- HAS_GOOGLE_AUTH = False
-
-# google-python-api
-try:
- import google_auth_httplib2
- from httplib2 import Http
- from googleapiclient.http import set_user_agent
- from googleapiclient.errors import HttpError
- from apiclient.discovery import build
- HAS_GOOGLE_API_LIB = True
-except ImportError:
- HAS_GOOGLE_API_LIB = False
-
-
-import ansible.module_utils.six.moves.urllib.parse as urlparse
-
# Default OAuth2 scope: full access to Google Cloud Platform APIs.
GCP_DEFAULT_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
-
-
-def _get_gcp_ansible_credentials(module):
- """Helper to fetch creds from AnsibleModule object."""
- service_account_email = module.params.get('service_account_email', None)
- # Note: pem_file is discouraged and will be deprecated
- credentials_file = module.params.get('pem_file', None) or module.params.get(
- 'credentials_file', None)
- project_id = module.params.get('project_id', None)
-
- return (service_account_email, credentials_file, project_id)
-
-
-def _get_gcp_environ_var(var_name, default_value):
- """Wrapper around os.environ.get call."""
- return os.environ.get(
- var_name, default_value)
-
-
-def _get_gcp_environment_credentials(service_account_email, credentials_file, project_id):
- """Helper to look in environment variables for credentials."""
- # If any of the values are not given as parameters, check the appropriate
- # environment variables.
- if not service_account_email:
- service_account_email = _get_gcp_environ_var('GCE_EMAIL', None)
- if not credentials_file:
- credentials_file = _get_gcp_environ_var(
- 'GCE_CREDENTIALS_FILE_PATH', None) or _get_gcp_environ_var(
- 'GOOGLE_APPLICATION_CREDENTIALS', None) or _get_gcp_environ_var(
- 'GCE_PEM_FILE_PATH', None)
- if not project_id:
- project_id = _get_gcp_environ_var('GCE_PROJECT', None) or _get_gcp_environ_var(
- 'GOOGLE_CLOUD_PROJECT', None)
- return (service_account_email, credentials_file, project_id)
-
-
def _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False):
    """
    Obtain GCP credentials by trying various methods.

    There are 3 ways to specify GCP credentials:
    1. Specify via Ansible module parameters (recommended).
    2. Specify via environment variables. Two sets of env vars are available:
       a) GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS (preferred)
       b) GCE_PROJECT, GCE_CREDENTIALS_FILE_PATH, GCE_EMAIL (legacy, not recommended; req'd if
          using p12 key)
    3. Specify via libcloud secrets.py file (deprecated).

    There are 3 helper functions to assist in the above.

    Regardless of method, the user also has the option of specifying a JSON
    file or a p12 file as the credentials file. JSON is strongly recommended and
    p12 will be removed in the future.

    Additionally, flags may be set to require valid json and check the libcloud
    version.

    AnsibleModule.fail_json is called only if the project_id cannot be found.

    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`

    :param require_valid_json: If true, require credentials to be valid JSON. Default is True.
    :type require_valid_json: ``bool``

    :params check_libcloud: If true, check the libcloud version available to see if
                            JSON creds are supported.
    :type check_libcloud: ``bool``

    :return: {'service_account_email': service_account_email,
              'credentials_file': credentials_file,
              'project_id': project_id}
    :rtype: ``dict``
    """
    # Module parameters take priority ...
    (service_account_email,
     credentials_file,
     project_id) = _get_gcp_ansible_credentials(module)

    # If any of the values are not given as parameters, check the appropriate
    # environment variables.
    (service_account_email,
     credentials_file,
     project_id) = _get_gcp_environment_credentials(service_account_email,
                                                    credentials_file, project_id)

    if credentials_file is None or project_id is None or service_account_email is None:
        if check_libcloud is True:
            if project_id is None:
                # TODO(supertom): this message is legacy and integration tests
                # depend on it.
                module.fail_json(msg='Missing GCE connection parameters in libcloud '
                                     'secrets file.')
        else:
            if project_id is None:
                module.fail_json(msg=('GCP connection error: unable to determine project (%s) or '
                                      'credentials file (%s)' % (project_id, credentials_file)))
        # Set these fields to empty strings if they are None
        # consumers of this will make the distinction between an empty string
        # and None.
        if credentials_file is None:
            credentials_file = ''
        if service_account_email is None:
            service_account_email = ''

    # ensure the credentials file is found and is in the proper format.
    if credentials_file:
        _validate_credentials_file(module, credentials_file,
                                   require_valid_json=require_valid_json,
                                   check_libcloud=check_libcloud)

    return {'service_account_email': service_account_email,
            'credentials_file': credentials_file,
            'project_id': project_id}
-
-
-def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False):
- """
- Check for valid credentials file.
-
- Optionally check for JSON format and if libcloud supports JSON.
-
- :param module: initialized Ansible module object
- :type module: `class AnsibleModule`
-
- :param credentials_file: path to file on disk
- :type credentials_file: ``str``. Complete path to file on disk.
-
- :param require_valid_json: This argument is ignored as of Ansible 2.7.
- :type require_valid_json: ``bool``
-
- :params check_libcloud: If true, check the libcloud version available to see if
- JSON creds are supported.
- :type check_libcloud: ``bool``
-
- :returns: True
- :rtype: ``bool``
- """
- try:
- # Try to read credentials as JSON
- with open(credentials_file) as credentials:
- json.loads(credentials.read())
- # If the credentials are proper JSON and we do not have the minimum
- # required libcloud version, bail out and return a descriptive
- # error
- if check_libcloud and LooseVersion(libcloud.__version__) < '0.17.0':
- module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
- 'Upgrade to libcloud>=0.17.0.')
- return True
- except IOError as e:
- module.fail_json(msg='GCP Credentials File %s not found.' %
- credentials_file, changed=False)
- return False
- except ValueError as e:
- module.fail_json(
- msg='Non-JSON credentials file provided. Please generate a new JSON key from the Google Cloud console',
- changed=False)
-
-
def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
    """Return a Google libcloud driver connection."""
    if not HAS_LIBCLOUD_BASE:
        module.fail_json(msg='libcloud must be installed to use this module')

    creds = _get_gcp_credentials(module,
                                 require_valid_json=False,
                                 check_libcloud=True)
    try:
        driver_cls = get_driver(provider)
        connection = driver_cls(creds['service_account_email'],
                                creds['credentials_file'],
                                datacenter=module.params.get('zone', None),
                                project=creds['project_id'])
        connection.connection.user_agent_append(
            "%s/%s" % (user_agent_product, user_agent_version))
    except (RuntimeError, ValueError) as e:
        module.fail_json(msg=str(e), changed=False)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    return connection
-
-
def get_google_cloud_credentials(module, scopes=None):
    """
    Get credentials object for use with Google Cloud client.

    Attempts to obtain credentials by calling _get_gcp_credentials. If those are
    not present will attempt to connect via Application Default Credentials.

    To connect via libcloud, don't use this function, use gcp_connect instead. For
    Google Python API Client, see get_google_api_auth for how to connect.

    For more information on Google's client library options for Python, see:
    U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)

    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`

    :param scopes: list of scopes
    :type scopes: ``list`` of URIs

    :returns: A tuple containing (google authorized) credentials object and
              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
    :rtype: ``tuple``
    """
    scopes = [] if scopes is None else scopes

    if not HAS_GOOGLE_AUTH:
        module.fail_json(msg='Please install google-auth.')

    conn_params = _get_gcp_credentials(module,
                                       require_valid_json=True,
                                       check_libcloud=False)
    try:
        if conn_params['credentials_file']:
            # An explicit service-account key file wins over ADC.
            credentials = service_account.Credentials.from_service_account_file(
                conn_params['credentials_file'])
            if scopes:
                credentials = credentials.with_scopes(scopes)
        else:
            # Fall back to Application Default Credentials; prefer the
            # project they report when one is available.
            credentials, adc_project_id = google.auth.default(scopes=scopes)
            if adc_project_id is not None:
                conn_params['project_id'] = adc_project_id
        return (credentials, conn_params)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
        return (None, None)
-
-
def get_google_api_auth(module, scopes=None, user_agent_product='ansible-python-api', user_agent_version='NA'):
    """
    Authentication for use with google-python-api-client.

    Function calls get_google_cloud_credentials, which attempts to assemble the credentials
    from various locations. Next it attempts to authenticate with Google.

    This function returns an httplib2 (compatible) object that can be provided to the Google Python API client.

    For libcloud, don't use this function, use gcp_connect instead. For Google Cloud, See
    get_google_cloud_credentials for how to connect.

    For more information on Google's client library options for Python, see:
    U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)

    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`

    :param scopes: list of scopes
    :type scopes: ``list`` of URIs

    :param user_agent_product: User agent product. eg: 'ansible-python-api'
    :type user_agent_product: ``str``

    :param user_agent_version: Version string to append to product. eg: 'NA' or '0.1'
    :type user_agent_version: ``str``

    :returns: A tuple containing (google authorized) httplib2 request object and a
              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
    :rtype: ``tuple``
    """
    if not HAS_GOOGLE_API_LIB:
        module.fail_json(msg="Please install google-api-python-client library")

    # Empty or missing scopes fall back to the platform-wide default scope.
    scopes = scopes or GCP_DEFAULT_SCOPES

    try:
        (credentials, conn_params) = get_google_cloud_credentials(module, scopes)
        agent = '%s-%s' % (user_agent_product, user_agent_version)
        http_auth = google_auth_httplib2.AuthorizedHttp(
            credentials, http=set_user_agent(Http(), agent))
        return (http_auth, conn_params)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
        return (None, None)
-
-
-def get_google_api_client(module, service, user_agent_product, user_agent_version,
- scopes=None, api_version='v1'):
- """
- Get the discovery-based python client. Use when a cloud client is not available.
-
- client = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
- user_agent_version=USER_AGENT_VERSION)
-
- :returns: A tuple containing the authorized client to the specified service and a
- params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
- :rtype: ``tuple``
- """
- if not scopes:
- scopes = GCP_DEFAULT_SCOPES
-
- http_auth, conn_params = get_google_api_auth(module, scopes=scopes,
- user_agent_product=user_agent_product,
- user_agent_version=user_agent_version)
- client = build(service, api_version, http=http_auth)
-
- return (client, conn_params)
-
-
-def check_min_pkg_version(pkg_name, minimum_version):
- """Minimum required version is >= installed version."""
- from pkg_resources import get_distribution
- try:
- installed_version = get_distribution(pkg_name).version
- return LooseVersion(installed_version) >= minimum_version
- except Exception as e:
- return False
-
-
-def unexpected_error_msg(error):
- """Create an error string based on passed in error."""
- return 'Unexpected response: (%s). Detail: %s' % (str(error), traceback.format_exc())
-
-
-def get_valid_location(module, driver, location, location_type='zone'):
- if location_type == 'zone':
- l = driver.ex_get_zone(location)
- else:
- l = driver.ex_get_region(location)
- if l is None:
- link = 'https://cloud.google.com/compute/docs/regions-zones/regions-zones#available'
- module.fail_json(msg=('%s %s is invalid. Please see the list of '
- 'available %s at %s' % (
- location_type, location, location_type, link)),
- changed=False)
- return l
-
-
-def check_params(params, field_list):
- """
- Helper to validate params.
-
- Use this in function definitions if they require specific fields
- to be present.
-
- :param params: structure that contains the fields
- :type params: ``dict``
-
- :param field_list: list of dict representing the fields
- [{'name': str, 'required': True/False', 'type': cls}]
- :type field_list: ``list`` of ``dict``
-
- :return True or raises ValueError
- :rtype: ``bool`` or `class:ValueError`
- """
- for d in field_list:
- if not d['name'] in params:
- if 'required' in d and d['required'] is True:
- raise ValueError(("%s is required and must be of type: %s" %
- (d['name'], str(d['type']))))
- else:
- if not isinstance(params[d['name']], d['type']):
- raise ValueError(("%s must be of type: %s. %s (%s) provided." % (
- d['name'], str(d['type']), params[d['name']],
- type(params[d['name']]))))
- if 'values' in d:
- if params[d['name']] not in d['values']:
- raise ValueError(("%s must be one of: %s" % (
- d['name'], ','.join(d['values']))))
- if isinstance(params[d['name']], int):
- if 'min' in d:
- if params[d['name']] < d['min']:
- raise ValueError(("%s must be greater than or equal to: %s" % (
- d['name'], d['min'])))
- if 'max' in d:
- if params[d['name']] > d['max']:
- raise ValueError("%s must be less than or equal to: %s" % (
- d['name'], d['max']))
- return True
-
-
-class GCPUtils(object):
- """
- Helper utilities for GCP.
- """
-
- @staticmethod
- def underscore_to_camel(txt):
- return txt.split('_')[0] + ''.join(x.capitalize() or '_' for x in txt.split('_')[1:])
-
- @staticmethod
- def remove_non_gcp_params(params):
- """
- Remove params if found.
- """
- params_to_remove = ['state']
- for p in params_to_remove:
- if p in params:
- del params[p]
-
- return params
-
- @staticmethod
- def params_to_gcp_dict(params, resource_name=None):
- """
- Recursively convert ansible params to GCP Params.
-
- Keys are converted from snake to camelCase
- ex: default_service to defaultService
-
- Handles lists, dicts and strings
-
- special provision for the resource name
- """
- if not isinstance(params, dict):
- return params
- gcp_dict = {}
- params = GCPUtils.remove_non_gcp_params(params)
- for k, v in params.items():
- gcp_key = GCPUtils.underscore_to_camel(k)
- if isinstance(v, dict):
- retval = GCPUtils.params_to_gcp_dict(v)
- gcp_dict[gcp_key] = retval
- elif isinstance(v, list):
- gcp_dict[gcp_key] = [GCPUtils.params_to_gcp_dict(x) for x in v]
- else:
- if resource_name and k == resource_name:
- gcp_dict['name'] = v
- else:
- gcp_dict[gcp_key] = v
- return gcp_dict
-
- @staticmethod
- def execute_api_client_req(req, client=None, raw=True,
- operation_timeout=180, poll_interval=5,
- raise_404=True):
- """
- General python api client interaction function.
-
- For use with google-api-python-client, or clients created
- with get_google_api_client function
- Not for use with Google Cloud client libraries
-
- For long-running operations, we make an immediate query and then
- sleep poll_interval before re-querying. After the request is done
- we rebuild the request with a get method and return the result.
-
- """
- try:
- resp = req.execute()
-
- if not resp:
- return None
-
- if raw:
- return resp
-
- if resp['kind'] == 'compute#operation':
- resp = GCPUtils.execute_api_client_operation_req(req, resp,
- client,
- operation_timeout,
- poll_interval)
-
- if 'items' in resp:
- return resp['items']
-
- return resp
- except HttpError as h:
- # Note: 404s can be generated (incorrectly) for dependent
- # resources not existing. We let the caller determine if
- # they want 404s raised for their invocation.
- if h.resp.status == 404 and not raise_404:
- return None
- else:
- raise
- except Exception:
- raise
-
- @staticmethod
- def execute_api_client_operation_req(orig_req, op_resp, client,
- operation_timeout=180, poll_interval=5):
- """
- Poll an operation for a result.
- """
- parsed_url = GCPUtils.parse_gcp_url(orig_req.uri)
- project_id = parsed_url['project']
- resource_name = GCPUtils.get_gcp_resource_from_methodId(
- orig_req.methodId)
- resource = GCPUtils.build_resource_from_name(client, resource_name)
-
- start_time = time.time()
-
- complete = False
- attempts = 1
- while not complete:
- if start_time + operation_timeout >= time.time():
- op_req = client.globalOperations().get(
- project=project_id, operation=op_resp['name'])
- op_resp = op_req.execute()
- if op_resp['status'] != 'DONE':
- time.sleep(poll_interval)
- attempts += 1
- else:
- complete = True
- if op_resp['operationType'] == 'delete':
- # don't wait for the delete
- return True
- elif op_resp['operationType'] in ['insert', 'update', 'patch']:
- # TODO(supertom): Isolate 'build-new-request' stuff.
- resource_name_singular = GCPUtils.get_entity_name_from_resource_name(
- resource_name)
- if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url:
- parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[
- 'entity_name']
- args = {'project': project_id,
- resource_name_singular: parsed_url['entity_name']}
- new_req = resource.get(**args)
- resp = new_req.execute()
- return resp
- else:
- # assuming multiple entities, do a list call.
- new_req = resource.list(project=project_id)
- resp = new_req.execute()
- return resp
- else:
- # operation didn't complete on time.
- raise GCPOperationTimeoutError("Operation timed out: %s" % (
- op_resp['targetLink']))
-
- @staticmethod
- def build_resource_from_name(client, resource_name):
- try:
- method = getattr(client, resource_name)
- return method()
- except AttributeError:
- raise NotImplementedError('%s is not an attribute of %s' % (resource_name,
- client))
-
- @staticmethod
- def get_gcp_resource_from_methodId(methodId):
- try:
- parts = methodId.split('.')
- if len(parts) != 3:
- return None
- else:
- return parts[1]
- except AttributeError:
- return None
-
- @staticmethod
- def get_entity_name_from_resource_name(resource_name):
- if not resource_name:
- return None
-
- try:
- # Chop off global or region prefixes
- if resource_name.startswith('global'):
- resource_name = resource_name.replace('global', '')
- elif resource_name.startswith('regional'):
- resource_name = resource_name.replace('region', '')
-
- # ensure we have a lower case first letter
- resource_name = resource_name[0].lower() + resource_name[1:]
-
- if resource_name[-3:] == 'ies':
- return resource_name.replace(
- resource_name[-3:], 'y')
- if resource_name[-1] == 's':
- return resource_name[:-1]
-
- return resource_name
-
- except AttributeError:
- return None
-
- @staticmethod
- def parse_gcp_url(url):
- """
- Parse GCP urls and return dict of parts.
-
- Supported URL structures:
- /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
- /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
- /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
- /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
- /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
- /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
- /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
- /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
- /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME
-
- :param url: GCP-generated URL, such as a selflink or resource location.
- :type url: ``str``
-
- :return: dictionary of parts. Includes stanard components of urlparse, plus
- GCP-specific 'service', 'api_version', 'project' and
- 'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
- and 'method_name', if applicable.
- :rtype: ``dict``
- """
-
- p = urlparse.urlparse(url)
- if not p:
- return None
- else:
- # we add extra items such as
- # zone, region and resource_name
- url_parts = {}
- url_parts['scheme'] = p.scheme
- url_parts['host'] = p.netloc
- url_parts['path'] = p.path
- if p.path.find('/') == 0:
- url_parts['path'] = p.path[1:]
- url_parts['params'] = p.params
- url_parts['fragment'] = p.fragment
- url_parts['query'] = p.query
- url_parts['project'] = None
- url_parts['service'] = None
- url_parts['api_version'] = None
-
- path_parts = url_parts['path'].split('/')
- url_parts['service'] = path_parts[0]
- url_parts['api_version'] = path_parts[1]
- if path_parts[2] == 'projects':
- url_parts['project'] = path_parts[3]
- else:
- # invalid URL
- raise GCPInvalidURLError('unable to parse: %s' % url)
-
- if 'global' in path_parts:
- url_parts['global'] = True
- idx = path_parts.index('global')
- if len(path_parts) - idx == 4:
- # we have a resource, entity and method_name
- url_parts['resource_name'] = path_parts[idx + 1]
- url_parts['entity_name'] = path_parts[idx + 2]
- url_parts['method_name'] = path_parts[idx + 3]
-
- if len(path_parts) - idx == 3:
- # we have a resource and entity
- url_parts['resource_name'] = path_parts[idx + 1]
- url_parts['entity_name'] = path_parts[idx + 2]
-
- if len(path_parts) - idx == 2:
- url_parts['resource_name'] = path_parts[idx + 1]
-
- if len(path_parts) - idx < 2:
- # invalid URL
- raise GCPInvalidURLError('unable to parse: %s' % url)
-
- elif 'regions' in path_parts or 'zones' in path_parts:
- idx = -1
- if 'regions' in path_parts:
- idx = path_parts.index('regions')
- url_parts['region'] = path_parts[idx + 1]
- else:
- idx = path_parts.index('zones')
- url_parts['zone'] = path_parts[idx + 1]
-
- if len(path_parts) - idx == 5:
- # we have a resource, entity and method_name
- url_parts['resource_name'] = path_parts[idx + 2]
- url_parts['entity_name'] = path_parts[idx + 3]
- url_parts['method_name'] = path_parts[idx + 4]
-
- if len(path_parts) - idx == 4:
- # we have a resource and entity
- url_parts['resource_name'] = path_parts[idx + 2]
- url_parts['entity_name'] = path_parts[idx + 3]
-
- if len(path_parts) - idx == 3:
- url_parts['resource_name'] = path_parts[idx + 2]
-
- if len(path_parts) - idx < 3:
- # invalid URL
- raise GCPInvalidURLError('unable to parse: %s' % url)
-
- else:
- # no location in URL.
- idx = path_parts.index('projects')
- if len(path_parts) - idx == 5:
- # we have a resource, entity and method_name
- url_parts['resource_name'] = path_parts[idx + 2]
- url_parts['entity_name'] = path_parts[idx + 3]
- url_parts['method_name'] = path_parts[idx + 4]
-
- if len(path_parts) - idx == 4:
- # we have a resource and entity
- url_parts['resource_name'] = path_parts[idx + 2]
- url_parts['entity_name'] = path_parts[idx + 3]
-
- if len(path_parts) - idx == 3:
- url_parts['resource_name'] = path_parts[idx + 2]
-
- if len(path_parts) - idx < 3:
- # invalid URL
- raise GCPInvalidURLError('unable to parse: %s' % url)
-
- return url_parts
-
- @staticmethod
- def build_googleapi_url(project, api_version='v1', service='compute'):
- return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project)
-
- @staticmethod
- def filter_gcp_fields(params, excluded_fields=None):
- new_params = {}
- if not excluded_fields:
- excluded_fields = ['creationTimestamp', 'id', 'kind',
- 'selfLink', 'fingerprint', 'description']
-
- if isinstance(params, list):
- new_params = [GCPUtils.filter_gcp_fields(
- x, excluded_fields) for x in params]
- elif isinstance(params, dict):
- for k in params.keys():
- if k not in excluded_fields:
- new_params[k] = GCPUtils.filter_gcp_fields(
- params[k], excluded_fields)
- else:
- new_params = params
-
- return new_params
-
- @staticmethod
- def are_params_equal(p1, p2):
- """
- Check if two params dicts are equal.
- TODO(supertom): need a way to filter out URLs, or they need to be built
- """
- filtered_p1 = GCPUtils.filter_gcp_fields(p1)
- filtered_p2 = GCPUtils.filter_gcp_fields(p2)
- if filtered_p1 != filtered_p2:
- return False
- return True
-
-
-class GCPError(Exception):
- pass
-
-
-class GCPOperationTimeoutError(GCPError):
- pass
-
-
-class GCPInvalidURLError(GCPError):
- pass
diff --git a/lib/ansible/module_utils/gitlab.py b/lib/ansible/module_utils/gitlab.py
deleted file mode 100644
index 5d8a7fea2b..0000000000
--- a/lib/ansible/module_utils/gitlab.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
-# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import
-import json
-from distutils.version import StrictVersion
-
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.urls import fetch_url
-from ansible.module_utils._text import to_native
-
-try:
- from urllib import quote_plus # Python 2.X
-except ImportError:
- from urllib.parse import quote_plus # Python 3+
-
-import traceback
-
-GITLAB_IMP_ERR = None
-try:
- import gitlab
- HAS_GITLAB_PACKAGE = True
-except Exception:
- GITLAB_IMP_ERR = traceback.format_exc()
- HAS_GITLAB_PACKAGE = False
-
-
-def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'):
- url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path)
- headers = {}
- if access_token:
- headers['Authorization'] = "Bearer %s" % access_token
- else:
- headers['Private-Token'] = private_token
-
- headers['Accept'] = "application/json"
- headers['Content-Type'] = "application/json"
-
- response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method)
- status = info['status']
- content = ""
- if response:
- content = response.read()
- if status == 204:
- return True, content
- elif status == 200 or status == 201:
- return True, json.loads(content)
- else:
- return False, str(status) + ": " + content
-
-
-def findProject(gitlab_instance, identifier):
- try:
- project = gitlab_instance.projects.get(identifier)
- except Exception as e:
- current_user = gitlab_instance.user
- try:
- project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
- except Exception as e:
- return None
-
- return project
-
-
-def findGroup(gitlab_instance, identifier):
- try:
- project = gitlab_instance.groups.get(identifier)
- except Exception as e:
- return None
-
- return project
-
-
-def gitlabAuthentication(module):
- gitlab_url = module.params['api_url']
- validate_certs = module.params['validate_certs']
- gitlab_user = module.params['api_username']
- gitlab_password = module.params['api_password']
- gitlab_token = module.params['api_token']
-
- if not HAS_GITLAB_PACKAGE:
- module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
- try:
- # python-gitlab library remove support for username/password authentication since 1.13.0
- # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
- # This condition allow to still support older version of the python-gitlab library
- if StrictVersion(gitlab.__version__) < StrictVersion("1.13.0"):
- gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
- private_token=gitlab_token, api_version=4)
- else:
- gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4)
-
- gitlab_instance.auth()
- except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
- module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
- except (gitlab.exceptions.GitlabHttpError) as e:
- module.fail_json(msg="Failed to connect to GitLab server: %s. \
- GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))
-
- return gitlab_instance
diff --git a/lib/ansible/module_utils/heroku.py b/lib/ansible/module_utils/heroku.py
deleted file mode 100644
index b6e89614f1..0000000000
--- a/lib/ansible/module_utils/heroku.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright: (c) 2018, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-
-from ansible.module_utils.basic import env_fallback, missing_required_lib
-
-HAS_HEROKU = False
-HEROKU_IMP_ERR = None
-try:
- import heroku3
- HAS_HEROKU = True
-except ImportError:
- HEROKU_IMP_ERR = traceback.format_exc()
-
-
-class HerokuHelper():
- def __init__(self, module):
- self.module = module
- self.check_lib()
- self.api_key = module.params["api_key"]
-
- def check_lib(self):
- if not HAS_HEROKU:
- self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR)
-
- @staticmethod
- def heroku_argument_spec():
- return dict(
- api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True))
-
- def get_heroku_client(self):
- client = heroku3.from_key(self.api_key)
-
- if not client.is_authenticated:
- self.module.fail_json(msg='Heroku authentication failure, please check your API Key')
-
- return client
diff --git a/lib/ansible/module_utils/hetzner.py b/lib/ansible/module_utils/hetzner.py
deleted file mode 100644
index 2bc3d1666a..0000000000
--- a/lib/ansible/module_utils/hetzner.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Felix Fontein <felix@fontein.de>, 2019
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-from ansible.module_utils.urls import fetch_url
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-
-import time
-
-
-HETZNER_DEFAULT_ARGUMENT_SPEC = dict(
- hetzner_user=dict(type='str', required=True),
- hetzner_password=dict(type='str', required=True, no_log=True),
-)
-
-# The API endpoint is fixed.
-BASE_URL = "https://robot-ws.your-server.de"
-
-
-def fetch_url_json(module, url, method='GET', timeout=10, data=None, headers=None, accept_errors=None):
- '''
- Make general request to Hetzner's JSON robot API.
- '''
- module.params['url_username'] = module.params['hetzner_user']
- module.params['url_password'] = module.params['hetzner_password']
- resp, info = fetch_url(module, url, method=method, timeout=timeout, data=data, headers=headers)
- try:
- content = resp.read()
- except AttributeError:
- content = info.pop('body', None)
-
- if not content:
- module.fail_json(msg='Cannot retrieve content from {0}'.format(url))
-
- try:
- result = module.from_json(content.decode('utf8'))
- if 'error' in result:
- if accept_errors:
- if result['error']['code'] in accept_errors:
- return result, result['error']['code']
- module.fail_json(msg='Request failed: {0} {1} ({2})'.format(
- result['error']['status'],
- result['error']['code'],
- result['error']['message']
- ))
- return result, None
- except ValueError:
- module.fail_json(msg='Cannot decode content retrieved from {0}'.format(url))
-
-
-class CheckDoneTimeoutException(Exception):
- def __init__(self, result, error):
- super(CheckDoneTimeoutException, self).__init__()
- self.result = result
- self.error = error
-
-
-def fetch_url_json_with_retries(module, url, check_done_callback, check_done_delay=10, check_done_timeout=180, skip_first=False, **kwargs):
- '''
- Make general request to Hetzner's JSON robot API, with retries until a condition is satisfied.
-
- The condition is tested by calling ``check_done_callback(result, error)``. If it is not satisfied,
- it will be retried with delays ``check_done_delay`` (in seconds) until a total timeout of
- ``check_done_timeout`` (in seconds) since the time the first request is started is reached.
-
- If ``skip_first`` is specified, will assume that a first call has already been made and will
- directly start with waiting.
- '''
- start_time = time.time()
- if not skip_first:
- result, error = fetch_url_json(module, url, **kwargs)
- if check_done_callback(result, error):
- return result, error
- while True:
- elapsed = (time.time() - start_time)
- left_time = check_done_timeout - elapsed
- time.sleep(max(min(check_done_delay, left_time), 0))
- result, error = fetch_url_json(module, url, **kwargs)
- if check_done_callback(result, error):
- return result, error
- if left_time < check_done_delay:
- raise CheckDoneTimeoutException(result, error)
-
-
-# #####################################################################################
-# ## FAILOVER IP ######################################################################
-
-def get_failover_record(module, ip):
- '''
- Get information record of failover IP.
-
- See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip
- '''
- url = "{0}/failover/{1}".format(BASE_URL, ip)
- result, error = fetch_url_json(module, url)
- if 'failover' not in result:
- module.fail_json(msg='Cannot interpret result: {0}'.format(result))
- return result['failover']
-
-
-def get_failover(module, ip):
- '''
- Get current routing target of failover IP.
-
- The value ``None`` represents unrouted.
-
- See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip
- '''
- return get_failover_record(module, ip)['active_server_ip']
-
-
-def set_failover(module, ip, value, timeout=180):
- '''
- Set current routing target of failover IP.
-
- Return a pair ``(value, changed)``. The value ``None`` for ``value`` represents unrouted.
-
- See https://robot.your-server.de/doc/webservice/en.html#post-failover-failover-ip
- and https://robot.your-server.de/doc/webservice/en.html#delete-failover-failover-ip
- '''
- url = "{0}/failover/{1}".format(BASE_URL, ip)
- if value is None:
- result, error = fetch_url_json(
- module,
- url,
- method='DELETE',
- timeout=timeout,
- accept_errors=['FAILOVER_ALREADY_ROUTED']
- )
- else:
- headers = {"Content-type": "application/x-www-form-urlencoded"}
- data = dict(
- active_server_ip=value,
- )
- result, error = fetch_url_json(
- module,
- url,
- method='POST',
- timeout=timeout,
- data=urlencode(data),
- headers=headers,
- accept_errors=['FAILOVER_ALREADY_ROUTED']
- )
- if error is not None:
- return value, False
- else:
- return result['failover']['active_server_ip'], True
-
-
-def get_failover_state(value):
- '''
- Create result dictionary for failover IP's value.
-
- The value ``None`` represents unrouted.
- '''
- return dict(
- value=value,
- state='routed' if value else 'unrouted'
- )
diff --git a/lib/ansible/module_utils/hwc_utils.py b/lib/ansible/module_utils/hwc_utils.py
deleted file mode 100644
index 52b5cb5cf8..0000000000
--- a/lib/ansible/module_utils/hwc_utils.py
+++ /dev/null
@@ -1,438 +0,0 @@
-# Copyright (c), Google Inc, 2017
-# Simplified BSD License (see licenses/simplified_bsd.txt or
-# https://opensource.org/licenses/BSD-2-Clause)
-
-import re
-import time
-import traceback
-
-THIRD_LIBRARIES_IMP_ERR = None
-try:
- from keystoneauth1.adapter import Adapter
- from keystoneauth1.identity import v3
- from keystoneauth1 import session
- HAS_THIRD_LIBRARIES = True
-except ImportError:
- THIRD_LIBRARIES_IMP_ERR = traceback.format_exc()
- HAS_THIRD_LIBRARIES = False
-
-from ansible.module_utils.basic import (AnsibleModule, env_fallback,
- missing_required_lib)
-from ansible.module_utils._text import to_text
-
-
-class HwcModuleException(Exception):
- def __init__(self, message):
- super(HwcModuleException, self).__init__()
-
- self._message = message
-
- def __str__(self):
- return "[HwcClientException] message=%s" % self._message
-
-
-class HwcClientException(Exception):
- def __init__(self, code, message):
- super(HwcClientException, self).__init__()
-
- self._code = code
- self._message = message
-
- def __str__(self):
- msg = " code=%s," % str(self._code) if self._code != 0 else ""
- return "[HwcClientException]%s message=%s" % (
- msg, self._message)
-
-
-class HwcClientException404(HwcClientException):
- def __init__(self, message):
- super(HwcClientException404, self).__init__(404, message)
-
- def __str__(self):
- return "[HwcClientException404] message=%s" % self._message
-
-
-def session_method_wrapper(f):
- def _wrap(self, url, *args, **kwargs):
- try:
- url = self.endpoint + url
- r = f(self, url, *args, **kwargs)
- except Exception as ex:
- raise HwcClientException(
- 0, "Sending request failed, error=%s" % ex)
-
- result = None
- if r.content:
- try:
- result = r.json()
- except Exception as ex:
- raise HwcClientException(
- 0, "Parsing response to json failed, error: %s" % ex)
-
- code = r.status_code
- if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]:
- msg = ""
- for i in ['message', 'error.message']:
- try:
- msg = navigate_value(result, i)
- break
- except Exception:
- pass
- else:
- msg = str(result)
-
- if code == 404:
- raise HwcClientException404(msg)
-
- raise HwcClientException(code, msg)
-
- return result
-
- return _wrap
-
-
-class _ServiceClient(object):
- def __init__(self, client, endpoint, product):
- self._client = client
- self._endpoint = endpoint
- self._default_header = {
- 'User-Agent': "Huawei-Ansible-MM-%s" % product,
- 'Accept': 'application/json',
- }
-
- @property
- def endpoint(self):
- return self._endpoint
-
- @endpoint.setter
- def endpoint(self, e):
- self._endpoint = e
-
- @session_method_wrapper
- def get(self, url, body=None, header=None, timeout=None):
- return self._client.get(url, json=body, timeout=timeout,
- headers=self._header(header))
-
- @session_method_wrapper
- def post(self, url, body=None, header=None, timeout=None):
- return self._client.post(url, json=body, timeout=timeout,
- headers=self._header(header))
-
- @session_method_wrapper
- def delete(self, url, body=None, header=None, timeout=None):
- return self._client.delete(url, json=body, timeout=timeout,
- headers=self._header(header))
-
- @session_method_wrapper
- def put(self, url, body=None, header=None, timeout=None):
- return self._client.put(url, json=body, timeout=timeout,
- headers=self._header(header))
-
- def _header(self, header):
- if header and isinstance(header, dict):
- for k, v in self._default_header.items():
- if k not in header:
- header[k] = v
- else:
- header = self._default_header
-
- return header
-
-
-class Config(object):
- def __init__(self, module, product):
- self._project_client = None
- self._domain_client = None
- self._module = module
- self._product = product
- self._endpoints = {}
-
- self._validate()
- self._gen_provider_client()
-
- @property
- def module(self):
- return self._module
-
- def client(self, region, service_type, service_level):
- c = self._project_client
- if service_level == "domain":
- c = self._domain_client
-
- e = self._get_service_endpoint(c, service_type, region)
-
- return _ServiceClient(c, e, self._product)
-
- def _gen_provider_client(self):
- m = self._module
- p = {
- "auth_url": m.params['identity_endpoint'],
- "password": m.params['password'],
- "username": m.params['user'],
- "project_name": m.params['project'],
- "user_domain_name": m.params['domain'],
- "reauthenticate": True
- }
-
- self._project_client = Adapter(
- session.Session(auth=v3.Password(**p)),
- raise_exc=False)
-
- p.pop("project_name")
- self._domain_client = Adapter(
- session.Session(auth=v3.Password(**p)),
- raise_exc=False)
-
- def _get_service_endpoint(self, client, service_type, region):
- k = "%s.%s" % (service_type, region if region else "")
-
- if k in self._endpoints:
- return self._endpoints.get(k)
-
- url = None
- try:
- url = client.get_endpoint(service_type=service_type,
- region_name=region, interface="public")
- except Exception as ex:
- raise HwcClientException(
- 0, "Getting endpoint failed, error=%s" % ex)
-
- if url == "":
- raise HwcClientException(
- 0, "Can not find the enpoint for %s" % service_type)
-
- if url[-1] != "/":
- url += "/"
-
- self._endpoints[k] = url
- return url
-
- def _validate(self):
- if not HAS_THIRD_LIBRARIES:
- self.module.fail_json(
- msg=missing_required_lib('keystoneauth1'),
- exception=THIRD_LIBRARIES_IMP_ERR)
-
-
-class HwcModule(AnsibleModule):
- def __init__(self, *args, **kwargs):
- arg_spec = kwargs.setdefault('argument_spec', {})
-
- arg_spec.update(
- dict(
- identity_endpoint=dict(
- required=True, type='str',
- fallback=(env_fallback, ['ANSIBLE_HWC_IDENTITY_ENDPOINT']),
- ),
- user=dict(
- required=True, type='str',
- fallback=(env_fallback, ['ANSIBLE_HWC_USER']),
- ),
- password=dict(
- required=True, type='str', no_log=True,
- fallback=(env_fallback, ['ANSIBLE_HWC_PASSWORD']),
- ),
- domain=dict(
- required=True, type='str',
- fallback=(env_fallback, ['ANSIBLE_HWC_DOMAIN']),
- ),
- project=dict(
- required=True, type='str',
- fallback=(env_fallback, ['ANSIBLE_HWC_PROJECT']),
- ),
- region=dict(
- type='str',
- fallback=(env_fallback, ['ANSIBLE_HWC_REGION']),
- ),
- id=dict(type='str')
- )
- )
-
- super(HwcModule, self).__init__(*args, **kwargs)
-
-
-class _DictComparison(object):
- ''' This class takes in two dictionaries `a` and `b`.
- These are dictionaries of arbitrary depth, but made up of standard
- Python types only.
- This differ will compare all values in `a` to those in `b`.
- If value in `a` is None, always returns True, indicating
- this value is no need to compare.
- Note: On all lists, order does matter.
- '''
-
- def __init__(self, request):
- self.request = request
-
- def __eq__(self, other):
- return self._compare_dicts(self.request, other.request)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def _compare_dicts(self, dict1, dict2):
- if dict1 is None:
- return True
-
- if set(dict1.keys()) != set(dict2.keys()):
- return False
-
- for k in dict1:
- if not self._compare_value(dict1.get(k), dict2.get(k)):
- return False
-
- return True
-
- def _compare_lists(self, list1, list2):
- """Takes in two lists and compares them."""
- if list1 is None:
- return True
-
- if len(list1) != len(list2):
- return False
-
- for i in range(len(list1)):
- if not self._compare_value(list1[i], list2[i]):
- return False
-
- return True
-
- def _compare_value(self, value1, value2):
- """
- return: True: value1 is same as value2, otherwise False.
- """
- if value1 is None:
- return True
-
- if not (value1 and value2):
- return (not value1) and (not value2)
-
- # Can assume non-None types at this point.
- if isinstance(value1, list) and isinstance(value2, list):
- return self._compare_lists(value1, value2)
-
- elif isinstance(value1, dict) and isinstance(value2, dict):
- return self._compare_dicts(value1, value2)
-
- # Always use to_text values to avoid unicode issues.
- return (to_text(value1, errors='surrogate_or_strict') == to_text(
- value2, errors='surrogate_or_strict'))
-
-
-def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
- is_last_time = False
- not_found_times = 0
- wait = 0
-
- time.sleep(delay)
-
- end = time.time() + timeout
- while not is_last_time:
- if time.time() > end:
- is_last_time = True
-
- obj, status = refresh()
-
- if obj is None:
- not_found_times += 1
-
- if not_found_times > 10:
- raise HwcModuleException(
- "not found the object for %d times" % not_found_times)
- else:
- not_found_times = 0
-
- if status in target:
- return obj
-
- if pending and status not in pending:
- raise HwcModuleException(
- "unexpect status(%s) occured" % status)
-
- if not is_last_time:
- wait *= 2
- if wait < min_interval:
- wait = min_interval
- elif wait > 10:
- wait = 10
-
- time.sleep(wait)
-
- raise HwcModuleException("asycn wait timeout after %d seconds" % timeout)
-
-
-def navigate_value(data, index, array_index=None):
- if array_index and (not isinstance(array_index, dict)):
- raise HwcModuleException("array_index must be dict")
-
- d = data
- for n in range(len(index)):
- if d is None:
- return None
-
- if not isinstance(d, dict):
- raise HwcModuleException(
- "can't navigate value from a non-dict object")
-
- i = index[n]
- if i not in d:
- raise HwcModuleException(
- "navigate value failed: key(%s) is not exist in dict" % i)
- d = d[i]
-
- if not array_index:
- continue
-
- k = ".".join(index[: (n + 1)])
- if k not in array_index:
- continue
-
- if d is None:
- return None
-
- if not isinstance(d, list):
- raise HwcModuleException(
- "can't navigate value from a non-list object")
-
- j = array_index.get(k)
- if j >= len(d):
- raise HwcModuleException(
- "navigate value failed: the index is out of list")
- d = d[j]
-
- return d
-
-
-def build_path(module, path, kv=None):
- if kv is None:
- kv = dict()
-
- v = {}
- for p in re.findall(r"{[^/]*}", path):
- n = p[1:][:-1]
-
- if n in kv:
- v[n] = str(kv[n])
-
- else:
- if n in module.params:
- v[n] = str(module.params.get(n))
- else:
- v[n] = ""
-
- return path.format(**v)
-
-
-def get_region(module):
- if module.params['region']:
- return module.params['region']
-
- return module.params['project'].split("_")[0]
-
-
-def is_empty_value(v):
- return (not v)
-
-
-def are_different_dicts(dict1, dict2):
- return _DictComparison(dict1) != _DictComparison(dict2)
diff --git a/lib/ansible/module_utils/ibm_sa_utils.py b/lib/ansible/module_utils/ibm_sa_utils.py
deleted file mode 100644
index c3ab4103a9..0000000000
--- a/lib/ansible/module_utils/ibm_sa_utils.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (C) 2018 IBM CORPORATION
-# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-
-from functools import wraps
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import missing_required_lib
-
-PYXCLI_INSTALLED = True
-PYXCLI_IMP_ERR = None
-try:
- from pyxcli import client, errors
-except ImportError:
- PYXCLI_IMP_ERR = traceback.format_exc()
- PYXCLI_INSTALLED = False
-
-AVAILABLE_PYXCLI_FIELDS = ['pool', 'size', 'snapshot_size',
- 'domain', 'perf_class', 'vol',
- 'iscsi_chap_name', 'iscsi_chap_secret',
- 'cluster', 'host', 'lun', 'override',
- 'fcaddress', 'iscsi_name', 'max_dms',
- 'max_cgs', 'ldap_id', 'max_mirrors',
- 'max_pools', 'max_volumes', 'hard_capacity',
- 'soft_capacity']
-
-
-def xcli_wrapper(func):
- """ Catch xcli errors and return a proper message"""
- @wraps(func)
- def wrapper(module, *args, **kwargs):
- try:
- return func(module, *args, **kwargs)
- except errors.CommandExecutionError as e:
- module.fail_json(msg=to_native(e))
- return wrapper
-
-
-@xcli_wrapper
-def connect_ssl(module):
- endpoints = module.params['endpoints']
- username = module.params['username']
- password = module.params['password']
- if not (username and password and endpoints):
- module.fail_json(
- msg="Username, password or endpoints arguments "
- "are missing from the module arguments")
-
- try:
- return client.XCLIClient.connect_multiendpoint_ssl(username,
- password,
- endpoints)
- except errors.CommandFailedConnectionError as e:
- module.fail_json(
- msg="Connection with Spectrum Accelerate system has "
- "failed: {[0]}.".format(to_native(e)))
-
-
-def spectrum_accelerate_spec():
- """ Return arguments spec for AnsibleModule """
- return dict(
- endpoints=dict(required=True),
- username=dict(required=True),
- password=dict(no_log=True, required=True),
- )
-
-
-@xcli_wrapper
-def execute_pyxcli_command(module, xcli_command, xcli_client):
- pyxcli_args = build_pyxcli_command(module.params)
- getattr(xcli_client.cmd, xcli_command)(**(pyxcli_args))
- return True
-
-
-def build_pyxcli_command(fields):
- """ Builds the args for pyxcli using the exact args from ansible"""
- pyxcli_args = {}
- for field in fields:
- if not fields[field]:
- continue
- if field in AVAILABLE_PYXCLI_FIELDS and fields[field] != '':
- pyxcli_args[field] = fields[field]
- return pyxcli_args
-
-
-def is_pyxcli_installed(module):
- if not PYXCLI_INSTALLED:
- module.fail_json(msg=missing_required_lib('pyxcli'),
- exception=PYXCLI_IMP_ERR)
diff --git a/lib/ansible/module_utils/identity/__init__.py b/lib/ansible/module_utils/identity/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/identity/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/identity/keycloak/__init__.py b/lib/ansible/module_utils/identity/keycloak/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/identity/keycloak/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/identity/keycloak/keycloak.py b/lib/ansible/module_utils/identity/keycloak/keycloak.py
deleted file mode 100644
index 5cab048dc8..0000000000
--- a/lib/ansible/module_utils/identity/keycloak/keycloak.py
+++ /dev/null
@@ -1,480 +0,0 @@
-# Copyright (c) 2017, Eike Frost <ei@kefro.st>
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import json
-
-from ansible.module_utils.urls import open_url
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.six.moves.urllib.error import HTTPError
-from ansible.module_utils._text import to_native
-
-URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token"
-URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}"
-URL_CLIENTS = "{url}/admin/realms/{realm}/clients"
-URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles"
-URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles"
-
-URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
-URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
-URL_GROUPS = "{url}/admin/realms/{realm}/groups"
-URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}"
-
-
-def keycloak_argument_spec():
- """
- Returns argument_spec of options common to keycloak_*-modules
-
- :return: argument_spec dict
- """
- return dict(
- auth_keycloak_url=dict(type='str', aliases=['url'], required=True),
- auth_client_id=dict(type='str', default='admin-cli'),
- auth_realm=dict(type='str', required=True),
- auth_client_secret=dict(type='str', default=None),
- auth_username=dict(type='str', aliases=['username'], required=True),
- auth_password=dict(type='str', aliases=['password'], required=True, no_log=True),
- validate_certs=dict(type='bool', default=True)
- )
-
-
-def camel(words):
- return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:])
-
-
-class KeycloakError(Exception):
- pass
-
-
-def get_token(base_url, validate_certs, auth_realm, client_id,
- auth_username, auth_password, client_secret):
- auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
- temp_payload = {
- 'grant_type': 'password',
- 'client_id': client_id,
- 'client_secret': client_secret,
- 'username': auth_username,
- 'password': auth_password,
- }
- # Remove empty items, for instance missing client_secret
- payload = dict(
- (k, v) for k, v in temp_payload.items() if v is not None)
- try:
- r = json.loads(to_native(open_url(auth_url, method='POST',
- validate_certs=validate_certs,
- data=urlencode(payload)).read()))
- except ValueError as e:
- raise KeycloakError(
- 'API returned invalid JSON when trying to obtain access token from %s: %s'
- % (auth_url, str(e)))
- except Exception as e:
- raise KeycloakError('Could not obtain access token from %s: %s'
- % (auth_url, str(e)))
-
- try:
- return {
- 'Authorization': 'Bearer ' + r['access_token'],
- 'Content-Type': 'application/json'
- }
- except KeyError:
- raise KeycloakError(
- 'Could not obtain access token from %s' % auth_url)
-
-
-class KeycloakAPI(object):
- """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which
- is obtained through OpenID connect
- """
- def __init__(self, module, connection_header):
- self.module = module
- self.baseurl = self.module.params.get('auth_keycloak_url')
- self.validate_certs = self.module.params.get('validate_certs')
- self.restheaders = connection_header
-
- def get_clients(self, realm='master', filter=None):
- """ Obtains client representations for clients in a realm
-
- :param realm: realm to be queried
- :param filter: if defined, only the client with clientId specified in the filter is returned
- :return: list of dicts of client representations
- """
- clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
- if filter is not None:
- clientlist_url += '?clientId=%s' % filter
-
- try:
- return json.loads(to_native(open_url(clientlist_url, method='GET', headers=self.restheaders,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s'
- % (realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s'
- % (realm, str(e)))
-
- def get_client_by_clientid(self, client_id, realm='master'):
- """ Get client representation by clientId
- :param client_id: The clientId to be queried
- :param realm: realm from which to obtain the client representation
- :return: dict with a client representation or None if none matching exist
- """
- r = self.get_clients(realm=realm, filter=client_id)
- if len(r) > 0:
- return r[0]
- else:
- return None
-
- def get_client_by_id(self, id, realm='master'):
- """ Obtain client representation by id
-
- :param id: id (not clientId) of client to be queried
- :param realm: client from this realm
- :return: dict of client representation or None if none matching exist
- """
- client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return json.loads(to_native(open_url(client_url, method='GET', headers=self.restheaders,
- validate_certs=self.validate_certs).read()))
-
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
- % (id, realm, str(e)))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s'
- % (id, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
- % (id, realm, str(e)))
-
- def get_client_id(self, client_id, realm='master'):
- """ Obtain id of client by client_id
-
- :param client_id: client_id of client to be queried
- :param realm: client template from this realm
- :return: id of client (usually a UUID)
- """
- result = self.get_client_by_clientid(client_id, realm)
- if isinstance(result, dict) and 'id' in result:
- return result['id']
- else:
- return None
-
- def update_client(self, id, clientrep, realm="master"):
- """ Update an existing client
- :param id: id (not clientId) of client to be updated in Keycloak
- :param clientrep: corresponding (partial/full) client representation with updates
- :param realm: realm the client is in
- :return: HTTPResponse object on success
- """
- client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(client_url, method='PUT', headers=self.restheaders,
- data=json.dumps(clientrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update client %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def create_client(self, clientrep, realm="master"):
- """ Create a client in keycloak
- :param clientrep: Client representation of client to be created. Must at least contain field clientId
- :param realm: realm for client to be created
- :return: HTTPResponse object on success
- """
- client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
-
- try:
- return open_url(client_url, method='POST', headers=self.restheaders,
- data=json.dumps(clientrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create client %s in realm %s: %s'
- % (clientrep['clientId'], realm, str(e)))
-
- def delete_client(self, id, realm="master"):
- """ Delete a client from Keycloak
-
- :param id: id (not clientId) of client to be deleted
- :param realm: realm of client to be deleted
- :return: HTTPResponse object on success
- """
- client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(client_url, method='DELETE', headers=self.restheaders,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not delete client %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def get_client_templates(self, realm='master'):
- """ Obtains client template representations for client templates in a realm
-
- :param realm: realm to be queried
- :return: list of dicts of client representations
- """
- url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
-
- try:
- return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s'
- % (realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s'
- % (realm, str(e)))
-
- def get_client_template_by_id(self, id, realm='master'):
- """ Obtain client template representation by id
-
- :param id: id (not name) of client template to be queried
- :param realm: client template from this realm
- :return: dict of client template representation or None if none matching exist
- """
- url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm)
-
- try:
- return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s'
- % (id, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s'
- % (id, realm, str(e)))
-
- def get_client_template_by_name(self, name, realm='master'):
- """ Obtain client template representation by name
-
- :param name: name of client template to be queried
- :param realm: client template from this realm
- :return: dict of client template representation or None if none matching exist
- """
- result = self.get_client_templates(realm)
- if isinstance(result, list):
- result = [x for x in result if x['name'] == name]
- if len(result) > 0:
- return result[0]
- return None
-
- def get_client_template_id(self, name, realm='master'):
- """ Obtain client template id by name
-
- :param name: name of client template to be queried
- :param realm: client template from this realm
- :return: client template id (usually a UUID)
- """
- result = self.get_client_template_by_name(name, realm)
- if isinstance(result, dict) and 'id' in result:
- return result['id']
- else:
- return None
-
- def update_client_template(self, id, clienttrep, realm="master"):
- """ Update an existing client template
- :param id: id (not name) of client template to be updated in Keycloak
- :param clienttrep: corresponding (partial/full) client template representation with updates
- :param realm: realm the client template is in
- :return: HTTPResponse object on success
- """
- url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(url, method='PUT', headers=self.restheaders,
- data=json.dumps(clienttrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update client template %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def create_client_template(self, clienttrep, realm="master"):
- """ Create a client in keycloak
- :param clienttrep: Client template representation of client template to be created. Must at least contain field name
- :param realm: realm for client template to be created in
- :return: HTTPResponse object on success
- """
- url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
-
- try:
- return open_url(url, method='POST', headers=self.restheaders,
- data=json.dumps(clienttrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create client template %s in realm %s: %s'
- % (clienttrep['clientId'], realm, str(e)))
-
- def delete_client_template(self, id, realm="master"):
- """ Delete a client template from Keycloak
-
- :param id: id (not name) of client to be deleted
- :param realm: realm of client template to be deleted
- :return: HTTPResponse object on success
- """
- url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(url, method='DELETE', headers=self.restheaders,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not delete client template %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def get_groups(self, realm="master"):
- """ Fetch the name and ID of all groups on the Keycloak server.
-
- To fetch the full data of the group, make a subsequent call to
- get_group_by_groupid, passing in the ID of the group you wish to return.
-
- :param realm: Return the groups of this realm (default "master").
- """
- groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
- try:
- return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s"
- % (realm, str(e)))
-
- def get_group_by_groupid(self, gid, realm="master"):
- """ Fetch a keycloak group from the provided realm using the group's unique ID.
-
- If the group does not exist, None is returned.
-
- gid is a UUID provided by the Keycloak API
- :param gid: UUID of the group to be returned
- :param realm: Realm in which the group resides; default 'master'.
- """
- groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid)
- try:
- return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
- validate_certs=self.validate_certs).read()))
-
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
- % (gid, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
- % (gid, realm, str(e)))
-
- def get_group_by_name(self, name, realm="master"):
- """ Fetch a keycloak group within a realm based on its name.
-
- The Keycloak API does not allow filtering of the Groups resource by name.
- As a result, this method first retrieves the entire list of groups - name and ID -
- then performs a second query to fetch the group.
-
- If the group does not exist, None is returned.
- :param name: Name of the group to fetch.
- :param realm: Realm in which the group resides; default 'master'
- """
- groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
- try:
- all_groups = self.get_groups(realm=realm)
-
- for group in all_groups:
- if group['name'] == name:
- return self.get_group_by_groupid(group['id'], realm=realm)
-
- return None
-
- except Exception as e:
- self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
- % (name, realm, str(e)))
-
- def create_group(self, grouprep, realm="master"):
- """ Create a Keycloak group.
-
- :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
- :return: HTTPResponse object on success
- """
- groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
- try:
- return open_url(groups_url, method='POST', headers=self.restheaders,
- data=json.dumps(grouprep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg="Could not create group %s in realm %s: %s"
- % (grouprep['name'], realm, str(e)))
-
- def update_group(self, grouprep, realm="master"):
- """ Update an existing group.
-
- :param grouprep: A GroupRepresentation of the updated group.
- :return HTTPResponse object on success
- """
- group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id'])
-
- try:
- return open_url(group_url, method='PUT', headers=self.restheaders,
- data=json.dumps(grouprep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update group %s in realm %s: %s'
- % (grouprep['name'], realm, str(e)))
-
- def delete_group(self, name=None, groupid=None, realm="master"):
- """ Delete a group. One of name or groupid must be provided.
-
- Providing the group ID is preferred as it avoids a second lookup to
- convert a group name to an ID.
-
- :param name: The name of the group. A lookup will be performed to retrieve the group ID.
- :param groupid: The ID of the group (preferred to name).
- :param realm: The realm in which this group resides, default "master".
- """
-
- if groupid is None and name is None:
- # prefer an exception since this is almost certainly a programming error in the module itself.
- raise Exception("Unable to delete group - one of group ID or name must be provided.")
-
- # only lookup the name if groupid isn't provided.
- # in the case that both are provided, prefer the ID, since it's one
- # less lookup.
- if groupid is None and name is not None:
- for group in self.get_groups(realm=realm):
- if group['name'] == name:
- groupid = group['id']
- break
-
- # if the group doesn't exist - no problem, nothing to delete.
- if groupid is None:
- return None
-
- # should have a good groupid by here.
- group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl)
- try:
- return open_url(group_url, method='DELETE', headers=self.restheaders,
- validate_certs=self.validate_certs)
-
- except Exception as e:
- self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e)))
diff --git a/lib/ansible/module_utils/infinibox.py b/lib/ansible/module_utils/infinibox.py
deleted file mode 100644
index 57ee89ec2c..0000000000
--- a/lib/ansible/module_utils/infinibox.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Gregory Shulov <gregory.shulov@gmail.com>,2016
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-HAS_INFINISDK = True
-try:
- from infinisdk import InfiniBox, core
-except ImportError:
- HAS_INFINISDK = False
-
-from functools import wraps
-from os import environ
-from os import path
-
-
-def api_wrapper(func):
- """ Catch API Errors Decorator"""
- @wraps(func)
- def __wrapper(*args, **kwargs):
- module = args[0]
- try:
- return func(*args, **kwargs)
- except core.exceptions.APICommandException as e:
- module.fail_json(msg=e.message)
- except core.exceptions.SystemNotFoundException as e:
- module.fail_json(msg=e.message)
- except Exception:
- raise
- return __wrapper
-
-
-@api_wrapper
-def get_system(module):
- """Return System Object or Fail"""
- box = module.params['system']
- user = module.params.get('user', None)
- password = module.params.get('password', None)
-
- if user and password:
- system = InfiniBox(box, auth=(user, password))
- elif environ.get('INFINIBOX_USER') and environ.get('INFINIBOX_PASSWORD'):
- system = InfiniBox(box, auth=(environ.get('INFINIBOX_USER'), environ.get('INFINIBOX_PASSWORD')))
- elif path.isfile(path.expanduser('~') + '/.infinidat/infinisdk.ini'):
- system = InfiniBox(box)
- else:
- module.fail_json(msg="You must set INFINIBOX_USER and INFINIBOX_PASSWORD environment variables or set username/password module arguments")
-
- try:
- system.login()
- except Exception:
- module.fail_json(msg="Infinibox authentication failed. Check your credentials")
- return system
-
-
-def infinibox_argument_spec():
- """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
-
- return dict(
- system=dict(required=True),
- user=dict(),
- password=dict(no_log=True),
- )
-
-
-def infinibox_required_together():
- """Return the default list used for the required_together argument to AnsibleModule"""
- return [['user', 'password']]
diff --git a/lib/ansible/module_utils/influxdb.py b/lib/ansible/module_utils/influxdb.py
deleted file mode 100644
index 0bdd4e6cdd..0000000000
--- a/lib/ansible/module_utils/influxdb.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-
-from ansible.module_utils.basic import missing_required_lib
-
-REQUESTS_IMP_ERR = None
-try:
- import requests.exceptions
- HAS_REQUESTS = True
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- HAS_REQUESTS = False
-
-INFLUXDB_IMP_ERR = None
-try:
- from influxdb import InfluxDBClient
- from influxdb import __version__ as influxdb_version
- from influxdb import exceptions
- HAS_INFLUXDB = True
-except ImportError:
- INFLUXDB_IMP_ERR = traceback.format_exc()
- HAS_INFLUXDB = False
-
-
-class InfluxDb():
- def __init__(self, module):
- self.module = module
- self.params = self.module.params
- self.check_lib()
- self.hostname = self.params['hostname']
- self.port = self.params['port']
- self.path = self.params['path']
- self.username = self.params['username']
- self.password = self.params['password']
- self.database_name = self.params.get('database_name')
-
- def check_lib(self):
- if not HAS_REQUESTS:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
-
- if not HAS_INFLUXDB:
- self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR)
-
- @staticmethod
- def influxdb_argument_spec():
- return dict(
- hostname=dict(type='str', default='localhost'),
- port=dict(type='int', default=8086),
- path=dict(type='str', default=''),
- username=dict(type='str', default='root', aliases=['login_username']),
- password=dict(type='str', default='root', no_log=True, aliases=['login_password']),
- ssl=dict(type='bool', default=False),
- validate_certs=dict(type='bool', default=True),
- timeout=dict(type='int'),
- retries=dict(type='int', default=3),
- proxies=dict(type='dict', default={}),
- use_udp=dict(type='bool', default=False),
- udp_port=dict(type='int', default=4444),
- )
-
- def connect_to_influxdb(self):
- args = dict(
- host=self.hostname,
- port=self.port,
- path=self.path,
- username=self.username,
- password=self.password,
- database=self.database_name,
- ssl=self.params['ssl'],
- verify_ssl=self.params['validate_certs'],
- timeout=self.params['timeout'],
- use_udp=self.params['use_udp'],
- udp_port=self.params['udp_port'],
- proxies=self.params['proxies'],
- )
- influxdb_api_version = tuple(influxdb_version.split("."))
- if influxdb_api_version >= ('4', '1', '0'):
- # retries option is added in version 4.1.0
- args.update(retries=self.params['retries'])
-
- return InfluxDBClient(**args)
diff --git a/lib/ansible/module_utils/ipa.py b/lib/ansible/module_utils/ipa.py
deleted file mode 100644
index c834c873f8..0000000000
--- a/lib/ansible/module_utils/ipa.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import json
-import os
-import socket
-import uuid
-
-import re
-from ansible.module_utils._text import to_bytes, to_native, to_text
-from ansible.module_utils.six import PY3
-from ansible.module_utils.six.moves.urllib.parse import quote
-from ansible.module_utils.urls import fetch_url, HAS_GSSAPI
-from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound
-
-
-def _env_then_dns_fallback(*args, **kwargs):
- ''' Load value from environment or DNS in that order'''
- try:
- return env_fallback(*args, **kwargs)
- except AnsibleFallbackNotFound:
- # If no host was given, we try to guess it from IPA.
- # The ipa-ca entry is a standard entry that IPA will have set for
- # the CA.
- try:
- return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0]
- except Exception:
- raise AnsibleFallbackNotFound
-
-
-class IPAClient(object):
- def __init__(self, module, host, port, protocol):
- self.host = host
- self.port = port
- self.protocol = protocol
- self.module = module
- self.headers = None
- self.timeout = module.params.get('ipa_timeout')
- self.use_gssapi = False
-
- def get_base_url(self):
- return '%s://%s/ipa' % (self.protocol, self.host)
-
- def get_json_url(self):
- return '%s/session/json' % self.get_base_url()
-
- def login(self, username, password):
- if 'KRB5CCNAME' in os.environ and HAS_GSSAPI:
- self.use_gssapi = True
- elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI:
- ccache = "MEMORY:" + str(uuid.uuid4())
- os.environ['KRB5CCNAME'] = ccache
- self.use_gssapi = True
- else:
- if not password:
- if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ:
- self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'")
- self._fail('login', 'Password is required if not using '
- 'GSSAPI. To use GSSAPI, please set the '
- 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) '
- ' environment variables.')
- url = '%s/session/login_password' % self.get_base_url()
- data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
- headers = {'referer': self.get_base_url(),
- 'Content-Type': 'application/x-www-form-urlencoded',
- 'Accept': 'text/plain'}
- try:
- resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout)
- status_code = info['status']
- if status_code not in [200, 201, 204]:
- self._fail('login', info['msg'])
-
- self.headers = {'Cookie': resp.info().get('Set-Cookie')}
- except Exception as e:
- self._fail('login', to_native(e))
- if not self.headers:
- self.headers = dict()
- self.headers.update({
- 'referer': self.get_base_url(),
- 'Content-Type': 'application/json',
- 'Accept': 'application/json'})
-
- def _fail(self, msg, e):
- if 'message' in e:
- err_string = e.get('message')
- else:
- err_string = e
- self.module.fail_json(msg='%s: %s' % (msg, err_string))
-
- def get_ipa_version(self):
- response = self.ping()['summary']
- ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*')
- version_match = ipa_ver_regex.match(response)
- ipa_version = None
- if version_match:
- ipa_version = version_match.groups()[0]
- return ipa_version
-
- def ping(self):
- return self._post_json(method='ping', name=None)
-
- def _post_json(self, method, name, item=None):
- if item is None:
- item = {}
- url = '%s/session/json' % self.get_base_url()
- data = dict(method=method)
-
- # TODO: We should probably handle this a little better.
- if method in ('ping', 'config_show'):
- data['params'] = [[], {}]
- elif method == 'config_mod':
- data['params'] = [[], item]
- else:
- data['params'] = [[name], item]
-
- try:
- resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)),
- headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi)
- status_code = info['status']
- if status_code not in [200, 201, 204]:
- self._fail(method, info['msg'])
- except Exception as e:
- self._fail('post %s' % method, to_native(e))
-
- if PY3:
- charset = resp.headers.get_content_charset('latin-1')
- else:
- response_charset = resp.headers.getparam('charset')
- if response_charset:
- charset = response_charset
- else:
- charset = 'latin-1'
- resp = json.loads(to_text(resp.read(), encoding=charset), encoding=charset)
- err = resp.get('error')
- if err is not None:
- self._fail('response %s' % method, err)
-
- if 'result' in resp:
- result = resp.get('result')
- if 'result' in result:
- result = result.get('result')
- if isinstance(result, list):
- if len(result) > 0:
- return result[0]
- else:
- return {}
- return result
- return None
-
- def get_diff(self, ipa_data, module_data):
- result = []
- for key in module_data.keys():
- mod_value = module_data.get(key, None)
- if isinstance(mod_value, list):
- default = []
- else:
- default = None
- ipa_value = ipa_data.get(key, default)
- if isinstance(ipa_value, list) and not isinstance(mod_value, list):
- mod_value = [mod_value]
- if isinstance(ipa_value, list) and isinstance(mod_value, list):
- mod_value = sorted(mod_value)
- ipa_value = sorted(ipa_value)
- if mod_value != ipa_value:
- result.append(key)
- return result
-
- def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
- changed = False
- diff = list(set(ipa_list) - set(module_list))
- if len(diff) > 0:
- changed = True
- if not self.module.check_mode:
- if item:
- remove_method(name=name, item={item: diff})
- else:
- remove_method(name=name, item=diff)
-
- diff = list(set(module_list) - set(ipa_list))
- if len(diff) > 0:
- changed = True
- if not self.module.check_mode:
- if item:
- add_method(name=name, item={item: diff})
- else:
- add_method(name=name, item=diff)
-
- return changed
-
-
-def ipa_argument_spec():
- return dict(
- ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])),
- ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])),
- ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])),
- ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])),
- ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])),
- ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])),
- validate_certs=dict(type='bool', default=True),
- )
diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py
deleted file mode 100644
index 42f067f225..0000000000
--- a/lib/ansible/module_utils/known_hosts.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import hmac
-import re
-
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-try:
- from hashlib import sha1
-except ImportError:
- import sha as sha1
-
-HASHED_KEY_MAGIC = "|1|"
-
-
-def is_ssh_url(url):
-
- """ check if url is ssh """
-
- if "@" in url and "://" not in url:
- return True
- for scheme in "ssh://", "git+ssh://", "ssh+git://":
- if url.startswith(scheme):
- return True
- return False
-
-
-def get_fqdn_and_port(repo_url):
-
- """ chop the hostname and port out of a url """
-
- fqdn = None
- port = None
- ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')
- if "@" in repo_url and "://" not in repo_url:
- # most likely an user@host:path or user@host/path type URL
- repo_url = repo_url.split("@", 1)[1]
- match = ipv6_re.match(repo_url)
- # For this type of URL, colon specifies the path, not the port
- if match:
- fqdn, path = match.groups()
- elif ":" in repo_url:
- fqdn = repo_url.split(":")[0]
- elif "/" in repo_url:
- fqdn = repo_url.split("/")[0]
- elif "://" in repo_url:
- # this should be something we can parse with urlparse
- parts = urlparse(repo_url)
- # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
- # ensure we actually have a parts[1] before continuing.
- if parts[1] != '':
- fqdn = parts[1]
- if "@" in fqdn:
- fqdn = fqdn.split("@", 1)[1]
- match = ipv6_re.match(fqdn)
- if match:
- fqdn, port = match.groups()
- elif ":" in fqdn:
- fqdn, port = fqdn.split(":")[0:2]
- return fqdn, port
-
-
-def check_hostkey(module, fqdn):
- return not not_in_host_file(module, fqdn)
-
-
-# this is a variant of code found in connection_plugins/paramiko.py and we should modify
-# the paramiko code to import and use this.
-
-def not_in_host_file(self, host):
-
- if 'USER' in os.environ:
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_host_file = "~/.ssh/known_hosts"
- user_host_file = os.path.expanduser(user_host_file)
-
- host_file_list = []
- host_file_list.append(user_host_file)
- host_file_list.append("/etc/ssh/ssh_known_hosts")
- host_file_list.append("/etc/ssh/ssh_known_hosts2")
- host_file_list.append("/etc/openssh/ssh_known_hosts")
-
- hfiles_not_found = 0
- for hf in host_file_list:
- if not os.path.exists(hf):
- hfiles_not_found += 1
- continue
-
- try:
- host_fh = open(hf)
- except IOError:
- hfiles_not_found += 1
- continue
- else:
- data = host_fh.read()
- host_fh.close()
-
- for line in data.split("\n"):
- if line is None or " " not in line:
- continue
- tokens = line.split()
- if tokens[0].find(HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
- return False
- except Exception:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
-
- return True
-
-
-def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
-
- """ use ssh-keyscan to add the hostkey """
-
- keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
-
- if 'USER' in os.environ:
- user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_ssh_dir = "~/.ssh/"
- user_host_file = "~/.ssh/known_hosts"
- user_ssh_dir = os.path.expanduser(user_ssh_dir)
-
- if not os.path.exists(user_ssh_dir):
- if create_dir:
- try:
- os.makedirs(user_ssh_dir, int('700', 8))
- except Exception:
- module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
- else:
- module.fail_json(msg="%s does not exist" % user_ssh_dir)
- elif not os.path.isdir(user_ssh_dir):
- module.fail_json(msg="%s is not a directory" % user_ssh_dir)
-
- if port:
- this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
- else:
- this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
-
- rc, out, err = module.run_command(this_cmd)
- # ssh-keyscan gives a 0 exit code and prints nothing on timeout
- if rc != 0 or not out:
- msg = 'failed to retrieve hostkey'
- if not out:
- msg += '. "%s" returned no matches.' % this_cmd
- else:
- msg += ' using command "%s". [stdout]: %s' % (this_cmd, out)
-
- if err:
- msg += ' [stderr]: %s' % err
-
- module.fail_json(msg=msg)
-
- module.append_to_file(user_host_file, out)
-
- return rc, out, err
diff --git a/lib/ansible/module_utils/kubevirt.py b/lib/ansible/module_utils/kubevirt.py
deleted file mode 100644
index 89472757e0..0000000000
--- a/lib/ansible/module_utils/kubevirt.py
+++ /dev/null
@@ -1,462 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-
-# Copyright (c) 2018, KubeVirt Team <@kubevirt>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from collections import defaultdict
-from distutils.version import Version
-
-from ansible.module_utils.common import dict_transformations
-from ansible.module_utils.common._collections_compat import Sequence
-from ansible.module_utils.k8s.common import list_dict_str
-from ansible.module_utils.k8s.raw import KubernetesRawModule
-
-import copy
-import re
-
-MAX_SUPPORTED_API_VERSION = 'v1alpha3'
-API_GROUP = 'kubevirt.io'
-
-
-# Put all args that (can) modify 'spec:' here:
-VM_SPEC_DEF_ARG_SPEC = {
- 'resource_definition': {
- 'type': 'dict',
- 'aliases': ['definition', 'inline']
- },
- 'memory': {'type': 'str'},
- 'memory_limit': {'type': 'str'},
- 'cpu_cores': {'type': 'int'},
- 'disks': {'type': 'list'},
- 'labels': {'type': 'dict'},
- 'interfaces': {'type': 'list'},
- 'machine_type': {'type': 'str'},
- 'cloud_init_nocloud': {'type': 'dict'},
- 'bootloader': {'type': 'str'},
- 'smbios_uuid': {'type': 'str'},
- 'cpu_model': {'type': 'str'},
- 'headless': {'type': 'str'},
- 'hugepage_size': {'type': 'str'},
- 'tablets': {'type': 'list'},
- 'cpu_limit': {'type': 'int'},
- 'cpu_shares': {'type': 'int'},
- 'cpu_features': {'type': 'list'},
- 'affinity': {'type': 'dict'},
- 'anti_affinity': {'type': 'dict'},
- 'node_affinity': {'type': 'dict'},
-}
-# And other common args go here:
-VM_COMMON_ARG_SPEC = {
- 'name': {'required': True},
- 'namespace': {'required': True},
- 'hostname': {'type': 'str'},
- 'subdomain': {'type': 'str'},
- 'state': {
- 'default': 'present',
- 'choices': ['present', 'absent'],
- },
- 'force': {
- 'type': 'bool',
- 'default': False,
- },
- 'merge_type': {'type': 'list', 'choices': ['json', 'merge', 'strategic-merge']},
- 'wait': {'type': 'bool', 'default': True},
- 'wait_timeout': {'type': 'int', 'default': 120},
- 'wait_sleep': {'type': 'int', 'default': 5},
-}
-VM_COMMON_ARG_SPEC.update(VM_SPEC_DEF_ARG_SPEC)
-
-
-def virtdict():
- """
- This function create dictionary, with defaults to dictionary.
- """
- return defaultdict(virtdict)
-
-
-class KubeAPIVersion(Version):
- component_re = re.compile(r'(\d+ | [a-z]+)', re.VERBOSE)
-
- def __init__(self, vstring=None):
- if vstring:
- self.parse(vstring)
-
- def parse(self, vstring):
- self.vstring = vstring
- components = [x for x in self.component_re.split(vstring) if x]
- for i, obj in enumerate(components):
- try:
- components[i] = int(obj)
- except ValueError:
- pass
-
- errmsg = "version '{0}' does not conform to kubernetes api versioning guidelines".format(vstring)
- c = components
-
- if len(c) not in (2, 4) or c[0] != 'v' or not isinstance(c[1], int):
- raise ValueError(errmsg)
- if len(c) == 4 and (c[2] not in ('alpha', 'beta') or not isinstance(c[3], int)):
- raise ValueError(errmsg)
-
- self.version = components
-
- def __str__(self):
- return self.vstring
-
- def __repr__(self):
- return "KubeAPIVersion ('{0}')".format(str(self))
-
- def _cmp(self, other):
- if isinstance(other, str):
- other = KubeAPIVersion(other)
-
- myver = self.version
- otherver = other.version
-
- for ver in myver, otherver:
- if len(ver) == 2:
- ver.extend(['zeta', 9999])
-
- if myver == otherver:
- return 0
- if myver < otherver:
- return -1
- if myver > otherver:
- return 1
-
- # python2 compatibility
- def __cmp__(self, other):
- return self._cmp(other)
-
-
-class KubeVirtRawModule(KubernetesRawModule):
- def __init__(self, *args, **kwargs):
- super(KubeVirtRawModule, self).__init__(*args, **kwargs)
-
- @staticmethod
- def merge_dicts(base_dict, merging_dicts):
- """This function merges a base dictionary with one or more other dictionaries.
- The base dictionary takes precedence when there is a key collision.
- merging_dicts can be a dict or a list or tuple of dicts. In the latter case, the
- dictionaries at the front of the list have higher precedence over the ones at the end.
- """
- if not merging_dicts:
- merging_dicts = ({},)
-
- if not isinstance(merging_dicts, Sequence):
- merging_dicts = (merging_dicts,)
-
- new_dict = {}
- for d in reversed(merging_dicts):
- new_dict = dict_transformations.dict_merge(new_dict, d)
-
- new_dict = dict_transformations.dict_merge(new_dict, base_dict)
-
- return new_dict
-
- def get_resource(self, resource):
- try:
- existing = resource.get(name=self.name, namespace=self.namespace)
- except Exception:
- existing = None
-
- return existing
-
- def _define_datavolumes(self, datavolumes, spec):
- """
- Takes datavoulmes parameter of Ansible and create kubevirt API datavolumesTemplateSpec
- structure from it
- """
- if not datavolumes:
- return
-
- spec['dataVolumeTemplates'] = []
- for dv in datavolumes:
- # Add datavolume to datavolumetemplates spec:
- dvt = virtdict()
- dvt['metadata']['name'] = dv.get('name')
- dvt['spec']['pvc'] = {
- 'accessModes': dv.get('pvc').get('accessModes'),
- 'resources': {
- 'requests': {
- 'storage': dv.get('pvc').get('storage'),
- }
- }
- }
- dvt['spec']['source'] = dv.get('source')
- spec['dataVolumeTemplates'].append(dvt)
-
- # Add datavolume to disks spec:
- if not spec['template']['spec']['domain']['devices']['disks']:
- spec['template']['spec']['domain']['devices']['disks'] = []
-
- spec['template']['spec']['domain']['devices']['disks'].append(
- {
- 'name': dv.get('name'),
- 'disk': dv.get('disk', {'bus': 'virtio'}),
- }
- )
-
- # Add datavolume to volumes spec:
- if not spec['template']['spec']['volumes']:
- spec['template']['spec']['volumes'] = []
-
- spec['template']['spec']['volumes'].append(
- {
- 'dataVolume': {
- 'name': dv.get('name')
- },
- 'name': dv.get('name'),
- }
- )
-
- def _define_cloud_init(self, cloud_init_nocloud, template_spec):
- """
- Takes the user's cloud_init_nocloud parameter and fill it in kubevirt
- API strucuture. The name for disk is hardcoded to ansiblecloudinitdisk.
- """
- if cloud_init_nocloud:
- if not template_spec['volumes']:
- template_spec['volumes'] = []
- if not template_spec['domain']['devices']['disks']:
- template_spec['domain']['devices']['disks'] = []
-
- template_spec['volumes'].append({'name': 'ansiblecloudinitdisk', 'cloudInitNoCloud': cloud_init_nocloud})
- template_spec['domain']['devices']['disks'].append({
- 'name': 'ansiblecloudinitdisk',
- 'disk': {'bus': 'virtio'},
- })
-
- def _define_interfaces(self, interfaces, template_spec, defaults):
- """
- Takes interfaces parameter of Ansible and create kubevirt API interfaces
- and networks strucutre out from it.
- """
- if not interfaces and defaults and 'interfaces' in defaults:
- interfaces = copy.deepcopy(defaults['interfaces'])
- for d in interfaces:
- d['network'] = defaults['networks'][0]
-
- if interfaces:
- # Extract interfaces k8s specification from interfaces list passed to Ansible:
- spec_interfaces = []
- for i in interfaces:
- spec_interfaces.append(
- self.merge_dicts(dict((k, v) for k, v in i.items() if k != 'network'), defaults['interfaces'])
- )
- if 'interfaces' not in template_spec['domain']['devices']:
- template_spec['domain']['devices']['interfaces'] = []
- template_spec['domain']['devices']['interfaces'].extend(spec_interfaces)
-
- # Extract networks k8s specification from interfaces list passed to Ansible:
- spec_networks = []
- for i in interfaces:
- net = i['network']
- net['name'] = i['name']
- spec_networks.append(self.merge_dicts(net, defaults['networks']))
- if 'networks' not in template_spec:
- template_spec['networks'] = []
- template_spec['networks'].extend(spec_networks)
-
- def _define_disks(self, disks, template_spec, defaults):
- """
- Takes disks parameter of Ansible and create kubevirt API disks and
- volumes strucutre out from it.
- """
- if not disks and defaults and 'disks' in defaults:
- disks = copy.deepcopy(defaults['disks'])
- for d in disks:
- d['volume'] = defaults['volumes'][0]
-
- if disks:
- # Extract k8s specification from disks list passed to Ansible:
- spec_disks = []
- for d in disks:
- spec_disks.append(
- self.merge_dicts(dict((k, v) for k, v in d.items() if k != 'volume'), defaults['disks'])
- )
- if 'disks' not in template_spec['domain']['devices']:
- template_spec['domain']['devices']['disks'] = []
- template_spec['domain']['devices']['disks'].extend(spec_disks)
-
- # Extract volumes k8s specification from disks list passed to Ansible:
- spec_volumes = []
- for d in disks:
- volume = d['volume']
- volume['name'] = d['name']
- spec_volumes.append(self.merge_dicts(volume, defaults['volumes']))
- if 'volumes' not in template_spec:
- template_spec['volumes'] = []
- template_spec['volumes'].extend(spec_volumes)
-
- def find_supported_resource(self, kind):
- results = self.client.resources.search(kind=kind, group=API_GROUP)
- if not results:
- self.fail('Failed to find resource {0} in {1}'.format(kind, API_GROUP))
- sr = sorted(results, key=lambda r: KubeAPIVersion(r.api_version), reverse=True)
- for r in sr:
- if KubeAPIVersion(r.api_version) <= KubeAPIVersion(MAX_SUPPORTED_API_VERSION):
- return r
- self.fail("API versions {0} are too recent. Max supported is {1}/{2}.".format(
- str([r.api_version for r in sr]), API_GROUP, MAX_SUPPORTED_API_VERSION))
-
- def _construct_vm_definition(self, kind, definition, template, params, defaults=None):
- self.client = self.get_api_client()
-
- disks = params.get('disks', [])
- memory = params.get('memory')
- memory_limit = params.get('memory_limit')
- cpu_cores = params.get('cpu_cores')
- cpu_model = params.get('cpu_model')
- cpu_features = params.get('cpu_features')
- labels = params.get('labels')
- datavolumes = params.get('datavolumes')
- interfaces = params.get('interfaces')
- bootloader = params.get('bootloader')
- cloud_init_nocloud = params.get('cloud_init_nocloud')
- machine_type = params.get('machine_type')
- headless = params.get('headless')
- smbios_uuid = params.get('smbios_uuid')
- hugepage_size = params.get('hugepage_size')
- tablets = params.get('tablets')
- cpu_shares = params.get('cpu_shares')
- cpu_limit = params.get('cpu_limit')
- node_affinity = params.get('node_affinity')
- vm_affinity = params.get('affinity')
- vm_anti_affinity = params.get('anti_affinity')
- hostname = params.get('hostname')
- subdomain = params.get('subdomain')
- template_spec = template['spec']
-
- # Merge additional flat parameters:
- if memory:
- template_spec['domain']['resources']['requests']['memory'] = memory
-
- if cpu_shares:
- template_spec['domain']['resources']['requests']['cpu'] = cpu_shares
-
- if cpu_limit:
- template_spec['domain']['resources']['limits']['cpu'] = cpu_limit
-
- if tablets:
- for tablet in tablets:
- tablet['type'] = 'tablet'
- template_spec['domain']['devices']['inputs'] = tablets
-
- if memory_limit:
- template_spec['domain']['resources']['limits']['memory'] = memory_limit
-
- if hugepage_size is not None:
- template_spec['domain']['memory']['hugepages']['pageSize'] = hugepage_size
-
- if cpu_features is not None:
- template_spec['domain']['cpu']['features'] = cpu_features
-
- if cpu_cores is not None:
- template_spec['domain']['cpu']['cores'] = cpu_cores
-
- if cpu_model:
- template_spec['domain']['cpu']['model'] = cpu_model
-
- if labels:
- template['metadata']['labels'] = self.merge_dicts(labels, template['metadata']['labels'])
-
- if machine_type:
- template_spec['domain']['machine']['type'] = machine_type
-
- if bootloader:
- template_spec['domain']['firmware']['bootloader'] = {bootloader: {}}
-
- if smbios_uuid:
- template_spec['domain']['firmware']['uuid'] = smbios_uuid
-
- if headless is not None:
- template_spec['domain']['devices']['autoattachGraphicsDevice'] = not headless
-
- if vm_affinity or vm_anti_affinity:
- vms_affinity = vm_affinity or vm_anti_affinity
- affinity_name = 'podAffinity' if vm_affinity else 'podAntiAffinity'
- for affinity in vms_affinity.get('soft', []):
- if not template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution']:
- template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'] = []
- template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'].append({
- 'weight': affinity.get('weight'),
- 'podAffinityTerm': {
- 'labelSelector': {
- 'matchExpressions': affinity.get('term').get('match_expressions'),
- },
- 'topologyKey': affinity.get('topology_key'),
- },
- })
- for affinity in vms_affinity.get('hard', []):
- if not template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution']:
- template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'] = []
- template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'].append({
- 'labelSelector': {
- 'matchExpressions': affinity.get('term').get('match_expressions'),
- },
- 'topologyKey': affinity.get('topology_key'),
- })
-
- if node_affinity:
- for affinity in node_affinity.get('soft', []):
- if not template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution']:
- template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'] = []
- template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'].append({
- 'weight': affinity.get('weight'),
- 'preference': {
- 'matchExpressions': affinity.get('term').get('match_expressions'),
- }
- })
- for affinity in node_affinity.get('hard', []):
- if not template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms']:
- template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'] = []
- template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'].append({
- 'matchExpressions': affinity.get('term').get('match_expressions'),
- })
-
- if hostname:
- template_spec['hostname'] = hostname
-
- if subdomain:
- template_spec['subdomain'] = subdomain
-
- # Define disks
- self._define_disks(disks, template_spec, defaults)
-
- # Define cloud init disk if defined:
- # Note, that this must be called after _define_disks, so the cloud_init
- # is not first in order and it's not used as boot disk:
- self._define_cloud_init(cloud_init_nocloud, template_spec)
-
- # Define interfaces:
- self._define_interfaces(interfaces, template_spec, defaults)
-
- # Define datavolumes:
- self._define_datavolumes(datavolumes, definition['spec'])
-
- return self.merge_dicts(definition, self.resource_definitions[0])
-
- def construct_vm_definition(self, kind, definition, template, defaults=None):
- definition = self._construct_vm_definition(kind, definition, template, self.params, defaults)
- resource = self.find_supported_resource(kind)
- definition = self.set_defaults(resource, definition)
- return resource, definition
-
- def construct_vm_template_definition(self, kind, definition, template, params):
- definition = self._construct_vm_definition(kind, definition, template, params)
- resource = self.find_resource(kind, definition['apiVersion'], fail=True)
-
- # Set defaults:
- definition['kind'] = kind
- definition['metadata']['name'] = params.get('name')
- definition['metadata']['namespace'] = params.get('namespace')
-
- return resource, definition
-
- def execute_crud(self, kind, definition):
- """ Module execution """
- resource = self.find_supported_resource(kind)
- definition = self.set_defaults(resource, definition)
- return self.perform_action(resource, definition)
diff --git a/lib/ansible/module_utils/ldap.py b/lib/ansible/module_utils/ldap.py
deleted file mode 100644
index d49d0a97e8..0000000000
--- a/lib/ansible/module_utils/ldap.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
-# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
-# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-from ansible.module_utils._text import to_native
-
-try:
- import ldap
- import ldap.sasl
-
- HAS_LDAP = True
-except ImportError:
- HAS_LDAP = False
-
-
-def gen_specs(**specs):
- specs.update({
- 'bind_dn': dict(),
- 'bind_pw': dict(default='', no_log=True),
- 'dn': dict(required=True),
- 'server_uri': dict(default='ldapi:///'),
- 'start_tls': dict(default=False, type='bool'),
- 'validate_certs': dict(default=True, type='bool'),
- })
-
- return specs
-
-
-class LdapGeneric(object):
- def __init__(self, module):
- # Shortcuts
- self.module = module
- self.bind_dn = self.module.params['bind_dn']
- self.bind_pw = self.module.params['bind_pw']
- self.dn = self.module.params['dn']
- self.server_uri = self.module.params['server_uri']
- self.start_tls = self.module.params['start_tls']
- self.verify_cert = self.module.params['validate_certs']
-
- # Establish connection
- self.connection = self._connect_to_ldap()
-
- def fail(self, msg, exn):
- self.module.fail_json(
- msg=msg,
- details=to_native(exn),
- exception=traceback.format_exc()
- )
-
- def _connect_to_ldap(self):
- if not self.verify_cert:
- ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
-
- connection = ldap.initialize(self.server_uri)
-
- if self.start_tls:
- try:
- connection.start_tls_s()
- except ldap.LDAPError as e:
- self.fail("Cannot start TLS.", e)
-
- try:
- if self.bind_dn is not None:
- connection.simple_bind_s(self.bind_dn, self.bind_pw)
- else:
- connection.sasl_interactive_bind_s('', ldap.sasl.external())
- except ldap.LDAPError as e:
- self.fail("Cannot bind to the server.", e)
-
- return connection
diff --git a/lib/ansible/module_utils/linode.py b/lib/ansible/module_utils/linode.py
deleted file mode 100644
index a631f74b2f..0000000000
--- a/lib/ansible/module_utils/linode.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Luke Murphy @decentral1se
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-
def get_user_agent(module):
    """Build the User-Agent string sent with LinodeClient requests.

    Falls back to 'unknown' for the version when the Ansible release
    metadata cannot be imported.
    """
    try:
        from ansible.module_utils.ansible_release import __version__ as ansible_version
    except ImportError:
        ansible_version = 'unknown'
    return 'Ansible-{0}/{1}'.format(module, ansible_version)
diff --git a/lib/ansible/module_utils/lxd.py b/lib/ansible/module_utils/lxd.py
deleted file mode 100644
index c53c3a76ab..0000000000
--- a/lib/ansible/module_utils/lxd.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import socket
-import ssl
-
-from ansible.module_utils.urls import generic_urlparse
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.six.moves import http_client
-from ansible.module_utils._text import to_text
-
-# httplib/http.client connection using unix domain socket
-HTTPConnection = http_client.HTTPConnection
-HTTPSConnection = http_client.HTTPSConnection
-
-import json
-
-
class UnixHTTPConnection(HTTPConnection):
    """HTTPConnection variant that talks to a unix domain socket."""

    def __init__(self, path):
        # 'localhost' is a placeholder host; the socket path below is what
        # the connection actually uses.
        HTTPConnection.__init__(self, 'localhost')
        self.path = path

    def connect(self):
        unix_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        unix_sock.connect(self.path)
        self.sock = unix_sock
-
-
class LXDClientException(Exception):
    """Raised for any LXD API or transport error.

    :param msg: human readable error description
    :param kwargs: extra context, e.g. ``err`` carrying the underlying
        socket error, or ``logs`` with the request/response history when
        the client runs in debug mode
    """
    def __init__(self, msg, **kwargs):
        self.msg = msg
        self.kwargs = kwargs
-
-
class LXDClient(object):
    """Minimal REST client for the LXD API over HTTPS or a unix socket."""

    def __init__(self, url, key_file=None, cert_file=None, debug=False):
        """LXD Client.

        :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
        :type url: ``str``
        :param key_file: The path of the client certificate key file.
        :type key_file: ``str``
        :param cert_file: The path of the client certificate file.
        :type cert_file: ``str``
        :param debug: The debug flag. The request and response are stored in logs when debug is true.
        :type debug: ``bool``
        """
        self.url = url
        self.debug = debug
        self.logs = []
        if url.startswith('https:'):
            self.cert_file = cert_file
            self.key_file = key_file
            parts = generic_urlparse(urlparse(self.url))
            # NOTE(review): Purpose.CLIENT_AUTH builds a server-side context;
            # SERVER_AUTH is the usual choice for an outgoing client
            # connection. LXD commonly uses self-signed certs, so this may be
            # intentional -- confirm before changing.
            ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ctx.load_cert_chain(cert_file, keyfile=key_file)
            self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
        elif url.startswith('unix:'):
            unix_socket_path = url[len('unix:'):]
            self.connection = UnixHTTPConnection(unix_socket_path)
        else:
            raise LXDClientException('URL scheme must be unix: or https:')

    def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
        """Send a request and, for async LXD operations, wait for completion.

        :param ok_error_codes: LXD error codes to treat as success.
        :returns: the decoded JSON response of the (final) request.
        """
        resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
        if resp_json['type'] == 'async':
            # Async operations return an operation URL; block on its /wait
            # endpoint and check the final status.
            url = '{0}/wait'.format(resp_json['operation'])
            resp_json = self._send_request('GET', url)
            if resp_json['metadata']['status'] != 'Success':
                self._raise_err_from_json(resp_json)
        return resp_json

    def authenticate(self, trust_password):
        """Register this client's certificate using the server trust password."""
        body_json = {'type': 'client', 'password': trust_password}
        return self._send_request('POST', '/1.0/certificates', body_json=body_json)

    def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
        """Perform one HTTP round trip and decode the JSON body.

        Note: *timeout* is only recorded in the debug log entry; it is not
        applied to the underlying socket.
        Raises LXDClientException on transport errors, or on LXD 'error'
        responses not covered by *ok_error_codes*.
        """
        try:
            body = json.dumps(body_json)
            self.connection.request(method, url, body=body)
            resp = self.connection.getresponse()
            resp_data = resp.read()
            resp_data = to_text(resp_data, errors='surrogate_or_strict')
            resp_json = json.loads(resp_data)
            self.logs.append({
                'type': 'sent request',
                'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
                'response': {'json': resp_json}
            })
            resp_type = resp_json.get('type', None)
            if resp_type == 'error':
                if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
                    return resp_json
                if resp_json['error'] == "Certificate already in trust store":
                    # Re-registering a trusted certificate is harmless.
                    return resp_json
                self._raise_err_from_json(resp_json)
            return resp_json
        except socket.error as e:
            raise LXDClientException('cannot connect to the LXD server', err=e)

    def _raise_err_from_json(self, resp_json):
        """Raise LXDClientException for an error response, attaching the
        request/response log when running in debug mode."""
        err_params = {}
        if self.debug:
            err_params['logs'] = self.logs
        raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)

    @staticmethod
    def _get_err_from_resp_json(resp_json):
        # Prefer the detailed metadata 'err' field, fall back to the
        # top-level 'error' field.
        err = None
        metadata = resp_json.get('metadata', None)
        if metadata is not None:
            err = metadata.get('err', None)
        if err is None:
            err = resp_json.get('error', None)
        return err
diff --git a/lib/ansible/module_utils/manageiq.py b/lib/ansible/module_utils/manageiq.py
deleted file mode 100644
index 36e130f895..0000000000
--- a/lib/ansible/module_utils/manageiq.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#
-# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import os
-import traceback
-
-from ansible.module_utils.basic import missing_required_lib
-
-CLIENT_IMP_ERR = None
-try:
- from manageiq_client.api import ManageIQClient
- HAS_CLIENT = True
-except ImportError:
- CLIENT_IMP_ERR = traceback.format_exc()
- HAS_CLIENT = False
-
-
def manageiq_argument_spec():
    """Return the nested argument spec for the ``manageiq_connection`` option.

    Connection credentials default to the MIQ_* environment variables when
    they are set.
    """
    connection_options = {
        'url': dict(default=os.environ.get('MIQ_URL', None)),
        'username': dict(default=os.environ.get('MIQ_USERNAME', None)),
        'password': dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True),
        'token': dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True),
        'validate_certs': dict(default=True, type='bool', aliases=['verify_ssl']),
        'ca_cert': dict(required=False, default=None, aliases=['ca_bundle_path']),
    }
    return {
        'manageiq_connection': dict(type='dict', apply_defaults=True, options=connection_options),
    }
-
-
def check_client(module):
    """Fail the module run when the manageiq-client package is not importable."""
    if HAS_CLIENT:
        return
    module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR)
-
-
def validate_connection_params(module):
    """Ensure the manageiq_connection block contains usable credentials.

    A connection needs either url+token or url+username+password. Returns
    the params dict on success; otherwise fails the module naming the first
    missing required argument.
    """
    params = module.params['manageiq_connection']
    url, token = params['url'], params['token']
    username, password = params['username'], params['password']

    if url and (token or (username and password)):
        return params

    # Report the first missing piece of the username/password flow.
    for arg in ('url', 'username', 'password'):
        if params[arg] in (None, ''):
            module.fail_json(msg="missing required argument: manageiq_connection[{}]".format(arg))
-
-
-def manageiq_entities():
- return {
- 'provider': 'providers', 'host': 'hosts', 'vm': 'vms',
- 'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores',
- 'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services',
- 'service template': 'service_templates', 'template': 'templates',
- 'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints'
- }
-
-
class ManageIQ(object):
    """
    Class encapsulating the ManageIQ API client.

    Validates the manageiq_connection parameters, opens a ManageIQClient
    session, and offers lookup helpers over the API collections.
    """

    def __init__(self, module):
        # handle import errors
        check_client(module)

        params = validate_connection_params(module)

        url = params['url']
        username = params['username']
        password = params['password']
        token = params['token']
        verify_ssl = params['validate_certs']
        ca_bundle_path = params['ca_cert']

        self._module = module
        self._api_url = url + '/api'
        self._auth = dict(user=username, password=password, token=token)
        try:
            self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path)
        except Exception as e:
            self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e)))

    @property
    def module(self):
        """ Ansible module module

        Returns:
            the ansible module
        """
        return self._module

    @property
    def api_url(self):
        """ Base ManageIQ API

        Returns:
            the base ManageIQ API (``<url>/api``)
        """
        return self._api_url

    @property
    def client(self):
        """ ManageIQ client

        Returns:
            the ManageIQ client
        """
        return self._client

    def find_collection_resource_by(self, collection_name, **params):
        """ Searches the collection resource by the collection name and the param passed.

        Returns:
            the resource as a dict of its attributes if it exists in manageiq,
            None otherwise.
        """
        try:
            # getattr() instead of __getattribute__(): same lookup, idiomatic.
            entity = getattr(self.client.collections, collection_name).get(**params)
        except ValueError:
            # the client raises ValueError when no resource matches
            return None
        except Exception as e:
            self.module.fail_json(msg="failed to find resource {error}".format(error=e))
        return vars(entity)

    def find_collection_resource_or_fail(self, collection_name, **params):
        """ Searches the collection resource by the collection name and the param passed.

        Returns:
            the resource as a dict of its attributes if it exists in manageiq,
            fails the module otherwise.
        """
        resource = self.find_collection_resource_by(collection_name, **params)
        if resource:
            return resource
        msg = "{collection_name} where {params} does not exist in manageiq".format(
            collection_name=collection_name, params=str(params))
        self.module.fail_json(msg=msg)
diff --git a/lib/ansible/module_utils/memset.py b/lib/ansible/module_utils/memset.py
deleted file mode 100644
index 51dce0c690..0000000000
--- a/lib/ansible/module_utils/memset.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import open_url, urllib_error
-from ansible.module_utils.basic import json
-
-
class Response(object):
    '''
    Lightweight stand-in for a requests-style response object.
    '''

    def __init__(self):
        # both fields are filled in by memset_api_call once the HTTP
        # exchange has completed
        self.content = self.status_code = None

    def json(self):
        '''Decode and return the stored body as JSON.'''
        return json.loads(self.content)
-
-
def memset_api_call(api_key, api_method, payload=None):
    '''
    Generic function which returns results back to calling function.

    Requires an API key and an API method to assemble the API URL.
    Returns response text to be analysed.

    :returns: (has_failed, msg, response) -- on failure msg describes the
        API error; on success msg is the decoded JSON body.
    '''
    # instantiate a response object
    response = Response()

    # if we've already started preloading the payload then copy it
    # and use that, otherwise we need to instantiate it.
    if payload is None:
        payload = dict()
    else:
        # copy so the caller's dict is not mutated
        payload = payload.copy()

    # set some sane defaults
    has_failed = False
    msg = None

    data = urlencode(payload)
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    api_uri_base = 'https://api.memset.com/v1/json/'
    api_uri = '{0}{1}/' . format(api_uri_base, api_method)

    try:
        # Memset authenticates with the API key as the basic-auth username.
        resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key)
        response.content = resp.read().decode('utf-8')
        response.status_code = resp.getcode()
    except urllib_error.HTTPError as e:
        try:
            errorcode = e.code
        except AttributeError:
            errorcode = None

        has_failed = True
        # HTTPError bodies still carry the API's JSON error description
        response.content = e.read().decode('utf8')
        response.status_code = errorcode

        if response.status_code is not None:
            msg = "Memset API returned a {0} response ({1}, {2})." . format(response.status_code, response.json()['error_type'], response.json()['error'])
        else:
            msg = "Memset API returned an error ({0}, {1})." . format(response.json()['error_type'], response.json()['error'])

    # success path: surface the decoded body to the caller
    if msg is None:
        msg = response.json()

    return(has_failed, msg, response)
-
-
def check_zone_domain(data, domain):
    '''
    Returns True when *domain* appears in the zone-domain listing carried
    by *data*, False otherwise (including on non-2xx responses).
    '''
    if data.status_code not in (200, 201):
        return False
    return any(zone_domain['domain'] == domain for zone_domain in data.json())
-
-
def check_zone(data, name):
    '''
    Check whether a zone nickname exists exactly once.

    Returns (exists, counter): counter is the number of zones whose
    nickname matches *name*; exists is True only for a single match.
    '''
    if data.status_code not in (200, 201):
        return False, 0
    counter = sum(1 for zone in data.json() if zone['nickname'] == name)
    return counter == 1, counter
-
-
def get_zone_id(zone_name, current_zones):
    '''
    Look up the id of *zone_name* within *current_zones*.

    Returns (zone_exists, msg, counter, zone_id); zone_id is only set when
    exactly one zone carries the requested nickname.
    '''
    matching_ids = [zone['id'] for zone in current_zones if zone['nickname'] == zone_name]
    counter = len(matching_ids)

    if counter == 1:
        return True, None, counter, matching_ids[0]
    if counter == 0:
        return False, 'No matching zone found', counter, None
    return False, 'Zone ID could not be returned as duplicate zone names were detected', counter, None
diff --git a/lib/ansible/module_utils/mysql.py b/lib/ansible/module_utils/mysql.py
deleted file mode 100644
index 46198f367b..0000000000
--- a/lib/ansible/module_utils/mysql.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015
-# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-
-try:
- import pymysql as mysql_driver
- _mysql_cursor_param = 'cursor'
-except ImportError:
- try:
- import MySQLdb as mysql_driver
- import MySQLdb.cursors
- _mysql_cursor_param = 'cursorclass'
- except ImportError:
- mysql_driver = None
-
-mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.'
-
-
def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
                  connect_timeout=30, autocommit=False):
    """Open a MySQL connection and return ``(cursor, connection)``.

    Host/port/unix-socket values are read from ``module.params`` (see
    mysql_common_argument_spec). Explicit *login_user* / *login_password*
    override anything loaded from *config_file*.

    :param cursor_class: pass 'DictCursor' to receive rows as dicts.
    :param autocommit: enable autocommit on the new connection; handled
        differently per driver (see below).
    """
    config = {}

    if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
        config['ssl'] = {}

    # A unix socket takes precedence over host/port.
    if module.params['login_unix_socket']:
        config['unix_socket'] = module.params['login_unix_socket']
    else:
        config['host'] = module.params['login_host']
        config['port'] = module.params['login_port']

    if os.path.exists(config_file):
        config['read_default_file'] = config_file

    # If login_user or login_password are given, they should override the
    # config file
    if login_user is not None:
        config['user'] = login_user
    if login_password is not None:
        config['passwd'] = login_password
    if ssl_cert is not None:
        config['ssl']['cert'] = ssl_cert
    if ssl_key is not None:
        config['ssl']['key'] = ssl_key
    if ssl_ca is not None:
        config['ssl']['ca'] = ssl_ca
    if db is not None:
        config['db'] = db
    if connect_timeout is not None:
        config['connect_timeout'] = connect_timeout

    if _mysql_cursor_param == 'cursor':
        # In case of PyMySQL driver: autocommit is a connect() keyword.
        db_connection = mysql_driver.connect(autocommit=autocommit, **config)
    else:
        # In case of MySQLdb driver: autocommit must be toggled after connect.
        db_connection = mysql_driver.connect(**config)
        if autocommit:
            db_connection.autocommit(True)

    if cursor_class == 'DictCursor':
        return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection
    else:
        return db_connection.cursor(), db_connection
-
-
def mysql_common_argument_spec():
    """Argument spec fragment with the connection options shared by all
    MySQL modules."""
    return {
        'login_user': dict(type='str', default=None),
        'login_password': dict(type='str', no_log=True),
        'login_host': dict(type='str', default='localhost'),
        'login_port': dict(type='int', default=3306),
        'login_unix_socket': dict(type='str'),
        'config_file': dict(type='path', default='~/.my.cnf'),
        'connect_timeout': dict(type='int', default=30),
        'client_cert': dict(type='path', aliases=['ssl_cert']),
        'client_key': dict(type='path', aliases=['ssl_key']),
        'ca_cert': dict(type='path', aliases=['ssl_ca']),
    }
diff --git a/lib/ansible/module_utils/net_tools/__init__.py b/lib/ansible/module_utils/net_tools/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/net_tools/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/net_tools/netbox/__init__.py b/lib/ansible/module_utils/net_tools/netbox/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/net_tools/netbox/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/net_tools/nios/__init__.py b/lib/ansible/module_utils/net_tools/nios/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/net_tools/nios/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/net_tools/nios/api.py b/lib/ansible/module_utils/net_tools/nios/api.py
deleted file mode 100644
index 2a759033e2..0000000000
--- a/lib/ansible/module_utils/net_tools/nios/api.py
+++ /dev/null
@@ -1,601 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2018 Red Hat Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-import os
-from functools import partial
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six import iteritems
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-
-try:
- from infoblox_client.connector import Connector
- from infoblox_client.exceptions import InfobloxException
- HAS_INFOBLOX_CLIENT = True
-except ImportError:
- HAS_INFOBLOX_CLIENT = False
-
# defining nios constants: WAPI object type names used by the modules
NIOS_DNS_VIEW = 'view'
NIOS_NETWORK_VIEW = 'networkview'
NIOS_HOST_RECORD = 'record:host'
NIOS_IPV4_NETWORK = 'network'
NIOS_IPV6_NETWORK = 'ipv6network'
NIOS_ZONE = 'zone_auth'
NIOS_PTR_RECORD = 'record:ptr'
NIOS_A_RECORD = 'record:a'
NIOS_AAAA_RECORD = 'record:aaaa'
NIOS_CNAME_RECORD = 'record:cname'
NIOS_MX_RECORD = 'record:mx'
NIOS_SRV_RECORD = 'record:srv'
NIOS_NAPTR_RECORD = 'record:naptr'
NIOS_TXT_RECORD = 'record:txt'
NIOS_NSGROUP = 'nsgroup'
NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
NIOS_MEMBER = 'member'

# Provider options shared by all NIOS modules; each option with a fallback
# can also be supplied through the named INFOBLOX_* environment variable.
NIOS_PROVIDER_SPEC = {
    'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
    'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
    'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
    'silent_ssl_warnings': dict(type='bool', default=True),
    'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
    'http_pool_connections': dict(type='int', default=10),
    'http_pool_maxsize': dict(type='int', default=10),
    'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
    # NOTE(review): env name has historically been INFOBLOX_WAP_VERSION
    # (sic); kept as-is for backwards compatibility.
    'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])),
    # Bug fix: max_results previously fell back to INFOBLOX_MAX_RETRIES
    # (copy/paste); get_connector() derives INFOBLOX_MAX_RESULTS from the
    # option name, so the fallback now matches.
    'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RESULTS']))
}
-
-
def get_connector(*args, **kwargs):
    ''' Returns an instance of infoblox_client.connector.Connector
    :params args: positional arguments are silently ignored
    :params kwargs: dict that is passed to Connector init
    :returns: Connector
    '''
    if not HAS_INFOBLOX_CLIENT:
        raise Exception('infoblox-client is required but does not appear '
                        'to be installed. It can be installed using the '
                        'command `pip install infoblox-client`')

    allowed_keys = set(NIOS_PROVIDER_SPEC) | {'ssl_verify'}
    if not set(kwargs) <= allowed_keys:
        raise Exception('invalid or unsupported keyword argument for connector')

    for key, spec in iteritems(NIOS_PROVIDER_SPEC):
        if key in kwargs:
            # explicitly provided values always win
            continue
        # apply default values from NIOS_PROVIDER_SPEC since we cannot just
        # assume the provider values are coming from AnsibleModule
        if 'default' in spec:
            kwargs[key] = spec['default']
        # override any values with env variables unless they were
        # explicitly set
        env = ('INFOBLOX_%s' % key).upper()
        if env in os.environ:
            kwargs[key] = os.environ.get(env)

    # the connector expects 'ssl_verify' rather than 'validate_certs'
    if 'validate_certs' in kwargs:
        kwargs['ssl_verify'] = kwargs.pop('validate_certs')

    return Connector(kwargs)
-
-
def normalize_extattrs(value):
    ''' Normalize extattrs field to expected format
    The module accepts extattrs as key/value pairs. This method will
    transform the key/value pairs into a structure suitable for
    sending across WAPI in the format of:
        extattrs: {
            key: {
                value: <value>
            }
        }
    '''
    # Idiom fix: use the built-in dict.items() instead of the six-style
    # iteritems() helper; behavior is identical on both Python 2 and 3.
    return dict((key, {'value': val}) for key, val in value.items())
-
-
def flatten_extattrs(value):
    ''' Flatten the key/value struct for extattrs
    WAPI returns extattrs field as a dict in form of:
        extattrs: {
            key: {
                value: <value>
            }
        }
    This method will flatten the structure to:
        extattrs: {
            key: value
        }
    '''
    # Idiom fix: use the built-in dict.items() instead of the six-style
    # iteritems() helper; behavior is identical on both Python 2 and 3.
    return dict((key, wrapped['value']) for key, wrapped in value.items())
-
-
def member_normalize(member_spec):
    ''' Transforms the member module arguments into a valid WAPI struct
    This function will transform the arguments into a structure that
    is a valid WAPI structure in the format of:
        {
            key: <value>,
        }
    It will remove any arguments that are set to None since WAPI will error on
    that condition.
    The remainder of the value validation is performed by WAPI
    Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
    In this function, they are converted to dictionary.
    '''
    member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
                       'pre_provisioning', 'network_setting', 'v6_network_setting',
                       'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
                       'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
    # Bug fix: iterate over a snapshot of the keys. None-valued entries are
    # deleted below, and deleting from a dict while iterating its live
    # .keys() view raises RuntimeError on Python 3.
    for key in list(member_spec.keys()):
        if key in member_elements and member_spec[key] is not None:
            # these options arrive as single-element lists; unwrap them
            member_spec[key] = member_spec[key][0]
        if isinstance(member_spec[key], dict):
            member_spec[key] = member_normalize(member_spec[key])
        elif isinstance(member_spec[key], list):
            for x in member_spec[key]:
                if isinstance(x, dict):
                    # member_normalize mutates x in place; the rebind is
                    # kept for clarity only
                    x = member_normalize(x)
        elif member_spec[key] is None:
            del member_spec[key]
    return member_spec
-
-
class WapiBase(object):
    ''' Base class for implementing Infoblox WAPI API '''

    # Argument-spec fragment that modules embed to accept provider options.
    provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}

    def __init__(self, provider):
        self.connector = get_connector(**provider)

    def __getattr__(self, name):
        """Proxy unknown attribute access to the connector as a lazy call.

        Returns a partial that invokes the connector method of the same
        name; leading-underscore names are never proxied.
        """
        try:
            return self.__dict__[name]
        except KeyError:
            if name.startswith('_'):
                # avoid shadowing private/dunder lookups with proxies
                raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
            return partial(self._invoke_method, name)

    def _invoke_method(self, name, *args, **kwargs):
        """Call *name* on the connector.

        InfobloxException is routed to the subclass's handle_exception()
        hook when defined, otherwise re-raised.
        """
        try:
            method = getattr(self.connector, name)
            return method(*args, **kwargs)
        except InfobloxException as exc:
            if hasattr(self, 'handle_exception'):
                self.handle_exception(name, exc)
            else:
                raise
-
-
class WapiLookup(WapiBase):
    ''' Implements WapiBase for lookup plugins '''

    def handle_exception(self, method_name, exc):
        # surface the server-provided message when one is present
        if 'text' in exc.response:
            raise Exception(exc.response['text'])
        raise Exception(exc)
-
-
class WapiInventory(WapiBase):
    ''' Implements WapiBase for dynamic inventory script

    No extra behavior is needed beyond the connector proxying provided by
    WapiBase; WAPI errors propagate unchanged (no handle_exception hook).
    '''
    pass
-
-
class WapiModule(WapiBase):
    ''' Implements WapiBase for executing a NIOS module.

    Drives the standard present/absent lifecycle for a WAPI object:
    look up an existing object, diff it against the proposed parameters,
    then create/update/delete as needed.
    '''

    def __init__(self, module):
        self.module = module
        provider = module.params['provider']
        try:
            super(WapiModule, self).__init__(provider)
        except Exception as exc:
            self.module.fail_json(msg=to_text(exc))

    def handle_exception(self, method_name, exc):
        ''' Handles any exceptions raised
        This method will be called if an InfobloxException is raised for
        any call to the instance of Connector and also, in case of generic
        exception. This method will then gracefully fail the module.
        :args method_name: name of the connector method that failed
        :args exc: instance of InfobloxException
        '''
        if ('text' in exc.response):
            self.module.fail_json(
                msg=exc.response['text'],
                type=exc.response['Error'].split(':')[0],
                code=exc.response.get('code'),
                operation=method_name
            )
        else:
            self.module.fail_json(msg=to_native(exc))

    def run(self, ib_obj_type, ib_spec):
        ''' Runs the module and performs configuration tasks
        :args ib_obj_type: the WAPI object type to operate against
        :args ib_spec: the specification for the WAPI object as a dict
        :returns: a results dict with at least a 'changed' key
        '''

        update = new_name = None
        state = self.module.params['state']
        if state not in ('present', 'absent'):
            self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)

        result = {'changed': False}

        # the filter only contains the keys flagged as required lookups
        obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])

        # get object reference
        ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
        proposed_object = {}
        for key, value in iteritems(ib_spec):
            if self.module.params[key] is not None:
                if 'transform' in value:
                    proposed_object[key] = value['transform'](self.module)
                else:
                    proposed_object[key] = self.module.params[key]

        # If configure_by_dns is set to False, then delete the default dns set in the param else throw exception
        if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
                and ib_obj_type == NIOS_HOST_RECORD:
            del proposed_object['view']
        elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
                and ib_obj_type == NIOS_HOST_RECORD:
            self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')

        if ib_obj_ref:
            if len(ib_obj_ref) > 1:
                for each in ib_obj_ref:
                    # To check for existing A_record with same name with input A_record by IP
                    if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
                        current_object = each
                    # To check for existing Host_record with same name with input Host_record by IP
                    # FIX: guard both sides before indexing — the original did
                    # each.get('ipv4addrs')[0], which raises TypeError when the
                    # returned object has no 'ipv4addrs' key.
                    elif each.get('ipv4addrs') and proposed_object.get('ipv4addrs') \
                            and each['ipv4addrs'][0].get('ipv4addr') \
                            and each['ipv4addrs'][0]['ipv4addr'] == proposed_object['ipv4addrs'][0].get('ipv4addr'):
                        current_object = each
                    # Else set the current_object with input value
                    else:
                        current_object = obj_filter
                        ref = None
            else:
                current_object = ib_obj_ref[0]
            if 'extattrs' in current_object:
                current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
            if current_object.get('_ref'):
                ref = current_object.pop('_ref')
        else:
            current_object = obj_filter
            ref = None
        # checks if the object type is member to normalize the attributes being passed
        if (ib_obj_type == NIOS_MEMBER):
            proposed_object = member_normalize(proposed_object)

        # checks if the name's field has been updated
        if update and new_name:
            proposed_object['name'] = new_name

        check_remove = []
        if (ib_obj_type == NIOS_HOST_RECORD):
            # this check is for idempotency, as if the same ip address shall be passed
            # add param will be removed, and same exists true for remove case as well.
            # FIX: the original tested `'ipv4addrs' in [current_object and proposed_object]`,
            # which compares the string against a one-element list of dicts and is
            # therefore always False, so this idempotency block never ran.
            if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
                for each in current_object['ipv4addrs']:
                    if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']:
                        if 'add' in proposed_object['ipv4addrs'][0]:
                            del proposed_object['ipv4addrs'][0]['add']
                        break
                    check_remove += each.values()
                if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove:
                    if 'remove' in proposed_object['ipv4addrs'][0]:
                        del proposed_object['ipv4addrs'][0]['remove']

        res = None
        modified = not self.compare_objects(current_object, proposed_object)
        if 'extattrs' in proposed_object:
            proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])

        # Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
        proposed_object = self.check_if_nios_next_ip_exists(proposed_object)

        if state == 'present':
            if ref is None:
                if not self.module.check_mode:
                    self.create_object(ib_obj_type, proposed_object)
                result['changed'] = True
            # Check if NIOS_MEMBER and the flag to call function create_token is set
            # FIX: use .get() — the original indexed proposed_object['create_token']
            # and raised KeyError whenever the parameter was not supplied.
            elif (ib_obj_type == NIOS_MEMBER) and proposed_object.get('create_token'):
                proposed_object = None
                # the function creates a token that can be used by a pre-provisioned member to join the grid
                result['api_results'] = self.call_func('create_token', ref, proposed_object)
                result['changed'] = True
            elif modified:
                if 'ipv4addrs' in proposed_object:
                    if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]):
                        self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)

                if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
                    run_update = True
                    proposed_object = self.on_update(proposed_object, ib_spec)
                    if 'ipv4addrs' in proposed_object:
                        # FIX: the original wrote `('add' or 'remove') in ...`, which
                        # evaluates to `'add' in ...` only, so the 'remove' flag was
                        # silently ignored here.
                        if 'add' in proposed_object['ipv4addrs'][0] or 'remove' in proposed_object['ipv4addrs'][0]:
                            run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object)
                    if run_update:
                        res = self.update_object(ref, proposed_object)
                        result['changed'] = True
                    else:
                        res = ref
                if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
                    # popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record
                    proposed_object = self.on_update(proposed_object, ib_spec)
                    del proposed_object['view']
                    if not self.module.check_mode:
                        res = self.update_object(ref, proposed_object)
                    result['changed'] = True
                elif 'network_view' in proposed_object:
                    proposed_object.pop('network_view')
                    result['changed'] = True
                if not self.module.check_mode and res is None:
                    proposed_object = self.on_update(proposed_object, ib_spec)
                    self.update_object(ref, proposed_object)
                    result['changed'] = True

        elif state == 'absent':
            if ref is not None:
                if 'ipv4addrs' in proposed_object:
                    if 'remove' in proposed_object['ipv4addrs'][0]:
                        self.check_if_add_remove_ip_arg_exists(proposed_object)
                        self.update_object(ref, proposed_object)
                        result['changed'] = True
                elif not self.module.check_mode:
                    self.delete_object(ref)
                    result['changed'] = True

        return result

    def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
        ''' Send POST request if host record input name and retrieved ref name is same,
        but input IP and retrieved IP is different'''

        # FIX: the original tested `'name' in (obj_filter and ib_obj_ref[0])` and
        # `'ipv4addrs' in (current_object and proposed_object)`; `a and b` yields
        # only the second operand, so only one dict was ever checked.
        if 'name' in obj_filter and 'name' in ib_obj_ref[0] and ib_obj_type == NIOS_HOST_RECORD:
            obj_host_name = obj_filter['name']
            ref_host_name = ib_obj_ref[0]['name']
            # FIX: initialize, so a record with neither address family present no
            # longer raises UnboundLocalError at the comparison below.
            current_ip_addr = proposed_ip_addr = None
            if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
                current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
                proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
            elif 'ipv6addrs' in current_object and 'ipv6addrs' in proposed_object:
                current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
                proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']

            if current_ip_addr is not None and obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
                self.create_object(ib_obj_type, proposed_object)

    def check_if_nios_next_ip_exists(self, proposed_object):
        ''' Check if nios_next_ip argument is passed in ipaddr while creating
        host record, if yes then format proposed object ipv4addrs and pass
        func:nextavailableip and ipaddr range to create hostrecord with next
        available ip in one call to avoid any race condition '''

        if 'ipv4addrs' in proposed_object:
            if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
                ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
                proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
        elif 'ipv4addr' in proposed_object:
            if 'nios_next_ip' in proposed_object['ipv4addr']:
                ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
                proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range

        return proposed_object

    def check_if_add_remove_ip_arg_exists(self, proposed_object):
        '''
        This function shall check if add/remove param is set to true and
        is passed in the args, then we will update the proposed dictionary
        to add/remove IP to existing host_record, if the user passes false
        param with the argument nothing shall be done.
        :returns: True if param is changed based on add/remove, and also the
        changed proposed_object.
        '''
        update = False
        if 'add' in proposed_object['ipv4addrs'][0]:
            if proposed_object['ipv4addrs'][0]['add']:
                # 'ipv4addrs+' is the WAPI syntax for appending addresses
                proposed_object['ipv4addrs+'] = proposed_object['ipv4addrs']
                del proposed_object['ipv4addrs']
                del proposed_object['ipv4addrs+'][0]['add']
                update = True
            else:
                del proposed_object['ipv4addrs'][0]['add']
        elif 'remove' in proposed_object['ipv4addrs'][0]:
            if proposed_object['ipv4addrs'][0]['remove']:
                # 'ipv4addrs-' is the WAPI syntax for removing addresses
                proposed_object['ipv4addrs-'] = proposed_object['ipv4addrs']
                del proposed_object['ipv4addrs']
                del proposed_object['ipv4addrs-'][0]['remove']
                update = True
            else:
                del proposed_object['ipv4addrs'][0]['remove']
        return update, proposed_object

    def issubset(self, item, objects):
        ''' Checks if item is a subset of objects
        :args item: the subset item to validate
        :args objects: superset list of objects to validate against
        :returns: True if item is a subset of one entry in objects otherwise
        this method will return None
        '''
        for obj in objects:
            if isinstance(item, dict):
                if all(entry in obj.items() for entry in item.items()):
                    return True
            else:
                if item in obj:
                    return True

    def compare_objects(self, current_object, proposed_object):
        ''' Compare the current and proposed objects key by key.
        :returns: False as soon as a proposed value differs from (or is
        missing in) the current object, True when everything matches.
        '''
        for key, proposed_item in iteritems(proposed_object):
            current_item = current_object.get(key)

            # if proposed has a key that current doesn't then the objects are
            # not equal and False will be immediately returned
            if current_item is None:
                return False

            elif isinstance(proposed_item, list):
                for subitem in proposed_item:
                    if not self.issubset(subitem, current_item):
                        return False

            elif isinstance(proposed_item, dict):
                # NOTE(review): this returns the nested comparison directly,
                # so keys after a nested dict are never examined — preserved
                # as-is since callers may rely on it.
                return self.compare_objects(current_item, proposed_item)

            else:
                if current_item != proposed_item:
                    return False

        return True

    def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
        ''' this function gets the reference object of pre-existing nios objects
        :returns: (matching objects, whether a rename is in progress, new name)
        '''

        update = False
        old_name = new_name = None
        if ('name' in obj_filter):
            # gets and returns the current object based on name/old_name passed
            try:
                name_obj = self.module._check_type_dict(obj_filter['name'])
                old_name = name_obj['old_name']
                new_name = name_obj['new_name']
            except TypeError:
                name = obj_filter['name']

            if old_name and new_name:
                if (ib_obj_type == NIOS_HOST_RECORD):
                    test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
                elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)):
                    test_obj_filter = obj_filter
                else:
                    test_obj_filter = dict([('name', old_name)])
                # get the object reference
                ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
                if ib_obj:
                    obj_filter['name'] = new_name
                else:
                    test_obj_filter['name'] = new_name
                    ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
                update = True
                return ib_obj, update, new_name
            if (ib_obj_type == NIOS_HOST_RECORD):
                # to check only by name if dns bypassing is set
                if not obj_filter['configure_for_dns']:
                    test_obj_filter = dict([('name', name)])
                else:
                    test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
            # FIX: the original condition `A or B and 'mac' in obj_filter` binds as
            # `A or (B and ...)` so an IPv4 fixed address without 'mac' fell into
            # this branch and raised KeyError below.
            elif (ib_obj_type in (NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS) and 'mac' in obj_filter):
                test_obj_filter = dict([['mac', obj_filter['mac']]])
            elif (ib_obj_type == NIOS_A_RECORD):
                # resolves issue where a_record with uppercase name was returning null and was failing
                test_obj_filter = obj_filter
                test_obj_filter['name'] = test_obj_filter['name'].lower()
                # resolves issue where multiple a_records with same name and different IP address
                try:
                    ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
                    ipaddr = ipaddr_obj['old_ipv4addr']
                except TypeError:
                    ipaddr = obj_filter['ipv4addr']
                test_obj_filter['ipv4addr'] = ipaddr
            elif (ib_obj_type == NIOS_TXT_RECORD):
                # resolves issue where multiple txt_records with same name and different text
                test_obj_filter = obj_filter
                try:
                    text_obj = self.module._check_type_dict(obj_filter['text'])
                    txt = text_obj['old_text']
                except TypeError:
                    txt = obj_filter['text']
                test_obj_filter['text'] = txt
            # check if test_obj_filter is empty copy passed obj_filter
            else:
                test_obj_filter = obj_filter
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
        elif (ib_obj_type == NIOS_A_RECORD):
            # resolves issue where multiple a_records with same name and different IP address
            test_obj_filter = obj_filter
            try:
                ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
                ipaddr = ipaddr_obj['old_ipv4addr']
            except TypeError:
                ipaddr = obj_filter['ipv4addr']
            test_obj_filter['ipv4addr'] = ipaddr
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
        elif (ib_obj_type == NIOS_TXT_RECORD):
            # resolves issue where multiple txt_records with same name and different text
            test_obj_filter = obj_filter
            try:
                text_obj = self.module._check_type_dict(obj_filter['text'])
                txt = text_obj['old_text']
            except TypeError:
                txt = obj_filter['text']
            test_obj_filter['text'] = txt
            ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
        elif (ib_obj_type == NIOS_ZONE):
            # del key 'restart_if_needed' as nios_zone get_object fails with the key present
            temp = ib_spec['restart_if_needed']
            del ib_spec['restart_if_needed']
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
            # reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
            if not ib_obj:
                ib_spec['restart_if_needed'] = temp
        elif (ib_obj_type == NIOS_MEMBER):
            # del key 'create_token' as nios_member get_object fails with the key present
            temp = ib_spec['create_token']
            del ib_spec['create_token']
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
            if temp:
                # reinstate 'create_token' key
                ib_spec['create_token'] = temp
        else:
            ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
        return ib_obj, update, new_name

    def on_update(self, proposed_object, ib_spec):
        ''' Event called before the update is sent to the API endpoint
        This method will allow the final proposed object to be changed
        and/or keys filtered before it is sent to the API endpoint to
        be processed.
        :args proposed_object: A dict item that will be encoded and sent
        the API endpoint with the updated data structure
        :returns: updated object to be sent to API endpoint
        '''
        keys = set()
        for key, value in iteritems(proposed_object):
            update = ib_spec[key].get('update', True)
            if not update:
                keys.add(key)
        return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys])
diff --git a/lib/ansible/module_utils/network/__init__.py b/lib/ansible/module_utils/network/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/a10/__init__.py b/lib/ansible/module_utils/network/a10/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/a10/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/a10/a10.py b/lib/ansible/module_utils/network/a10/a10.py
deleted file mode 100644
index bf713702e4..0000000000
--- a/lib/ansible/module_utils/network/a10/a10.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import json
-
-from ansible.module_utils.urls import fetch_url
-
-
# Real-server port protocol names mapped to their AXAPI v2 numeric codes.
AXAPI_PORT_PROTOCOLS = {
    'tcp': 2,
    'udp': 3,
}

# Virtual-server port protocol names mapped to their AXAPI v2 numeric codes;
# includes the HTTP application-level protocols in addition to tcp/udp.
AXAPI_VPORT_PROTOCOLS = {
    'tcp': 2,
    'udp': 3,
    'fast-http': 9,
    'http': 11,
    'https': 12,
}
-
-
def a10_argument_spec():
    """Return the argument-spec fragment shared by all A10 modules.

    Covers device address, credentials (with legacy aliases) and the
    write_config flag; merge this into each module's own spec.
    """
    return {
        'host': dict(type='str', required=True),
        'username': dict(type='str', aliases=['user', 'admin'], required=True),
        'password': dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
        'write_config': dict(type='bool', default=False),
    }
-
-
def axapi_failure(result):
    """Return True when an AXAPI response dict reports a failed call."""
    if 'response' not in result:
        return False
    return result['response'].get('status') == 'fail'
-
-
def axapi_call(module, url, post=None):
    '''
    Issue an AXAPI v2 request and return the decoded response.

    :arg module: AnsibleModule instance (used for fetch_url and fail_json)
    :arg url: fully-formed AXAPI URL
    :arg post: optional request body
    :returns: parsed JSON response, or a synthesized
              ``{"response": {"status": ...}}`` dict for XML replies
    '''
    rsp, info = fetch_url(module, url, data=post)
    if not rsp or info['status'] >= 400:
        module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
    try:
        raw_data = rsp.read()
        data = json.loads(raw_data)
    except ValueError:
        # at least one API call (system.action.write_config) returns
        # XML even when JSON is requested, so do some minimal handling
        # here to prevent failing even when the call succeeded
        # FIX: fetch_url bodies are bytes on Python 3; the original ran
        # `'status="ok"' in raw_data.lower()` (str in bytes), which raises
        # TypeError and turned every XML success into a hard failure.
        text = raw_data.decode('utf-8', 'replace') if isinstance(raw_data, bytes) else raw_data
        if 'status="ok"' in text.lower():
            data = {"response": {"status": "OK"}}
        else:
            data = {"response": {"status": "fail", "err": {"msg": text}}}
    except Exception:
        module.fail_json(msg="could not read the result from the host")
    finally:
        rsp.close()
    return data
-
-
def axapi_authenticate(module, base_url, username, password):
    """Log in via AXAPI v2 and return the base URL with a session id appended.

    Fails the module if the device rejects the credentials.
    """
    login_url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
    result = axapi_call(module, login_url)
    if axapi_failure(result):
        return module.fail_json(msg=result['response']['err']['msg'])
    return '%s&session_id=%s' % (base_url, result['session_id'])
-
-
def axapi_authenticate_v3(module, base_url, username, password):
    """Log in via AXAPI v3 and return the session signature token.

    Fails the module if the device rejects the credentials.
    """
    credentials = {"credentials": {"username": username, "password": password}}
    result = axapi_call_v3(module, base_url, method='POST', body=json.dumps(credentials))
    if axapi_failure(result):
        return module.fail_json(msg=result['response']['err']['msg'])
    return result['authresponse']['signature']
-
-
def axapi_call_v3(module, url, method=None, body=None, signature=None):
    '''
    Issue an AXAPI v3 request and return the decoded response.

    :arg module: AnsibleModule instance (used for fetch_url and fail_json)
    :arg url: AXAPI v3 endpoint URL
    :arg method: HTTP method (e.g. 'POST')
    :arg body: optional JSON request body
    :arg signature: optional session token, sent as an A10 Authorization header
    :returns: parsed JSON response, or a synthesized
              ``{"response": {"status": ...}}`` dict for XML replies
    '''
    if signature:
        headers = {'content-type': 'application/json', 'Authorization': 'A10 %s' % signature}
    else:
        headers = {'content-type': 'application/json'}
    rsp, info = fetch_url(module, url, method=method, data=body, headers=headers)
    if not rsp or info['status'] >= 400:
        module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
    try:
        raw_data = rsp.read()
        data = json.loads(raw_data)
    except ValueError:
        # at least one API call (system.action.write_config) returns
        # XML even when JSON is requested, so do some minimal handling
        # here to prevent failing even when the call succeeded
        # FIX: fetch_url bodies are bytes on Python 3; the original ran
        # `'status="ok"' in raw_data.lower()` (str in bytes), which raises
        # TypeError and turned every XML success into a hard failure.
        text = raw_data.decode('utf-8', 'replace') if isinstance(raw_data, bytes) else raw_data
        if 'status="ok"' in text.lower():
            data = {"response": {"status": "OK"}}
        else:
            data = {"response": {"status": "fail", "err": {"msg": text}}}
    except Exception:
        module.fail_json(msg="could not read the result from the host")
    finally:
        rsp.close()
    return data
-
-
def axapi_enabled_disabled(flag):
    '''
    The axapi uses 0/1 integer values for flags, rather than strings
    or booleans, so convert the given flag to a 0 or 1. For now, params
    are specified as strings only so that's what we check.
    '''
    return 1 if flag == 'enabled' else 0
-
-
def axapi_get_port_protocol(protocol):
    """Map a port protocol name (case-insensitive) to its AXAPI code, or None."""
    return AXAPI_PORT_PROTOCOLS.get(protocol.lower())
-
-
def axapi_get_vport_protocol(protocol):
    """Map a virtual-port protocol name (case-insensitive) to its AXAPI code, or None."""
    return AXAPI_VPORT_PROTOCOLS.get(protocol.lower())
diff --git a/lib/ansible/module_utils/network/aci/__init__.py b/lib/ansible/module_utils/network/aci/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/aci/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/aireos/__init__.py b/lib/ansible/module_utils/network/aireos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/aireos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/aireos/aireos.py b/lib/ansible/module_utils/network/aireos/aireos.py
deleted file mode 100644
index f5402cd969..0000000000
--- a/lib/ansible/module_utils/network/aireos/aireos.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2016 Red Hat Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import exec_command
-
# Per-process cache of retrieved device configurations, keyed by the exact
# 'show run-config commands ...' string used to fetch them (see get_config).
_DEVICE_CONFIGS = {}

# Connection options accepted under the 'provider' dict; credentials fall
# back to the standard ANSIBLE_NET_* environment variables.
aireos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'timeout': dict(type='int'),
}
# Spec fragment merged into every aireos module's argument spec.
aireos_argument_spec = {
    'provider': dict(type='dict', options=aireos_provider_spec)
}

# Legacy top-level connection options, deprecated in favour of 'provider'
# (scheduled for removal in Ansible 2.9).
aireos_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'timeout': dict(removed_in_version=2.9, type='int'),
}
aireos_argument_spec.update(aireos_top_spec)
-
-
def sanitize(resp):
    """Strip per-line and surrounding whitespace from a device response.

    Aireos pads output with leading whitespace (which netcfg would parse as
    parent/child structure) and trailing whitespace; both are removed here.
    """
    stripped = [line.strip() for line in resp.splitlines()]
    return '\n'.join(stripped).strip()
-
-
def get_provider_argspec():
    ''' Return the argument spec for the aireos 'provider' options. '''
    return aireos_provider_spec
-
-
def check_args(module, warnings):
    ''' Argument-validation hook required by the shared network-module
    interface; aireos has no platform-specific checks, so it is a no-op. '''
    pass
-
-
def get_config(module, flags=None):
    """Return the device running configuration, cached per flag set.

    The configuration is fetched with 'show run-config commands' plus any
    extra *flags*, sanitized, and memoized in _DEVICE_CONFIGS so repeated
    calls with the same flags do not hit the device again.
    """
    if flags is None:
        flags = []

    cmd = ('show run-config commands ' + ' '.join(flags)).strip()

    if cmd not in _DEVICE_CONFIGS:
        rc, out, err = exec_command(module, cmd)
        if rc != 0:
            module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
        _DEVICE_CONFIGS[cmd] = sanitize(to_text(out, errors='surrogate_then_replace').strip())
    return _DEVICE_CONFIGS[cmd]
-
-
def to_commands(module, commands):
    """Normalize command entries into command/prompt/answer dicts."""
    entry_spec = dict(
        command=dict(key=True),
        prompt=dict(),
        answer=dict(),
    )
    return ComplexList(entry_spec, module)(commands)
-
-
def run_commands(module, commands, check_rc=True):
    """Execute commands on the device and return their sanitized output.

    When check_rc is True, any non-zero return code fails the module with
    the device's error text.
    """
    responses = []
    for entry in to_commands(module, to_list(commands)):
        rc, out, err = exec_command(module, module.jsonify(entry))
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
        responses.append(sanitize(to_text(out, errors='surrogate_then_replace')))
    return responses
-
-
def load_config(module, commands):
    """Enter configuration mode, push *commands*, then leave with 'end'.

    Any 'end' entries inside *commands* are skipped, since 'end' is always
    issued exactly once after the loop.
    """
    rc, out, err = exec_command(module, 'config')
    if rc != 0:
        module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))

    for command in to_list(commands):
        if command == 'end':
            continue
        rc, out, err = exec_command(module, command)
        if rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)

    exec_command(module, 'end')
diff --git a/lib/ansible/module_utils/network/aos/__init__.py b/lib/ansible/module_utils/network/aos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/aos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/aos/aos.py b/lib/ansible/module_utils/network/aos/aos.py
deleted file mode 100644
index 092bbf5b4a..0000000000
--- a/lib/ansible/module_utils/network/aos/aos.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#
-# Copyright (c) 2017 Apstra Inc, <community@apstra.com>
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-"""
-This module adds shared support for Apstra AOS modules
-
-In order to use this module, include it as part of your module
-
-from ansible.module_utils.network.aos.aos import (check_aos_version, get_aos_session, find_collection_item,
- content_to_dict, do_load_resource)
-
-"""
-import json
-
-from distutils.version import LooseVersion
-
-try:
- import yaml
- HAS_YAML = True
-except ImportError:
- HAS_YAML = False
-
-try:
- from apstra.aosom.session import Session
-
- HAS_AOS_PYEZ = True
-except ImportError:
- HAS_AOS_PYEZ = False
-
-from ansible.module_utils._text import to_native
-
-
def check_aos_version(module, min=False):
    """
    Check if the library aos-pyez is present.
    If provided, also check if the minimum version requirement is met.
    Fails the module when either check does not hold; returns True otherwise.
    """
    if not HAS_AOS_PYEZ:
        module.fail_json(msg='aos-pyez is not installed. Please see details '
                             'here: https://github.com/Apstra/aos-pyez')
    elif min:
        # imported lazily: only needed when a minimum version is requested
        import apstra.aosom
        installed_version = apstra.aosom.__version__

        if LooseVersion(installed_version) < LooseVersion(min):
            module.fail_json(msg='aos-pyez >= %s is required for this module' % min)

    return True
-
-
def get_aos_session(module, auth):
    """
    Resume an existing session and return an AOS object.

    Args:
        auth (dict): An AOS session as obtained by aos_login module blocks::

            dict( token=<token>,
                  server=<ip>,
                  port=<port>
            )

    Return:
        Aos object
    """
    check_aos_version(module)

    session = Session()
    session.session = auth
    return session
-
-
def find_collection_item(collection, item_name=False, item_id=False):
    """
    Find collection_item based on name or id from a collection object.
    Both Collection_item and Collection objects are provided by the
    aos-pyez library.

    Return:
        collection_item: object corresponding to the collection type
        (the collection's empty item when nothing matched)
    """
    found = None
    if item_name:
        found = collection.find(label=item_name)
    elif item_id:
        found = collection.find(uid=item_id)

    return collection[''] if found is None else found
-
-
def content_to_dict(module, content):
    """
    Convert 'content' (a YAML or JSON string) into a Python dict.

    Fails the module when the text does not parse, does not yield a mapping,
    or yields an empty mapping. The parsed dict is also written back to
    module.params['content'] for later use.
    """
    content_dict = None

    try:
        content_dict = yaml.safe_load(content)

        # callers need a non-empty mapping; anything else is rejected
        if not isinstance(content_dict, dict) or not content_dict:
            raise Exception()

    except Exception:
        module.fail_json(msg="Unable to convert 'content' to a dict, please check if valid")

    # replace the string with the dict
    module.params['content'] = content_dict

    return content_dict
-
-
def do_load_resource(module, collection, name):
    """
    Create a new object (collection.item) by loading a datastructure directly.

    Exits unchanged when an item with *name* already exists; otherwise writes
    module.params['content'] into a new item (skipped in check mode) and
    exits with changed=True.
    """
    try:
        item = find_collection_item(collection, name, '')
    except Exception:
        module.fail_json(msg="An error occurred while running 'find_collection_item'")

    # nothing to do when the item is already present
    if item.exists:
        module.exit_json(changed=False, name=item.name, id=item.id, value=item.value)

    # If not in check mode, apply the changes
    if not module.check_mode:
        try:
            item.datum = module.params['content']
            item.write()
        except Exception as exc:
            module.fail_json(msg="Unable to write item content : %r" % to_native(exc))

    module.exit_json(changed=True, name=item.name, id=item.id, value=item.value)
diff --git a/lib/ansible/module_utils/network/apconos/__init__.py b/lib/ansible/module_utils/network/apconos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/apconos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/apconos/apconos.py b/lib/ansible/module_utils/network/apconos/apconos.py
deleted file mode 100644
index 4eb4079a6e..0000000000
--- a/lib/ansible/module_utils/network/apconos/apconos.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their own
-# license to the complete work.
-#
-# Copyright (C) 2019 APCON, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Contains utility methods
-# APCON Networking
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.network.common.utils import EntityCollection
-from ansible.module_utils.connection import Connection, exec_command
-from ansible.module_utils.connection import ConnectionError
-
-_DEVICE_CONFIGS = {}
-_CONNECTION = None
-
-
-command_spec = {
- 'command': dict(key=True),
-}
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_connection(module):
- global _CONNECTION
- if _CONNECTION:
- return _CONNECTION
- _CONNECTION = Connection(module._socket_path)
-
- return _CONNECTION
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- cmd = ' '.join(flags).strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- conn = get_connection(module)
- out = conn.get(cmd)
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
- transform = EntityCollection(module, command_spec)
- commands = transform(commands)
-
- responses = list()
-
- for cmd in commands:
- out = connection.get(**cmd)
- responses.append(to_text(out, errors='surrogate_then_replace'))
-
- return responses
-
-
-def load_config(module, config):
- try:
- conn = get_connection(module)
- conn.edit_config(config)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def get_defaults_flag(module):
- rc, out, err = exec_command(module, 'display running-config ?')
- out = to_text(out, errors='surrogate_then_replace')
-
- commands = set()
- for line in out.splitlines():
- if line:
- commands.add(line.strip().split()[0])
-
- if 'all' in commands:
- return 'all'
- else:
- return 'full'
diff --git a/lib/ansible/module_utils/network/aruba/__init__.py b/lib/ansible/module_utils/network/aruba/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/aruba/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/aruba/aruba.py b/lib/ansible/module_utils/network/aruba/aruba.py
deleted file mode 100644
index cdb0ad5ff0..0000000000
--- a/lib/ansible/module_utils/network/aruba/aruba.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2016 Red Hat Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-import re
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import exec_command
-
-_DEVICE_CONFIGS = {}
-
-aruba_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'timeout': dict(type='int'),
-}
-aruba_argument_spec = {
- 'provider': dict(type='dict', options=aruba_provider_spec)
-}
-
-aruba_top_spec = {
- 'host': dict(removed_in_version=2.9),
- 'port': dict(removed_in_version=2.9, type='int'),
- 'username': dict(removed_in_version=2.9),
- 'password': dict(removed_in_version=2.9, no_log=True),
- 'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
- 'timeout': dict(removed_in_version=2.9, type='int'),
-}
-
-aruba_argument_spec.update(aruba_top_spec)
-
-
-def get_provider_argspec():
- return aruba_provider_spec
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- cmd = 'show running-config '
- cmd += ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- rc, out, err = exec_command(module, cmd)
- if rc != 0:
- module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
- cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip())
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
-
-
-def sanitize(resp):
- # Takes response from device and adjusts leading whitespace to just 1 space
- cleaned = []
- for line in resp.splitlines():
- cleaned.append(re.sub(r"^\s+", " ", line))
- return '\n'.join(cleaned).strip()
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- responses = list()
- commands = to_commands(module, to_list(commands))
- for cmd in commands:
- cmd = module.jsonify(cmd)
- rc, out, err = exec_command(module, cmd)
- if check_rc and rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
- responses.append(to_text(out, errors='surrogate_then_replace'))
- return responses
-
-
-def load_config(module, commands):
-
- rc, out, err = exec_command(module, 'configure terminal')
- if rc != 0:
- module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))
-
- for command in to_list(commands):
- if command == 'end':
- continue
- rc, out, err = exec_command(module, command)
- if rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
-
- exec_command(module, 'end')
diff --git a/lib/ansible/module_utils/network/avi/__init__.py b/lib/ansible/module_utils/network/avi/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/avi/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/avi/ansible_utils.py b/lib/ansible/module_utils/network/avi/ansible_utils.py
deleted file mode 100644
index d458c7ada5..0000000000
--- a/lib/ansible/module_utils/network/avi/ansible_utils.py
+++ /dev/null
@@ -1,572 +0,0 @@
-from __future__ import absolute_import
-
-"""
-Created on Aug 16, 2016
-
-@author: Gaurav Rastogi (grastogi@avinetworks.com)
-"""
-import os
-import re
-import logging
-import sys
-from copy import deepcopy
-from ansible.module_utils.basic import env_fallback
-
-try:
- from ansible.module_utils.network.avi.avi_api import (
- ApiSession, ObjectNotFound, avi_sdk_syslog_logger, AviCredentials, HAS_AVI)
-except ImportError:
- HAS_AVI = False
-
-
-if os.environ.get('AVI_LOG_HANDLER', '') != 'syslog':
- log = logging.getLogger(__name__)
-else:
- # Ansible does not allow logging from the modules.
- log = avi_sdk_syslog_logger()
-
-
-def _check_type_string(x):
- """
- :param x:
- :return: True if it is of type string
- """
- if isinstance(x, str):
- return True
- if sys.version_info[0] < 3:
- try:
- return isinstance(x, unicode)
- except NameError:
- return False
-
-
-class AviCheckModeResponse(object):
- """
- Class to support ansible check mode.
- """
-
- def __init__(self, obj, status_code=200):
- self.obj = obj
- self.status_code = status_code
-
- def json(self):
- return self.obj
-
-
-def ansible_return(module, rsp, changed, req=None, existing_obj=None,
- api_context=None):
- """
- :param module: AnsibleModule
- :param rsp: ApiResponse from avi_api
- :param changed: boolean
- :param req: ApiRequest to avi_api
- :param existing_obj: object to be passed debug output
- :param api_context: api login context
-
- helper function to return the right ansible based on the error code and
- changed
- Returns: specific ansible module exit function
- """
-
- if rsp is not None and rsp.status_code > 299:
- return module.fail_json(
- msg='Error %d Msg %s req: %s api_context:%s ' % (
- rsp.status_code, rsp.text, req, api_context))
- api_creds = AviCredentials()
- api_creds.update_from_ansible_module(module)
- key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
- api_creds.port)
- disable_fact = module.params.get('avi_disable_session_cache_as_fact')
-
- fact_context = None
- if not disable_fact:
- fact_context = module.params.get('api_context', {})
- if fact_context:
- fact_context.update({key: api_context})
- else:
- fact_context = {key: api_context}
-
- obj_val = rsp.json() if rsp else existing_obj
-
- if (obj_val and module.params.get("obj_username", None) and
- "username" in obj_val):
- obj_val["obj_username"] = obj_val["username"]
- if (obj_val and module.params.get("obj_password", None) and
- "password" in obj_val):
- obj_val["obj_password"] = obj_val["password"]
- old_obj_val = existing_obj if changed and existing_obj else None
- api_context_val = api_context if disable_fact else None
- ansible_facts_val = dict(
- avi_api_context=fact_context) if not disable_fact else {}
-
- return module.exit_json(
- changed=changed, obj=obj_val, old_obj=old_obj_val,
- ansible_facts=ansible_facts_val, api_context=api_context_val)
-
-
-def purge_optional_fields(obj, module):
- """
- It purges the optional arguments to be sent to the controller.
- :param obj: dictionary of the ansible object passed as argument.
- :param module: AnsibleModule
- return modified obj
- """
- purge_fields = []
- for param, spec in module.argument_spec.items():
- if not spec.get('required', False):
- if param not in obj:
- # these are ansible common items
- continue
- if obj[param] is None:
- purge_fields.append(param)
- log.debug('purging fields %s', purge_fields)
- for param in purge_fields:
- obj.pop(param, None)
- return obj
-
-
-def cleanup_absent_fields(obj):
- """
- cleans up any field that is marked as state: absent. It needs to be removed
- from the object if it is present.
- :param obj:
- :return: Purged object
- """
- if type(obj) != dict:
- return obj
- cleanup_keys = []
- for k, v in obj.items():
- if type(v) == dict:
- if (('state' in v and v['state'] == 'absent') or
- (v == "{'state': 'absent'}")):
- cleanup_keys.append(k)
- else:
- cleanup_absent_fields(v)
- if not v:
- cleanup_keys.append(k)
- elif type(v) == list:
- new_list = []
- for elem in v:
- elem = cleanup_absent_fields(elem)
- if elem:
- # remove the item from list
- new_list.append(elem)
- if new_list:
- obj[k] = new_list
- else:
- cleanup_keys.append(k)
- elif isinstance(v, str) or isinstance(v, str):
- if v == "{'state': 'absent'}":
- cleanup_keys.append(k)
- for k in cleanup_keys:
- del obj[k]
- return obj
-
-
-RE_REF_MATCH = re.compile(r'^/api/[\w/]+\?name\=[\w]+[^#<>]*$')
-# if HTTP ref match then strip out the #name
-HTTP_REF_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.+')
-HTTP_REF_W_NAME_MATCH = re.compile(r'https://[\w.0-9:-]+/api/.*#.+')
-
-
-def ref_n_str_cmp(x, y):
- """
- compares two references
- 1. check for exact reference
- 2. check for obj_type/uuid
- 3. check for name
-
- if x is ref=name then extract uuid and name from y and use it.
- if x is http_ref then
- strip x and y
- compare them.
-
- if x and y are urls then match with split on #
- if x is a RE_REF_MATCH then extract name
- if y is a REF_MATCH then extract name
- :param x: first string
- :param y: second string from controller's object
-
- Returns
- True if they are equivalent else False
- """
- if type(y) in (int, float, bool, int, complex):
- y = str(y)
- x = str(x)
- if not (_check_type_string(x) and _check_type_string(y)):
- return False
- y_uuid = y_name = str(y)
- x = str(x)
- if RE_REF_MATCH.match(x):
- x = x.split('name=')[1]
- elif HTTP_REF_MATCH.match(x):
- x = x.rsplit('#', 1)[0]
- y = y.rsplit('#', 1)[0]
- elif RE_REF_MATCH.match(y):
- y = y.split('name=')[1]
-
- if HTTP_REF_W_NAME_MATCH.match(y):
- path = y.split('api/', 1)[1]
- # Fetching name or uuid from path /xxxx_xx/xx/xx_x/uuid_or_name
- uuid_or_name = path.split('/')[-1]
- parts = uuid_or_name.rsplit('#', 1)
- y_uuid = parts[0]
- y_name = parts[1] if len(parts) > 1 else ''
- # is just string but y is a url so match either uuid or name
- result = (x in (y, y_name, y_uuid))
- if not result:
- log.debug('x: %s y: %s y_name %s y_uuid %s',
- x, y, y_name, y_uuid)
- return result
-
-
-def avi_obj_cmp(x, y, sensitive_fields=None):
- """
- compares whether x is fully contained in y. The comparision is different
- from a simple dictionary compare for following reasons
- 1. Some fields could be references. The object in controller returns the
- full URL for those references. However, the ansible script would have
- it specified as /api/pool?name=blah. So, the reference fields need
- to match uuid, relative reference based on name and actual reference.
-
- 2. Optional fields with defaults: In case there are optional fields with
- defaults then controller automatically fills it up. This would
- cause the comparison with Ansible object specification to always return
- changed.
-
- 3. Optional fields without defaults: This is most tricky. The issue is
- how to specify deletion of such objects from ansible script. If the
- ansible playbook has object specified as Null then Avi controller will
- reject for non Message(dict) type fields. In addition, to deal with the
- defaults=null issue all the fields that are set with None are purged
- out before comparing with Avi controller's version
-
- So, the solution is to pass state: absent if any optional field needs
- to be deleted from the configuration. The script would return changed
- =true if it finds a key in the controller version and it is marked with
- state: absent in ansible playbook. Alternatively, it would return
- false if key is not present in the controller object. Before, doing
- put or post it would purge the fields that are marked state: absent.
-
- :param x: first string
- :param y: second string from controller's object
- :param sensitive_fields: sensitive fields to ignore for diff
-
- Returns:
- True if x is subset of y else False
- """
- if not sensitive_fields:
- sensitive_fields = set()
- if isinstance(x, str) or isinstance(x, str):
- # Special handling for strings as they can be references.
- return ref_n_str_cmp(x, y)
- if type(x) not in [list, dict]:
- # if it is not list or dict or string then simply compare the values
- return x == y
- if type(x) == list:
- # should compare each item in the list and that should match
- if len(x) != len(y):
- log.debug('x has %d items y has %d', len(x), len(y))
- return False
- for i in zip(x, y):
- if not avi_obj_cmp(i[0], i[1], sensitive_fields=sensitive_fields):
- # no need to continue
- return False
-
- if type(x) == dict:
- x.pop('_last_modified', None)
- x.pop('tenant', None)
- y.pop('_last_modified', None)
- x.pop('api_version', None)
- y.pop('api_verison', None)
- d_xks = [k for k in x.keys() if k in sensitive_fields]
-
- if d_xks:
- # if there is sensitive field then always return changed
- return False
- # pop the keys that are marked deleted but not present in y
- # return false if item is marked absent and is present in y
- d_x_absent_ks = []
- for k, v in x.items():
- if v is None:
- d_x_absent_ks.append(k)
- continue
- if isinstance(v, dict):
- if ('state' in v) and (v['state'] == 'absent'):
- if type(y) == dict and k not in y:
- d_x_absent_ks.append(k)
- else:
- return False
- elif not v:
- d_x_absent_ks.append(k)
- elif isinstance(v, list) and not v:
- d_x_absent_ks.append(k)
- # Added condition to check key in dict.
- elif isinstance(v, str) or (k in y and isinstance(y[k], str)):
- # this is the case when ansible converts the dictionary into a
- # string.
- if v == "{'state': 'absent'}" and k not in y:
- d_x_absent_ks.append(k)
- elif not v and k not in y:
- # this is the case when x has set the value that qualifies
- # as not but y does not have that value
- d_x_absent_ks.append(k)
- for k in d_x_absent_ks:
- x.pop(k)
- x_keys = set(x.keys())
- y_keys = set(y.keys())
- if not x_keys.issubset(y_keys):
- # log.debug('x has %s and y has %s keys', len(x_keys), len(y_keys))
- return False
- for k, v in x.items():
- if k not in y:
- # log.debug('k %s is not in y %s', k, y)
- return False
- if not avi_obj_cmp(v, y[k], sensitive_fields=sensitive_fields):
- # log.debug('k %s v %s did not match in y %s', k, v, y[k])
- return False
- return True
-
-
-POP_FIELDS = ['state', 'controller', 'username', 'password', 'api_version',
- 'avi_credentials', 'avi_api_update_method', 'avi_api_patch_op',
- 'api_context', 'tenant', 'tenant_uuid', 'avi_disable_session_cache_as_fact']
-
-
-def get_api_context(module, api_creds):
- api_context = module.params.get('api_context')
- if api_context and module.params.get('avi_disable_session_cache_as_fact'):
- return api_context
- elif api_context and not module.params.get(
- 'avi_disable_session_cache_as_fact'):
- key = '%s:%s:%s' % (api_creds.controller, api_creds.username,
- api_creds.port)
- return api_context.get(key)
- else:
- return None
-
-
-def avi_ansible_api(module, obj_type, sensitive_fields):
- """
- This converts the Ansible module into AVI object and invokes APIs
- :param module: Ansible module
- :param obj_type: string representing Avi object type
- :param sensitive_fields: sensitive fields to be excluded for comparison
- purposes.
- Returns:
- success: module.exit_json with obj=avi object
- faliure: module.fail_json
- """
-
- api_creds = AviCredentials()
- api_creds.update_from_ansible_module(module)
- api_context = get_api_context(module, api_creds)
- if api_context:
- api = ApiSession.get_session(
- api_creds.controller,
- api_creds.username,
- password=api_creds.password,
- timeout=api_creds.timeout,
- tenant=api_creds.tenant,
- tenant_uuid=api_creds.tenant_uuid,
- token=api_context['csrftoken'],
- port=api_creds.port,
- session_id=api_context['session_id'],
- csrftoken=api_context['csrftoken'])
- else:
- api = ApiSession.get_session(
- api_creds.controller,
- api_creds.username,
- password=api_creds.password,
- timeout=api_creds.timeout,
- tenant=api_creds.tenant,
- tenant_uuid=api_creds.tenant_uuid,
- token=api_creds.token,
- port=api_creds.port)
- state = module.params['state']
- # Get the api version.
- avi_update_method = module.params.get('avi_api_update_method', 'put')
- avi_patch_op = module.params.get('avi_api_patch_op', 'add')
-
- api_version = api_creds.api_version
- name = module.params.get('name', None)
- # Added Support to get uuid
- uuid = module.params.get('uuid', None)
- check_mode = module.check_mode
- if uuid and obj_type != 'cluster':
- obj_path = '%s/%s' % (obj_type, uuid)
- else:
- obj_path = '%s/' % obj_type
- obj = deepcopy(module.params)
- tenant = obj.pop('tenant', '')
- tenant_uuid = obj.pop('tenant_uuid', '')
- # obj.pop('cloud_ref', None)
- for k in POP_FIELDS:
- obj.pop(k, None)
- purge_optional_fields(obj, module)
-
- # Special code to handle situation where object has a field
- # named username. This is used in case of api/user
- # The following code copies the username and password
- # from the obj_username and obj_password fields.
- if 'obj_username' in obj:
- obj['username'] = obj['obj_username']
- obj.pop('obj_username')
- if 'obj_password' in obj:
- obj['password'] = obj['obj_password']
- obj.pop('obj_password')
- if 'full_name' not in obj and 'name' in obj and obj_type == "user":
- obj['full_name'] = obj['name']
- # Special case as name represent full_name in user module
- # As per API response, name is always same as username regardless of full_name
- obj['name'] = obj['username']
-
- log.info('passed object %s ', obj)
-
- if uuid:
- # Get the object based on uuid.
- try:
- existing_obj = api.get(
- obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
- params={'include_refs': '', 'include_name': ''},
- api_version=api_version)
- existing_obj = existing_obj.json()
- except ObjectNotFound:
- existing_obj = None
- elif name:
- params = {'include_refs': '', 'include_name': ''}
- if obj.get('cloud_ref', None):
- # this is the case when gets have to be scoped with cloud
- cloud = obj['cloud_ref'].split('name=')[1]
- params['cloud_ref.name'] = cloud
- existing_obj = api.get_object_by_name(
- obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid,
- params=params, api_version=api_version)
-
- # Need to check if tenant_ref was provided and the object returned
- # is actually in admin tenant.
- if existing_obj and 'tenant_ref' in obj and 'tenant_ref' in existing_obj:
- # https://10.10.25.42/api/tenant/admin#admin
- existing_obj_tenant = existing_obj['tenant_ref'].split('#')[1]
- obj_tenant = obj['tenant_ref'].split('name=')[1]
- if obj_tenant != existing_obj_tenant:
- existing_obj = None
- else:
- # added api version to avi api call.
- existing_obj = api.get(obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
- params={'include_refs': '', 'include_name': ''},
- api_version=api_version).json()
-
- if state == 'absent':
- rsp = None
- changed = False
- err = False
- if not check_mode and existing_obj:
- try:
- if name is not None:
- # added api version to avi api call.
- rsp = api.delete_by_name(
- obj_type, name, tenant=tenant, tenant_uuid=tenant_uuid,
- api_version=api_version)
- else:
- # added api version to avi api call.
- rsp = api.delete(
- obj_path, tenant=tenant, tenant_uuid=tenant_uuid,
- api_version=api_version)
- except ObjectNotFound:
- pass
- if check_mode and existing_obj:
- changed = True
-
- if rsp:
- if rsp.status_code == 204:
- changed = True
- else:
- err = True
- if not err:
- return ansible_return(
- module, rsp, changed, existing_obj=existing_obj,
- api_context=api.get_context())
- elif rsp:
- return module.fail_json(msg=rsp.text)
-
- rsp = None
- req = None
- if existing_obj:
- # this is case of modify as object exists. should find out
- # if changed is true or not
- if name is not None and obj_type != 'cluster':
- obj_uuid = existing_obj['uuid']
- obj_path = '%s/%s' % (obj_type, obj_uuid)
- if avi_update_method == 'put':
- changed = not avi_obj_cmp(obj, existing_obj, sensitive_fields)
- obj = cleanup_absent_fields(obj)
- if changed:
- req = obj
- if check_mode:
- # No need to process any further.
- rsp = AviCheckModeResponse(obj=existing_obj)
- else:
- rsp = api.put(
- obj_path, data=req, tenant=tenant,
- tenant_uuid=tenant_uuid, api_version=api_version)
- elif check_mode:
- rsp = AviCheckModeResponse(obj=existing_obj)
- else:
- if check_mode:
- # No need to process any further.
- rsp = AviCheckModeResponse(obj=existing_obj)
- changed = True
- else:
- obj.pop('name', None)
- patch_data = {avi_patch_op: obj}
- rsp = api.patch(
- obj_path, data=patch_data, tenant=tenant,
- tenant_uuid=tenant_uuid, api_version=api_version)
- obj = rsp.json()
- changed = not avi_obj_cmp(obj, existing_obj)
- if changed:
- log.debug('EXISTING OBJ %s', existing_obj)
- log.debug('NEW OBJ %s', obj)
- else:
- changed = True
- req = obj
- if check_mode:
- rsp = AviCheckModeResponse(obj=None)
- else:
- rsp = api.post(obj_type, data=obj, tenant=tenant,
- tenant_uuid=tenant_uuid, api_version=api_version)
- return ansible_return(module, rsp, changed, req, existing_obj=existing_obj,
- api_context=api.get_context())
-
-
-def avi_common_argument_spec():
- """
- Returns common arguments for all Avi modules
- :return: dict
- """
- credentials_spec = dict(
- controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])),
- username=dict(fallback=(env_fallback, ['AVI_USERNAME'])),
- password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True),
- api_version=dict(default='16.4.4', type='str'),
- tenant=dict(default='admin'),
- tenant_uuid=dict(default='', type='str'),
- port=dict(type='int'),
- timeout=dict(default=300, type='int'),
- token=dict(default='', type='str', no_log=True),
- session_id=dict(default='', type='str', no_log=True),
- csrftoken=dict(default='', type='str', no_log=True)
- )
-
- return dict(
- controller=dict(fallback=(env_fallback, ['AVI_CONTROLLER'])),
- username=dict(fallback=(env_fallback, ['AVI_USERNAME'])),
- password=dict(fallback=(env_fallback, ['AVI_PASSWORD']), no_log=True),
- tenant=dict(default='admin'),
- tenant_uuid=dict(default=''),
- api_version=dict(default='16.4.4', type='str'),
- avi_credentials=dict(default=None, type='dict',
- options=credentials_spec),
- api_context=dict(type='dict'),
- avi_disable_session_cache_as_fact=dict(default=False, type='bool'))
diff --git a/lib/ansible/module_utils/network/avi/avi.py b/lib/ansible/module_utils/network/avi/avi.py
deleted file mode 100644
index 74aaac4673..0000000000
--- a/lib/ansible/module_utils/network/avi/avi.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Gaurav Rastogi <grastogi@avinetworks.com>, 2017
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This module initially matched the namespace of network module avi. However,
-# that causes namespace import error when other modules from avi namespaces
-# are imported. Added import of absolute_import to avoid import collisions for
-# avi.sdk.
-
-from __future__ import absolute_import
-
-from ansible.module_utils.network.avi.ansible_utils import (
- avi_ansible_api, avi_common_argument_spec, ansible_return,
- avi_obj_cmp, cleanup_absent_fields, AviCheckModeResponse, HAS_AVI)
diff --git a/lib/ansible/module_utils/network/avi/avi_api.py b/lib/ansible/module_utils/network/avi/avi_api.py
deleted file mode 100644
index 817f909dd0..0000000000
--- a/lib/ansible/module_utils/network/avi/avi_api.py
+++ /dev/null
@@ -1,972 +0,0 @@
-from __future__ import absolute_import
-import os
-import sys
-import copy
-import json
-import logging
-import time
-from datetime import datetime, timedelta
-from ssl import SSLError
-
-
-class MockResponse(object):
- def __init__(self, *args, **kwargs):
- raise Exception("Requests library Response object not found. Using fake one.")
-
-
-class MockRequestsConnectionError(Exception):
- pass
-
-
-class MockSession(object):
- def __init__(self, *args, **kwargs):
- raise Exception("Requests library Session object not found. Using fake one.")
-
-
-HAS_AVI = True
-try:
- from requests import ConnectionError as RequestsConnectionError
- from requests import Response
- from requests.sessions import Session
-except ImportError:
- HAS_AVI = False
- Response = MockResponse
- RequestsConnectionError = MockRequestsConnectionError
- Session = MockSession
-
-
-logger = logging.getLogger(__name__)
-
-sessionDict = {}
-
-
-def avi_timedelta(td):
- '''
- This is a wrapper class to workaround python 2.6 builtin datetime.timedelta
- does not have total_seconds method
- :param timedelta object
- '''
- if type(td) != timedelta:
- raise TypeError()
- if sys.version_info >= (2, 7):
- ts = td.total_seconds()
- else:
- ts = td.seconds + (24 * 3600 * td.days)
- return ts
-
-
-def avi_sdk_syslog_logger(logger_name='avi.sdk'):
- # The following sets up syslog module to log underlying avi SDK messages
- # based on the environment variables:
- # AVI_LOG_HANDLER: names the logging handler to use. Only syslog is
- # supported.
- # AVI_LOG_LEVEL: Logging level used for the avi SDK. Default is DEBUG
- # AVI_SYSLOG_ADDRESS: Destination address for the syslog handler.
- # Default is /dev/log
- from logging.handlers import SysLogHandler
- lf = '[%(asctime)s] %(levelname)s [%(module)s.%(funcName)s:%(lineno)d] %(message)s'
- log = logging.getLogger(logger_name)
- log_level = os.environ.get('AVI_LOG_LEVEL', 'DEBUG')
- if log_level:
- log.setLevel(getattr(logging, log_level))
- formatter = logging.Formatter(lf)
- sh = SysLogHandler(address=os.environ.get('AVI_SYSLOG_ADDRESS', '/dev/log'))
- sh.setFormatter(formatter)
- log.addHandler(sh)
- return log
-
-
-class ObjectNotFound(Exception):
- pass
-
-
-class APIError(Exception):
- def __init__(self, arg, rsp=None):
- self.args = [arg, rsp]
- self.rsp = rsp
-
-
-class AviServerError(APIError):
- def __init__(self, arg, rsp=None):
- super(AviServerError, self).__init__(arg, rsp)
-
-
-class APINotImplemented(Exception):
- pass
-
-
-class ApiResponse(Response):
- """
- Returns copy of the requests.Response object provides additional helper
- routines
- 1. obj: returns dictionary of Avi Object
- """
- def __init__(self, rsp):
- super(ApiResponse, self).__init__()
- for k, v in list(rsp.__dict__.items()):
- setattr(self, k, v)
-
- def json(self):
- """
- Extends the session default json interface to handle special errors
- and raise Exceptions
- returns the Avi object as a dictionary from rsp.text
- """
- if self.status_code in (200, 201):
- if not self.text:
- # In cases like status_code == 201 the response text could be
- # empty string.
- return None
- return super(ApiResponse, self).json()
- elif self.status_code == 204:
- # No response needed; e.g., delete operation
- return None
- elif self.status_code == 404:
- raise ObjectNotFound('HTTP Error: %s Error Msg %s' % (
- self.status_code, self.text), self)
- elif self.status_code >= 500:
- raise AviServerError('HTTP Error: %s Error Msg %s' % (
- self.status_code, self.text), self)
- else:
- raise APIError('HTTP Error: %s Error Msg %s' % (
- self.status_code, self.text), self)
-
- def count(self):
- """
- return the number of objects in the collection response. If it is not
- a collection response then it would simply return 1.
- """
- obj = self.json()
- if 'count' in obj:
- # this was a resposne to collection
- return obj['count']
- return 1
-
- @staticmethod
- def to_avi_response(resp):
- if type(resp) == Response:
- return ApiResponse(resp)
- return resp
-
-
-class AviCredentials(object):
- controller = ''
- username = ''
- password = ''
- api_version = '16.4.4'
- tenant = None
- tenant_uuid = None
- token = None
- port = None
- timeout = 300
- session_id = None
- csrftoken = None
-
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- setattr(self, k, v)
-
- def update_from_ansible_module(self, m):
- """
- :param m: ansible module
- :return:
- """
- if m.params.get('avi_credentials'):
- for k, v in m.params['avi_credentials'].items():
- if hasattr(self, k):
- setattr(self, k, v)
- if m.params['controller']:
- self.controller = m.params['controller']
- if m.params['username']:
- self.username = m.params['username']
- if m.params['password']:
- self.password = m.params['password']
- if (m.params['api_version'] and
- (m.params['api_version'] != '16.4.4')):
- self.api_version = m.params['api_version']
- if m.params['tenant']:
- self.tenant = m.params['tenant']
- if m.params['tenant_uuid']:
- self.tenant_uuid = m.params['tenant_uuid']
- if m.params.get('session_id'):
- self.session_id = m.params['session_id']
- if m.params.get('csrftoken'):
- self.csrftoken = m.params['csrftoken']
-
- def __str__(self):
- return 'controller %s user %s api %s tenant %s' % (
- self.controller, self.username, self.api_version, self.tenant)
-
-
-class ApiSession(Session):
- """
- Extends the Request library's session object to provide helper
- utilities to work with Avi Controller like authentication, api massaging
- etc.
- """
-
- # This keeps track of the process which created the cache.
- # At anytime the pid of the process changes then it would create
- # a new cache for that process.
- AVI_SLUG = 'Slug'
- SESSION_CACHE_EXPIRY = 20 * 60
- SHARED_USER_HDRS = ['X-CSRFToken', 'Session-Id', 'Referer', 'Content-Type']
- MAX_API_RETRIES = 3
-
- def __init__(self, controller_ip=None, username=None, password=None,
- token=None, tenant=None, tenant_uuid=None, verify=False,
- port=None, timeout=60, api_version=None,
- retry_conxn_errors=True, data_log=False,
- avi_credentials=None, session_id=None, csrftoken=None,
- lazy_authentication=False, max_api_retries=None):
- """
- ApiSession takes ownership of avi_credentials and may update the
- information inside it.
-
- Initialize new session object with authenticated token from login api.
- It also keeps a cache of user sessions that are cleaned up if inactive
- for more than 20 mins.
-
- Notes:
- 01. If mode is https and port is none or 443, we don't embed the
- port in the prefix. The prefix would be 'https://ip'. If port
- is a non-default value then we concatenate https://ip:port
- in the prefix.
- 02. If mode is http and the port is none or 80, we don't embed the
- port in the prefix. The prefix would be 'http://ip'. If port is
- a non-default value, then we concatenate http://ip:port in
- the prefix.
- """
- super(ApiSession, self).__init__()
- if not avi_credentials:
- tenant = tenant if tenant else "admin"
- self.avi_credentials = AviCredentials(
- controller=controller_ip, username=username, password=password,
- api_version=api_version, tenant=tenant, tenant_uuid=tenant_uuid,
- token=token, port=port, timeout=timeout,
- session_id=session_id, csrftoken=csrftoken)
- else:
- self.avi_credentials = avi_credentials
- self.headers = {}
- self.verify = verify
- self.retry_conxn_errors = retry_conxn_errors
- self.remote_api_version = {}
- self.session_cookie_name = ''
- self.user_hdrs = {}
- self.data_log = data_log
- self.num_session_retries = 0
- self.retry_wait_time = 0
- self.max_session_retries = (
- self.MAX_API_RETRIES if max_api_retries is None
- else int(max_api_retries))
- # Refer Notes 01 and 02
- k_port = port if port else 443
- if self.avi_credentials.controller.startswith('http'):
- k_port = 80 if not self.avi_credentials.port else k_port
- if self.avi_credentials.port is None or self.avi_credentials.port\
- == 80:
- self.prefix = self.avi_credentials.controller
- else:
- self.prefix = '{x}:{y}'.format(
- x=self.avi_credentials.controller,
- y=self.avi_credentials.port)
- else:
- if port is None or port == 443:
- self.prefix = 'https://{x}'.format(
- x=self.avi_credentials.controller)
- else:
- self.prefix = 'https://{x}:{y}'.format(
- x=self.avi_credentials.controller,
- y=self.avi_credentials.port)
- self.timeout = timeout
- self.key = '%s:%s:%s' % (self.avi_credentials.controller,
- self.avi_credentials.username, k_port)
- # Added api token and session id to sessionDict for handle single
- # session
- if self.avi_credentials.csrftoken:
- sessionDict[self.key] = {
- 'api': self,
- "csrftoken": self.avi_credentials.csrftoken,
- "session_id": self.avi_credentials.session_id,
- "last_used": datetime.utcnow()
- }
- elif lazy_authentication:
- sessionDict.get(self.key, {}).update(
- {'api': self, "last_used": datetime.utcnow()})
- else:
- self.authenticate_session()
-
- self.num_session_retries = 0
- self.pid = os.getpid()
- ApiSession._clean_inactive_sessions()
- return
-
- @property
- def controller_ip(self):
- return self.avi_credentials.controller
-
- @controller_ip.setter
- def controller_ip(self, controller_ip):
- self.avi_credentials.controller = controller_ip
-
- @property
- def username(self):
- return self.avi_credentials.username
-
- @property
- def connected(self):
- return sessionDict.get(self.key, {}).get('connected', False)
-
- @username.setter
- def username(self, username):
- self.avi_credentials.username = username
-
- @property
- def password(self):
- return self.avi_credentials.password
-
- @password.setter
- def password(self, password):
- self.avi_credentials.password = password
-
- @property
- def keystone_token(self):
- return sessionDict.get(self.key, {}).get('csrftoken', None)
-
- @keystone_token.setter
- def keystone_token(self, token):
- sessionDict[self.key]['csrftoken'] = token
-
- @property
- def tenant_uuid(self):
- self.avi_credentials.tenant_uuid
-
- @tenant_uuid.setter
- def tenant_uuid(self, tenant_uuid):
- self.avi_credentials.tenant_uuid = tenant_uuid
-
- @property
- def tenant(self):
- return self.avi_credentials.tenant
-
- @tenant.setter
- def tenant(self, tenant):
- if tenant:
- self.avi_credentials.tenant = tenant
- else:
- self.avi_credentials.tenant = 'admin'
-
- @property
- def port(self):
- self.avi_credentials.port
-
- @port.setter
- def port(self, port):
- self.avi_credentials.port = port
-
- @property
- def api_version(self):
- return self.avi_credentials.api_version
-
- @api_version.setter
- def api_version(self, api_version):
- self.avi_credentials.api_version = api_version
-
- @property
- def session_id(self):
- return sessionDict[self.key]['session_id']
-
- def get_context(self):
- return {
- 'session_id': sessionDict[self.key]['session_id'],
- 'csrftoken': sessionDict[self.key]['csrftoken']
- }
-
- @staticmethod
- def clear_cached_sessions():
- global sessionDict
- sessionDict = {}
-
- @staticmethod
- def get_session(
- controller_ip=None, username=None, password=None, token=None, tenant=None,
- tenant_uuid=None, verify=False, port=None, timeout=60,
- retry_conxn_errors=True, api_version=None, data_log=False,
- avi_credentials=None, session_id=None, csrftoken=None,
- lazy_authentication=False, max_api_retries=None):
- """
- returns the session object for same user and tenant
- calls init if session dose not exist and adds it to session cache
- :param controller_ip: controller IP address
- :param username:
- :param password:
- :param token: Token to use; example, a valid keystone token
- :param tenant: Name of the tenant on Avi Controller
- :param tenant_uuid: Don't specify tenant when using tenant_id
- :param port: Rest-API may use a different port other than 443
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param retry_conxn_errors: retry on connection errors
- :param api_version: Controller API version
- """
- if not avi_credentials:
- tenant = tenant if tenant else "admin"
- avi_credentials = AviCredentials(
- controller=controller_ip, username=username, password=password,
- api_version=api_version, tenant=tenant, tenant_uuid=tenant_uuid,
- token=token, port=port, timeout=timeout,
- session_id=session_id, csrftoken=csrftoken)
-
- k_port = avi_credentials.port if avi_credentials.port else 443
- if avi_credentials.controller.startswith('http'):
- k_port = 80 if not avi_credentials.port else k_port
- key = '%s:%s:%s' % (avi_credentials.controller,
- avi_credentials.username, k_port)
- cached_session = sessionDict.get(key)
- if cached_session:
- user_session = cached_session['api']
- if not (user_session.avi_credentials.csrftoken or
- lazy_authentication):
- user_session.authenticate_session()
- else:
- user_session = ApiSession(
- controller_ip, username, password, token=token, tenant=tenant,
- tenant_uuid=tenant_uuid, verify=verify, port=port,
- timeout=timeout, retry_conxn_errors=retry_conxn_errors,
- api_version=api_version, data_log=data_log,
- avi_credentials=avi_credentials,
- lazy_authentication=lazy_authentication,
- max_api_retries=max_api_retries)
- ApiSession._clean_inactive_sessions()
- return user_session
-
- def reset_session(self):
- """
- resets and re-authenticates the current session.
- """
- sessionDict[self.key]['connected'] = False
- logger.info('resetting session for %s', self.key)
- self.user_hdrs = {}
- for k, v in self.headers.items():
- if k not in self.SHARED_USER_HDRS:
- self.user_hdrs[k] = v
- self.headers = {}
- self.authenticate_session()
-
- def authenticate_session(self):
- """
- Performs session authentication with Avi controller and stores
- session cookies and sets header options like tenant.
- """
- body = {"username": self.avi_credentials.username}
- if self.avi_credentials.password:
- body["password"] = self.avi_credentials.password
- elif self.avi_credentials.token:
- body["token"] = self.avi_credentials.token
- else:
- raise APIError("Neither user password or token provided")
- logger.debug('authenticating user %s prefix %s',
- self.avi_credentials.username, self.prefix)
- self.cookies.clear()
- err = None
- try:
- rsp = super(ApiSession, self).post(
- self.prefix + "/login", body, timeout=self.timeout, verify=self.verify)
-
- if rsp.status_code == 200:
- self.num_session_retries = 0
- self.remote_api_version = rsp.json().get('version', {})
- self.session_cookie_name = rsp.json().get('session_cookie_name', 'sessionid')
- self.headers.update(self.user_hdrs)
- if rsp.cookies and 'csrftoken' in rsp.cookies:
- csrftoken = rsp.cookies['csrftoken']
- sessionDict[self.key] = {
- 'csrftoken': csrftoken,
- 'session_id': rsp.cookies[self.session_cookie_name],
- 'last_used': datetime.utcnow(),
- 'api': self,
- 'connected': True
- }
- logger.debug("authentication success for user %s",
- self.avi_credentials.username)
- return
- # Check for bad request and invalid credentials response code
- elif rsp.status_code in [401, 403]:
- logger.error('Status Code %s msg %s', rsp.status_code, rsp.text)
- err = APIError('Status Code %s msg %s' % (
- rsp.status_code, rsp.text), rsp)
- raise err
- else:
- logger.error("Error status code %s msg %s", rsp.status_code,
- rsp.text)
- err = APIError('Status Code %s msg %s' % (
- rsp.status_code, rsp.text), rsp)
- except (RequestsConnectionError, SSLError) as e:
- if not self.retry_conxn_errors:
- raise
- logger.warning('Connection error retrying %s', e)
- err = e
- # comes here only if there was either exception or login was not
- # successful
- if self.retry_wait_time:
- time.sleep(self.retry_wait_time)
- self.num_session_retries += 1
- if self.num_session_retries > self.max_session_retries:
- self.num_session_retries = 0
- logger.error("giving up after %d retries connection failure %s",
- self.max_session_retries, True)
- ret_err = (
- err if err else APIError("giving up after %d retries connection failure %s" %
- (self.max_session_retries, True)))
- raise ret_err
- self.authenticate_session()
- return
-
- def _get_api_headers(self, tenant, tenant_uuid, timeout, headers,
- api_version):
- """
- returns the headers that are passed to the requests.Session api calls.
- """
- api_hdrs = copy.deepcopy(self.headers)
- api_hdrs.update({
- "Referer": self.prefix,
- "Content-Type": "application/json"
- })
- api_hdrs['timeout'] = str(timeout)
- if self.key in sessionDict and 'csrftoken' in sessionDict.get(self.key):
- api_hdrs['X-CSRFToken'] = sessionDict.get(self.key)['csrftoken']
- else:
- self.authenticate_session()
- api_hdrs['X-CSRFToken'] = sessionDict.get(self.key)['csrftoken']
- if api_version:
- api_hdrs['X-Avi-Version'] = api_version
- elif self.avi_credentials.api_version:
- api_hdrs['X-Avi-Version'] = self.avi_credentials.api_version
- if tenant:
- tenant_uuid = None
- elif tenant_uuid:
- tenant = None
- else:
- tenant = self.avi_credentials.tenant
- tenant_uuid = self.avi_credentials.tenant_uuid
- if tenant_uuid:
- api_hdrs.update({"X-Avi-Tenant-UUID": "%s" % tenant_uuid})
- api_hdrs.pop("X-Avi-Tenant", None)
- elif tenant:
- api_hdrs.update({"X-Avi-Tenant": "%s" % tenant})
- api_hdrs.pop("X-Avi-Tenant-UUID", None)
- # Override any user headers that were passed by users. We don't know
- # when the user had updated the user_hdrs
- if self.user_hdrs:
- api_hdrs.update(self.user_hdrs)
- if headers:
- # overwrite the headers passed via the API calls.
- api_hdrs.update(headers)
- return api_hdrs
-
- def _api(self, api_name, path, tenant, tenant_uuid, data=None,
- headers=None, timeout=None, api_version=None, **kwargs):
- """
- It calls the requests.Session APIs and handles session expiry
- and other situations where session needs to be reset.
- returns ApiResponse object
- :param path: takes relative path to the AVI api.
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param headers: dictionary of headers that override the session
- headers.
- """
- if self.pid != os.getpid():
- logger.info('pid %d change detected new %d. Closing session',
- self.pid, os.getpid())
- self.close()
- self.pid = os.getpid()
- if timeout is None:
- timeout = self.timeout
- fullpath = self._get_api_path(path)
- fn = getattr(super(ApiSession, self), api_name)
- api_hdrs = self._get_api_headers(tenant, tenant_uuid, timeout, headers,
- api_version)
- connection_error = False
- err = None
- cookies = {
- 'csrftoken': api_hdrs['X-CSRFToken'],
- }
- try:
- if self.session_cookie_name:
- cookies[self.session_cookie_name] = sessionDict[self.key]['session_id']
- except KeyError:
- pass
- try:
- if (data is not None) and (type(data) == dict):
- resp = fn(fullpath, data=json.dumps(data), headers=api_hdrs,
- timeout=timeout, cookies=cookies, **kwargs)
- else:
- resp = fn(fullpath, data=data, headers=api_hdrs,
- timeout=timeout, cookies=cookies, **kwargs)
- except (RequestsConnectionError, SSLError) as e:
- logger.warning('Connection error retrying %s', e)
- if not self.retry_conxn_errors:
- raise
- connection_error = True
- err = e
- except Exception as e:
- logger.error('Error in Requests library %s', e)
- raise
- if not connection_error:
- logger.debug('path: %s http_method: %s hdrs: %s params: '
- '%s data: %s rsp: %s', fullpath, api_name.upper(),
- api_hdrs, kwargs, data,
- (resp.text if self.data_log else 'None'))
- if connection_error or resp.status_code in (401, 419):
- if connection_error:
- try:
- self.close()
- except Exception:
- # ignoring exception in cleanup path
- pass
- logger.warning('Connection failed, retrying.')
- # Adding sleep before retrying
- if self.retry_wait_time:
- time.sleep(self.retry_wait_time)
- else:
- logger.info('received error %d %s so resetting connection',
- resp.status_code, resp.text)
- ApiSession.reset_session(self)
- self.num_session_retries += 1
- if self.num_session_retries > self.max_session_retries:
- # Added this such that any code which re-tries can succeed
- # eventually.
- self.num_session_retries = 0
- if not connection_error:
- err = APIError('Status Code %s msg %s' % (
- resp.status_code, resp.text), resp)
- logger.error(
- "giving up after %d retries conn failure %s err %s",
- self.max_session_retries, connection_error, err)
- ret_err = (
- err if err else APIError("giving up after %d retries connection failure %s" %
- (self.max_session_retries, True)))
- raise ret_err
- # should restore the updated_hdrs to one passed down
- resp = self._api(api_name, path, tenant, tenant_uuid, data,
- headers=headers, api_version=api_version,
- timeout=timeout, **kwargs)
- self.num_session_retries = 0
-
- if resp.cookies and 'csrftoken' in resp.cookies:
- csrftoken = resp.cookies['csrftoken']
- self.headers.update({"X-CSRFToken": csrftoken})
- self._update_session_last_used()
- return ApiResponse.to_avi_response(resp)
-
- def get_controller_details(self):
- result = {
- "controller_ip": self.controller_ip,
- "controller_api_version": self.remote_api_version
- }
- return result
-
- def get(self, path, tenant='', tenant_uuid='', timeout=None, params=None,
- api_version=None, **kwargs):
- """
- It extends the Session Library interface to add AVI API prefixes,
- handle session exceptions related to authentication and update
- the global user session cache.
- :param path: takes relative path to the AVI api.
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param params: dictionary of key value pairs to be sent as query
- parameters
- :param api_version: overrides x-avi-header in request header during
- session creation
- get method takes relative path to service and kwargs as per Session
- class get method
- returns session's response object
- """
- return self._api('get', path, tenant, tenant_uuid, timeout=timeout,
- params=params, api_version=api_version, **kwargs)
-
- def get_object_by_name(self, path, name, tenant='', tenant_uuid='',
- timeout=None, params=None, api_version=None,
- **kwargs):
- """
- Helper function to access Avi REST Objects using object
- type and name. It behaves like python dictionary interface where it
- returns None when the object is not present in the AviController.
- Internally, it transforms the request to api/path?name=<name>...
- :param path: relative path to service
- :param name: name of the object
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param params: dictionary of key value pairs to be sent as query
- parameters
- :param api_version: overrides x-avi-header in request header during
- session creation
- returns dictionary object if successful else None
- """
- obj = None
- if not params:
- params = {}
- params['name'] = name
- resp = self.get(path, tenant=tenant, tenant_uuid=tenant_uuid,
- timeout=timeout,
- params=params, api_version=api_version, **kwargs)
- if resp.status_code in (401, 419):
- ApiSession.reset_session(self)
- resp = self.get_object_by_name(
- path, name, tenant, tenant_uuid, timeout=timeout,
- params=params, **kwargs)
- if resp.status_code > 499 or 'Invalid version' in resp.text:
- logger.error('Error in get object by name for %s named %s. '
- 'Error: %s', path, name, resp.text)
- raise AviServerError(resp.text, rsp=resp)
- elif resp.status_code > 299:
- return obj
- try:
- if 'results' in resp.json():
- obj = resp.json()['results'][0]
- else:
- # For apis returning single object eg. api/cluster
- obj = resp.json()
- except IndexError:
- logger.warning('Warning: Object Not found for %s named %s',
- path, name)
- obj = None
- self._update_session_last_used()
- return obj
-
- def post(self, path, data=None, tenant='', tenant_uuid='', timeout=None,
- force_uuid=None, params=None, api_version=None, **kwargs):
- """
- It extends the Session Library interface to add AVI API prefixes,
- handle session exceptions related to authentication and update
- the global user session cache.
- :param path: takes relative path to the AVI api.It is modified by
- the library to conform to AVI Controller's REST API interface
- :param data: dictionary of the data. Support for json string
- is deprecated
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param params: dictionary of key value pairs to be sent as query
- parameters
- :param api_version: overrides x-avi-header in request header during
- session creation
- returns session's response object
- """
- if force_uuid is not None:
- headers = kwargs.get('headers', {})
- headers[self.AVI_SLUG] = force_uuid
- kwargs['headers'] = headers
- return self._api('post', path, tenant, tenant_uuid, data=data,
- timeout=timeout, params=params,
- api_version=api_version, **kwargs)
-
- def put(self, path, data=None, tenant='', tenant_uuid='',
- timeout=None, params=None, api_version=None, **kwargs):
- """
- It extends the Session Library interface to add AVI API prefixes,
- handle session exceptions related to authentication and update
- the global user session cache.
- :param path: takes relative path to the AVI api.It is modified by
- the library to conform to AVI Controller's REST API interface
- :param data: dictionary of the data. Support for json string
- is deprecated
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param params: dictionary of key value pairs to be sent as query
- parameters
- :param api_version: overrides x-avi-header in request header during
- session creation
- returns session's response object
- """
- return self._api('put', path, tenant, tenant_uuid, data=data,
- timeout=timeout, params=params,
- api_version=api_version, **kwargs)
-
- def patch(self, path, data=None, tenant='', tenant_uuid='',
- timeout=None, params=None, api_version=None, **kwargs):
- """
- It extends the Session Library interface to add AVI API prefixes,
- handle session exceptions related to authentication and update
- the global user session cache.
- :param path: takes relative path to the AVI api.It is modified by
- the library to conform to AVI Controller's REST API interface
- :param data: dictionary of the data. Support for json string
- is deprecated
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param params: dictionary of key value pairs to be sent as query
- parameters
- :param api_version: overrides x-avi-header in request header during
- session creation
- returns session's response object
- """
- return self._api('patch', path, tenant, tenant_uuid, data=data,
- timeout=timeout, params=params,
- api_version=api_version, **kwargs)
-
- def put_by_name(self, path, name, data=None, tenant='',
- tenant_uuid='', timeout=None, params=None,
- api_version=None, **kwargs):
- """
- Helper function to perform HTTP PUT on Avi REST Objects using object
- type and name.
- Internally, it transforms the request to api/path?name=<name>...
- :param path: relative path to service
- :param name: name of the object
- :param data: dictionary of the data. Support for json string
- is deprecated
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param params: dictionary of key value pairs to be sent as query
- parameters
- :param api_version: overrides x-avi-header in request header during
- session creation
- returns session's response object
- """
- uuid = self._get_uuid_by_name(
- path, name, tenant, tenant_uuid, api_version=api_version)
- path = '%s/%s' % (path, uuid)
- return self.put(path, data, tenant, tenant_uuid, timeout=timeout,
- params=params, api_version=api_version, **kwargs)
-
- def delete(self, path, tenant='', tenant_uuid='', timeout=None, params=None,
- data=None, api_version=None, **kwargs):
- """
- It extends the Session Library interface to add AVI API prefixes,
- handle session exceptions related to authentication and update
- the global user session cache.
- :param path: takes relative path to the AVI api.It is modified by
- the library to conform to AVI Controller's REST API interface
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param params: dictionary of key value pairs to be sent as query
- parameters
- :param data: dictionary of the data. Support for json string
- is deprecated
- :param api_version: overrides x-avi-header in request header during
- session creation
- returns session's response object
- """
- return self._api('delete', path, tenant, tenant_uuid, data=data,
- timeout=timeout, params=params,
- api_version=api_version, **kwargs)
-
- def delete_by_name(self, path, name, tenant='', tenant_uuid='',
- timeout=None, params=None, api_version=None, **kwargs):
- """
- Helper function to perform HTTP DELETE on Avi REST Objects using object
- type and name.Internally, it transforms the request to
- api/path?name=<name>...
- :param path: relative path to service
- :param name: name of the object
- :param tenant: overrides the tenant used during session creation
- :param tenant_uuid: overrides the tenant or tenant_uuid during session
- creation
- :param timeout: timeout for API calls; Default value is 60 seconds
- :param params: dictionary of key value pairs to be sent as query
- parameters
- :param api_version: overrides x-avi-header in request header during
- session creation
- returns session's response object
- """
- uuid = self._get_uuid_by_name(path, name, tenant, tenant_uuid,
- api_version=api_version)
- if not uuid:
- raise ObjectNotFound("%s/?name=%s" % (path, name))
- path = '%s/%s' % (path, uuid)
- return self.delete(path, tenant, tenant_uuid, timeout=timeout,
- params=params, api_version=api_version, **kwargs)
-
- def get_obj_ref(self, obj):
- """returns reference url from dict object"""
- if not obj:
- return None
- if isinstance(obj, Response):
- obj = json.loads(obj.text)
- if obj.get(0, None):
- return obj[0]['url']
- elif obj.get('url', None):
- return obj['url']
- elif obj.get('results', None):
- return obj['results'][0]['url']
- else:
- return None
-
- def get_obj_uuid(self, obj):
- """returns uuid from dict object"""
- if not obj:
- raise ObjectNotFound('Object %s Not found' % (obj))
- if isinstance(obj, Response):
- obj = json.loads(obj.text)
- if obj.get(0, None):
- return obj[0]['uuid']
- elif obj.get('uuid', None):
- return obj['uuid']
- elif obj.get('results', None):
- return obj['results'][0]['uuid']
- else:
- return None
-
- def _get_api_path(self, path, uuid=None):
- """
- This function returns the full url from relative path and uuid.
- """
- if path == 'logout':
- return self.prefix + '/' + path
- elif uuid:
- return self.prefix + '/api/' + path + '/' + uuid
- else:
- return self.prefix + '/api/' + path
-
- def _get_uuid_by_name(self, path, name, tenant='admin',
- tenant_uuid='', api_version=None):
- """gets object by name and service path and returns uuid"""
- resp = self.get_object_by_name(
- path, name, tenant, tenant_uuid, api_version=api_version)
- if not resp:
- raise ObjectNotFound("%s/%s" % (path, name))
- return self.get_obj_uuid(resp)
-
- def _update_session_last_used(self):
- if self.key in sessionDict:
- sessionDict[self.key]["last_used"] = datetime.utcnow()
-
- @staticmethod
- def _clean_inactive_sessions():
- """Removes sessions which are inactive more than 20 min"""
- session_cache = sessionDict
- logger.debug("cleaning inactive sessions in pid %d num elem %d",
- os.getpid(), len(session_cache))
- keys_to_delete = []
- for key, session in list(session_cache.items()):
- tdiff = avi_timedelta(datetime.utcnow() - session["last_used"])
- if tdiff < ApiSession.SESSION_CACHE_EXPIRY:
- continue
- keys_to_delete.append(key)
- for key in keys_to_delete:
- del session_cache[key]
- logger.debug("Removed session for : %s", key)
-
- def delete_session(self):
- """ Removes the session for cleanup"""
- logger.debug("Removed session for : %s", self.key)
- sessionDict.pop(self.key, None)
- return
-# End of file
diff --git a/lib/ansible/module_utils/network/bigswitch/__init__.py b/lib/ansible/module_utils/network/bigswitch/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/bigswitch/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/bigswitch/bigswitch.py b/lib/ansible/module_utils/network/bigswitch/bigswitch.py
deleted file mode 100644
index 299fcd3310..0000000000
--- a/lib/ansible/module_utils/network/bigswitch/bigswitch.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2016, Ted Elhourani <ted@bigswitch.com>
-#
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import json
-
-from ansible.module_utils.urls import fetch_url
-
-
-class Response(object):
-
- def __init__(self, resp, info):
- self.body = None
- if resp:
- self.body = resp.read()
- self.info = info
-
- @property
- def json(self):
- if not self.body:
- if "body" in self.info:
- return json.loads(self.info["body"])
- return None
- try:
- return json.loads(self.body)
- except ValueError:
- return None
-
- @property
- def status_code(self):
- return self.info["status"]
-
-
-class Rest(object):
-
- def __init__(self, module, headers, baseurl):
- self.module = module
- self.headers = headers
- self.baseurl = baseurl
-
- def _url_builder(self, path):
- if path[0] == '/':
- path = path[1:]
- return '%s/%s' % (self.baseurl, path)
-
- def send(self, method, path, data=None, headers=None):
- url = self._url_builder(path)
- data = self.module.jsonify(data)
-
- resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
-
- return Response(resp, info)
-
- def get(self, path, data=None, headers=None):
- return self.send('GET', path, data, headers)
-
- def put(self, path, data=None, headers=None):
- return self.send('PUT', path, data, headers)
-
- def post(self, path, data=None, headers=None):
- return self.send('POST', path, data, headers)
-
- def patch(self, path, data=None, headers=None):
- return self.send('PATCH', path, data, headers)
-
- def delete(self, path, data=None, headers=None):
- return self.send('DELETE', path, data, headers)
diff --git a/lib/ansible/module_utils/network/checkpoint/__init__.py b/lib/ansible/module_utils/network/checkpoint/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/checkpoint/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/cloudengine/__init__.py b/lib/ansible/module_utils/network/cloudengine/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/cloudengine/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/cloudengine/ce.py b/lib/ansible/module_utils/network/cloudengine/ce.py
deleted file mode 100644
index 01df0e795d..0000000000
--- a/lib/ansible/module_utils/network/cloudengine/ce.py
+++ /dev/null
@@ -1,421 +0,0 @@
-#
-# This code is part of Ansible, but is an independent component.
-#
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2017 Red Hat, Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-import re
-import socket
-import sys
-import traceback
-
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import exec_command, ConnectionError
-from ansible.module_utils.six import iteritems
-from ansible.module_utils._text import to_native
-from ansible.module_utils.network.common.netconf import NetconfConnection
-
-
-try:
- from ncclient.xml_ import to_xml, new_ele_ns
- HAS_NCCLIENT = True
-except ImportError:
- HAS_NCCLIENT = False
-
-
-try:
- from lxml import etree
-except ImportError:
- from xml.etree import ElementTree as etree
-
-_DEVICE_CLI_CONNECTION = None
-_DEVICE_NC_CONNECTION = None
-
-ce_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'use_ssl': dict(type='bool'),
- 'validate_certs': dict(type='bool'),
- 'timeout': dict(type='int'),
- 'transport': dict(default='cli', choices=['cli', 'netconf']),
-}
-ce_argument_spec = {
- 'provider': dict(type='dict', options=ce_provider_spec),
-}
-ce_top_spec = {
- 'host': dict(removed_in_version=2.9),
- 'port': dict(removed_in_version=2.9, type='int'),
- 'username': dict(removed_in_version=2.9),
- 'password': dict(removed_in_version=2.9, no_log=True),
- 'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
- 'use_ssl': dict(removed_in_version=2.9, type='bool'),
- 'validate_certs': dict(removed_in_version=2.9, type='bool'),
- 'timeout': dict(removed_in_version=2.9, type='int'),
- 'transport': dict(removed_in_version=2.9, choices=['cli', 'netconf']),
-}
-ce_argument_spec.update(ce_top_spec)
-
-
-def to_string(data):
- return re.sub(r'<data\s+.+?(/>|>)', r'<data\1', data)
-
-
-def check_args(module, warnings):
- pass
-
-
-def load_params(module):
- """load_params"""
- provider = module.params.get('provider') or dict()
- for key, value in iteritems(provider):
- if key in ce_argument_spec:
- if module.params.get(key) is None and value is not None:
- module.params[key] = value
-
-
-def get_connection(module):
- """get_connection"""
- global _DEVICE_CLI_CONNECTION
- if not _DEVICE_CLI_CONNECTION:
- load_params(module)
- conn = Cli(module)
- _DEVICE_CLI_CONNECTION = conn
- return _DEVICE_CLI_CONNECTION
-
-
-def rm_config_prefix(cfg):
- if not cfg:
- return cfg
-
- cmds = cfg.split("\n")
- for i in range(len(cmds)):
- if not cmds[i]:
- continue
- if '~' in cmds[i]:
- index = cmds[i].index('~')
- if cmds[i][:index] == ' ' * index:
- cmds[i] = cmds[i].replace("~", "", 1)
- return '\n'.join(cmds)
-
-
-class Cli:
-
- def __init__(self, module):
- self._module = module
- self._device_configs = {}
-
- def exec_command(self, command):
- if isinstance(command, dict):
- command = self._module.jsonify(command)
-
- return exec_command(self._module, command)
-
- def get_config(self, flags=None):
- """Retrieves the current config from the device or cache
- """
- flags = [] if flags is None else flags
-
- cmd = 'display current-configuration '
- cmd += ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return self._device_configs[cmd]
- except KeyError:
- rc, out, err = self.exec_command(cmd)
- if rc != 0:
- self._module.fail_json(msg=err)
- cfg = str(out).strip()
- # remove default configuration prefix '~'
- for flag in flags:
- if "include-default" in flag:
- cfg = rm_config_prefix(cfg)
- break
-
- self._device_configs[cmd] = cfg
- return cfg
-
- def run_commands(self, commands, check_rc=True):
- """Run list of commands on remote device and return results
- """
- responses = list()
-
- for item in to_list(commands):
-
- rc, out, err = self.exec_command(item)
-
- if check_rc and rc != 0:
- self._module.fail_json(msg=cli_err_msg(item['command'].strip(), err))
-
- try:
- out = self._module.from_json(out)
- except ValueError:
- out = str(out).strip()
-
- responses.append(out)
- return responses
-
- def load_config(self, config):
- """Sends configuration commands to the remote device
- """
- rc, out, err = self.exec_command('mmi-mode enable')
- if rc != 0:
- self._module.fail_json(msg='unable to set mmi-mode enable', output=err)
- rc, out, err = self.exec_command('system-view immediately')
- if rc != 0:
- self._module.fail_json(msg='unable to enter system-view', output=err)
-
- for cmd in config:
- rc, out, err = self.exec_command(cmd)
- if rc != 0:
- self._module.fail_json(msg=cli_err_msg(cmd.strip(), err))
-
- self.exec_command('return')
-
-
-def cli_err_msg(cmd, err):
- """ get cli exception message"""
-
- if not err:
- return "Error: Fail to get cli exception message."
-
- msg = list()
- err_list = str(err).split("\r\n")
- for err in err_list:
- err = err.strip('.,\r\n\t ')
- if not err:
- continue
- if cmd and cmd == err:
- continue
- if " at '^' position" in err:
- err = err.replace(" at '^' position", "").strip()
- err = err.strip('.,\r\n\t ')
- if err == "^":
- continue
- if len(err) > 2 and err[0] in ["<", "["] and err[-1] in [">", "]"]:
- continue
- err.strip('.,\r\n\t ')
- if err:
- msg.append(err)
-
- if cmd:
- msg.insert(0, "Command: %s" % cmd)
-
- return ", ".join(msg).capitalize() + "."
-
-
-def to_command(module, commands):
- default_output = 'text'
- transform = ComplexList(dict(
- command=dict(key=True),
- output=dict(default=default_output),
- prompt=dict(),
- answer=dict()
- ), module)
-
- commands = transform(to_list(commands))
-
- return commands
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- conn = get_connection(module)
- return conn.get_config(flags)
-
-
-def run_commands(module, commands, check_rc=True):
- conn = get_connection(module)
- return conn.run_commands(to_command(module, commands), check_rc)
-
-
-def load_config(module, config):
- """load_config"""
- conn = get_connection(module)
- return conn.load_config(config)
-
-
-def ce_unknown_host_cb(host, fingerprint):
- """ ce_unknown_host_cb """
-
- return True
-
-
-def get_nc_set_id(xml_str):
- """get netconf set-id value"""
-
- result = re.findall(r'<rpc-reply.+?set-id=\"(\d+)\"', xml_str)
- if not result:
- return None
- return result[0]
-
-
-def get_xml_line(xml_list, index):
- """get xml specified line valid string data"""
-
- ele = None
- while xml_list and not ele:
- if index >= 0 and index >= len(xml_list):
- return None
- if index < 0 and abs(index) > len(xml_list):
- return None
-
- ele = xml_list[index]
- if not ele.replace(" ", ""):
- xml_list.pop(index)
- ele = None
- return ele
-
-
-def merge_nc_xml(xml1, xml2):
- """merge xml1 and xml2"""
-
- xml1_list = xml1.split("</data>")[0].split("\n")
- xml2_list = xml2.split("<data>")[1].split("\n")
-
- while True:
- xml1_ele1 = get_xml_line(xml1_list, -1)
- xml1_ele2 = get_xml_line(xml1_list, -2)
- xml2_ele1 = get_xml_line(xml2_list, 0)
- xml2_ele2 = get_xml_line(xml2_list, 1)
- if not xml1_ele1 or not xml1_ele2 or not xml2_ele1 or not xml2_ele2:
- return xml1
-
- if "xmlns" in xml2_ele1:
- xml2_ele1 = xml2_ele1.lstrip().split(" ")[0] + ">"
- if "xmlns" in xml2_ele2:
- xml2_ele2 = xml2_ele2.lstrip().split(" ")[0] + ">"
- if xml1_ele1.replace(" ", "").replace("/", "") == xml2_ele1.replace(" ", "").replace("/", ""):
- if xml1_ele2.replace(" ", "").replace("/", "") == xml2_ele2.replace(" ", "").replace("/", ""):
- xml1_list.pop()
- xml2_list.pop(0)
- else:
- break
- else:
- break
-
- return "\n".join(xml1_list + xml2_list)
-
-
-def get_nc_connection(module):
- global _DEVICE_NC_CONNECTION
- if not _DEVICE_NC_CONNECTION:
- load_params(module)
- conn = NetconfConnection(module._socket_path)
- _DEVICE_NC_CONNECTION = conn
- return _DEVICE_NC_CONNECTION
-
-
-def set_nc_config(module, xml_str):
- """ set_config """
-
- conn = get_nc_connection(module)
- try:
- out = conn.edit_config(target='running', config=xml_str, default_operation='merge',
- error_option='rollback-on-error')
- finally:
- # conn.unlock(target = 'candidate')
- pass
- return to_string(to_xml(out))
-
-
-def get_nc_next(module, xml_str):
- """ get_nc_next for exchange capability """
-
- conn = get_nc_connection(module)
- result = None
- if xml_str is not None:
- response = conn.get(xml_str, if_rpc_reply=True)
- result = response.find('./*')
- set_id = response.get('set-id')
- while True and set_id is not None:
- try:
- fetch_node = new_ele_ns('get-next', 'http://www.huawei.com/netconf/capability/base/1.0', {'set-id': set_id})
- next_xml = conn.dispatch_rpc(etree.tostring(fetch_node))
- if next_xml is not None:
- result.extend(next_xml.find('./*'))
- set_id = next_xml.get('set-id')
- except ConnectionError:
- break
- if result is not None:
- return etree.tostring(result)
- return result
-
-
-def get_nc_config(module, xml_str):
- """ get_config """
-
- conn = get_nc_connection(module)
- if xml_str is not None:
- response = conn.get(xml_str)
- else:
- return None
-
- return to_string(to_xml(response))
-
-
-def execute_nc_action(module, xml_str):
- """ huawei execute-action """
-
- conn = get_nc_connection(module)
- response = conn.execute_action(xml_str)
- return to_string(to_xml(response))
-
-
-def execute_nc_cli(module, xml_str):
- """ huawei execute-cli """
-
- if xml_str is not None:
- try:
- conn = get_nc_connection(module)
- out = conn.execute_nc_cli(command=xml_str)
- return to_string(to_xml(out))
- except Exception as exc:
- raise Exception(exc)
-
-
-def check_ip_addr(ipaddr):
- """ check ip address, Supports IPv4 and IPv6 """
-
- if not ipaddr or '\x00' in ipaddr:
- return False
-
- try:
- res = socket.getaddrinfo(ipaddr, 0, socket.AF_UNSPEC,
- socket.SOCK_STREAM,
- 0, socket.AI_NUMERICHOST)
- return bool(res)
- except socket.gaierror:
- err = sys.exc_info()[1]
- if err.args[0] == socket.EAI_NONAME:
- return False
- raise
diff --git a/lib/ansible/module_utils/network/cnos/__init__.py b/lib/ansible/module_utils/network/cnos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/cnos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/cnos/cnos.py b/lib/ansible/module_utils/network/cnos/cnos.py
deleted file mode 100644
index e986cdf7be..0000000000
--- a/lib/ansible/module_utils/network/cnos/cnos.py
+++ /dev/null
@@ -1,660 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their own
-# license to the complete work.
-#
-# Copyright (C) 2017 Lenovo, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Contains utility methods
-# Lenovo Networking
-
-import time
-import socket
-import re
-import json
-try:
- from ansible.module_utils.network.cnos import cnos_errorcodes
- from ansible.module_utils.network.cnos import cnos_devicerules
- HAS_LIB = True
-except Exception:
- HAS_LIB = False
-from distutils.cmd import Command
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, EntityCollection
-from ansible.module_utils.connection import Connection, exec_command
-from ansible.module_utils.connection import ConnectionError
-
-_DEVICE_CONFIGS = {}
-_CONNECTION = None
-_VALID_USER_ROLES = ['network-admin', 'network-operator']
-
-cnos_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']),
- no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']),
- type='path'),
- 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']),
- type='bool'),
- 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']),
- no_log=True),
- 'timeout': dict(type='int'),
- 'context': dict(),
- 'passwords': dict()
-}
-
-cnos_argument_spec = {
- 'provider': dict(type='dict', options=cnos_provider_spec),
-}
-
-command_spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict(),
- 'check_all': dict()
-}
-
-
-def get_provider_argspec():
- return cnos_provider_spec
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_user_roles():
- return _VALID_USER_ROLES
-
-
-def get_connection(module):
- global _CONNECTION
- if _CONNECTION:
- return _CONNECTION
- _CONNECTION = Connection(module._socket_path)
-
- context = None
- try:
- context = module.params['context']
- except KeyError:
- context = None
-
- if context:
- if context == 'system':
- command = 'changeto system'
- else:
- command = 'changeto context %s' % context
- _CONNECTION.get(command)
-
- return _CONNECTION
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- passwords = None
- try:
- passwords = module.params['passwords']
- except KeyError:
- passwords = None
- if passwords:
- cmd = 'more system:running-config'
- else:
- cmd = 'display running-config '
- cmd += ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- conn = get_connection(module)
- out = conn.get(cmd)
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
-
-
-def to_commands(module, commands):
- if not isinstance(commands, list):
- raise AssertionError('argument must be of type <list>')
-
- transform = EntityCollection(module, command_spec)
- commands = transform(commands)
-
- for index, item in enumerate(commands):
- if module.check_mode and not item['command'].startswith('show'):
- module.warn('only show commands are supported when using check '
- 'mode, not executing `%s`' % item['command'])
-
- return commands
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
- connection.get('enable')
- commands = to_commands(module, to_list(commands))
-
- responses = list()
-
- for cmd in commands:
- out = connection.get(**cmd)
- responses.append(to_text(out, errors='surrogate_then_replace'))
-
- return responses
-
-
-def run_cnos_commands(module, commands, check_rc=True):
- retVal = ''
- enter_config = {'command': 'configure terminal', 'prompt': None,
- 'answer': None}
- exit_config = {'command': 'end', 'prompt': None, 'answer': None}
- commands.insert(0, enter_config)
- commands.append(exit_config)
- for cmd in commands:
- retVal = retVal + '>> ' + cmd['command'] + '\n'
- try:
- responses = run_commands(module, commands, check_rc)
- for response in responses:
- retVal = retVal + '<< ' + response + '\n'
- except Exception as e:
- errMsg = ''
- if hasattr(e, 'message'):
- errMsg = e.message
- else:
- errMsg = str(e)
- # Exception in Exceptions
- if 'VLAN_ACCESS_MAP' in errMsg:
- return retVal + '<<' + errMsg + '\n'
- if 'confederation identifier' in errMsg:
- return retVal + '<<' + errMsg + '\n'
- # Add more here if required
- retVal = retVal + '<< ' + 'Error-101 ' + errMsg + '\n'
- return str(retVal)
-
-
-def get_capabilities(module):
- if hasattr(module, '_cnos_capabilities'):
- return module._cnos_capabilities
- try:
- capabilities = Connection(module._socket_path).get_capabilities()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- module._cnos_capabilities = json.loads(capabilities)
- return module._cnos_capabilities
-
-
-def load_config(module, config):
- try:
- conn = get_connection(module)
- conn.get('enable')
- resp = conn.edit_config(config)
- return resp.get('response')
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def get_defaults_flag(module):
- rc, out, err = exec_command(module, 'display running-config ?')
- out = to_text(out, errors='surrogate_then_replace')
-
- commands = set()
- for line in out.splitlines():
- if line:
- commands.add(line.strip().split()[0])
-
- if 'all' in commands:
- return 'all'
- else:
- return 'full'
-
-
-def enterEnableModeForDevice(enablePassword, timeout, obj):
- command = "enable\n"
- pwdPrompt = "password:"
- # debugOutput(enablePassword)
- # debugOutput('\n')
- obj.settimeout(int(timeout))
- # Executing enable
- obj.send(command)
- flag = False
- retVal = ""
- count = 5
- while not flag:
- # If wait time is execeeded.
- if(count == 0):
- flag = True
- else:
- count = count - 1
- # A delay of one second
- time.sleep(1)
- try:
- buffByte = obj.recv(9999)
- buff = buffByte.decode()
- retVal = retVal + buff
- # debugOutput(buff)
- gotit = buff.find(pwdPrompt)
- if(gotit != -1):
- time.sleep(1)
- if(enablePassword is None or enablePassword == ""):
- return "\n Error-106"
- obj.send(enablePassword)
- obj.send("\r")
- obj.send("\n")
- time.sleep(1)
- innerBuffByte = obj.recv(9999)
- innerBuff = innerBuffByte.decode()
- retVal = retVal + innerBuff
- # debugOutput(innerBuff)
- innerGotit = innerBuff.find("#")
- if(innerGotit != -1):
- return retVal
- else:
- gotit = buff.find("#")
- if(gotit != -1):
- return retVal
- except Exception:
- retVal = retVal + "\n Error-101"
- flag = True
- if(retVal == ""):
- retVal = "\n Error-101"
- return retVal
-# EOM
-
-
-def waitForDeviceResponse(command, prompt, timeout, obj):
- obj.settimeout(int(timeout))
- obj.send(command)
- flag = False
- retVal = ""
- while not flag:
- time.sleep(1)
- try:
- buffByte = obj.recv(9999)
- buff = buffByte.decode()
- retVal = retVal + buff
- # debugOutput(retVal)
- gotit = buff.find(prompt)
- if(gotit != -1):
- flag = True
- except Exception:
- # debugOutput(prompt)
- if prompt == "(yes/no)?":
- pass
- elif prompt == "Password:":
- pass
- else:
- retVal = retVal + "\n Error-101"
- flag = True
- return retVal
-# EOM
-
-
-def checkOutputForError(output):
- retVal = ""
- index = output.lower().find('error')
- startIndex = index + 6
- if(index == -1):
- index = output.lower().find('invalid')
- startIndex = index + 8
- if(index == -1):
- index = output.lower().find('cannot be enabled in l2 interface')
- startIndex = index + 34
- if(index == -1):
- index = output.lower().find('incorrect')
- startIndex = index + 10
- if(index == -1):
- index = output.lower().find('failure')
- startIndex = index + 8
- if(index == -1):
- return None
-
- endIndex = startIndex + 3
- errorCode = output[startIndex:endIndex]
- result = errorCode.isdigit()
- if(result is not True):
- return "Device returned an Error. Please check Results for more \
- information"
-
- errorFile = "dictionary/ErrorCodes.lvo"
- try:
- # with open(errorFile, 'r') as f:
- f = open(errorFile, 'r')
- for line in f:
- if('=' in line):
- data = line.split('=')
- if(data[0].strip() == errorCode):
- errorString = data[1].strip()
- return errorString
- except Exception:
- errorString = cnos_errorcodes.getErrorString(errorCode)
- errorString = errorString.strip()
- return errorString
- return "Error Code Not Found"
-# EOM
-
-
-def checkSanityofVariable(deviceType, variableId, variableValue):
- retVal = ""
- ruleFile = "dictionary/" + deviceType + "_rules.lvo"
- ruleString = getRuleStringForVariable(deviceType, ruleFile, variableId)
- retVal = validateValueAgainstRule(ruleString, variableValue)
- return retVal
-# EOM
-
-
-def getRuleStringForVariable(deviceType, ruleFile, variableId):
- retVal = ""
- try:
- # with open(ruleFile, 'r') as f:
- f = open(ruleFile, 'r')
- for line in f:
- # debugOutput(line)
- if(':' in line):
- data = line.split(':')
- # debugOutput(data[0])
- if(data[0].strip() == variableId):
- retVal = line
- except Exception:
- ruleString = cnos_devicerules.getRuleString(deviceType, variableId)
- retVal = ruleString.strip()
- return retVal
-# EOM
-
-
-def validateValueAgainstRule(ruleString, variableValue):
-
- retVal = ""
- if(ruleString == ""):
- return 1
- rules = ruleString.split(':')
- variableType = rules[1].strip()
- varRange = rules[2].strip()
- if(variableType == "INTEGER"):
- result = checkInteger(variableValue)
- if(result is True):
- return "ok"
- else:
- return "Error-111"
- elif(variableType == "FLOAT"):
- result = checkFloat(variableValue)
- if(result is True):
- return "ok"
- else:
- return "Error-112"
-
- elif(variableType == "INTEGER_VALUE"):
- int_range = varRange.split('-')
- r = range(int(int_range[0].strip()), int(int_range[1].strip()))
- if(checkInteger(variableValue) is not True):
- return "Error-111"
- result = int(variableValue) in r
- if(result is True):
- return "ok"
- else:
- return "Error-113"
-
- elif(variableType == "INTEGER_VALUE_RANGE"):
- int_range = varRange.split('-')
- varLower = int_range[0].strip()
- varHigher = int_range[1].strip()
- r = range(int(varLower), int(varHigher))
- val_range = variableValue.split('-')
- try:
- valLower = val_range[0].strip()
- valHigher = val_range[1].strip()
- except Exception:
- return "Error-113"
- if((checkInteger(valLower) is not True) or
- (checkInteger(valHigher) is not True)):
- # debugOutput("Error-114")
- return "Error-114"
- result = (int(valLower) in r) and (int(valHigher)in r) \
- and (int(valLower) < int(valHigher))
- if(result is True):
- return "ok"
- else:
- # debugOutput("Error-113")
- return "Error-113"
-
- elif(variableType == "INTEGER_OPTIONS"):
- int_options = varRange.split(',')
- if(checkInteger(variableValue) is not True):
- return "Error-111"
- for opt in int_options:
- if(opt.strip() is variableValue):
- result = True
- break
- if(result is True):
- return "ok"
- else:
- return "Error-115"
-
- elif(variableType == "LONG"):
- result = checkLong(variableValue)
- if(result is True):
- return "ok"
- else:
- return "Error-116"
-
- elif(variableType == "LONG_VALUE"):
- long_range = varRange.split('-')
- r = range(int(long_range[0].strip()), int(long_range[1].strip()))
- if(checkLong(variableValue) is not True):
- # debugOutput(variableValue)
- return "Error-116"
- result = int(variableValue) in r
- if(result is True):
- return "ok"
- else:
- return "Error-113"
-
- elif(variableType == "LONG_VALUE_RANGE"):
- long_range = varRange.split('-')
- r = range(int(long_range[0].strip()), int(long_range[1].strip()))
- val_range = variableValue.split('-')
- if((checkLong(val_range[0]) is not True) or
- (checkLong(val_range[1]) is not True)):
- return "Error-117"
- result = (val_range[0] in r) and (
- val_range[1] in r) and (val_range[0] < val_range[1])
- if(result is True):
- return "ok"
- else:
- return "Error-113"
- elif(variableType == "LONG_OPTIONS"):
- long_options = varRange.split(',')
- if(checkLong(variableValue) is not True):
- return "Error-116"
- for opt in long_options:
- if(opt.strip() == variableValue):
- result = True
- break
- if(result is True):
- return "ok"
- else:
- return "Error-115"
-
- elif(variableType == "TEXT"):
- if(variableValue == ""):
- return "Error-118"
- if(True is isinstance(variableValue, str)):
- return "ok"
- else:
- return "Error-119"
-
- elif(variableType == "NO_VALIDATION"):
- if(variableValue == ""):
- return "Error-118"
- else:
- return "ok"
-
- elif(variableType == "TEXT_OR_EMPTY"):
- if(variableValue is None or variableValue == ""):
- return "ok"
- if(result == isinstance(variableValue, str)):
- return "ok"
- else:
- return "Error-119"
-
- elif(variableType == "MATCH_TEXT"):
- if(variableValue == ""):
- return "Error-118"
- if(isinstance(variableValue, str)):
- if(varRange == variableValue):
- return "ok"
- else:
- return "Error-120"
- else:
- return "Error-119"
-
- elif(variableType == "MATCH_TEXT_OR_EMPTY"):
- if(variableValue is None or variableValue == ""):
- return "ok"
- if(isinstance(variableValue, str)):
- if(varRange == variableValue):
- return "ok"
- else:
- return "Error-120"
- else:
- return "Error-119"
-
- elif(variableType == "TEXT_OPTIONS"):
- str_options = varRange.split(',')
- if(isinstance(variableValue, str) is not True):
- return "Error-119"
- result = False
- for opt in str_options:
- if(opt.strip() == variableValue):
- result = True
- break
- if(result is True):
- return "ok"
- else:
- return "Error-115"
-
- elif(variableType == "TEXT_OPTIONS_OR_EMPTY"):
- if(variableValue is None or variableValue == ""):
- return "ok"
- str_options = varRange.split(',')
- if(isinstance(variableValue, str) is not True):
- return "Error-119"
- for opt in str_options:
- if(opt.strip() == variableValue):
- result = True
- break
- if(result is True):
- return "ok"
- else:
- return "Error-115"
-
- elif(variableType == "IPV4Address"):
- try:
- socket.inet_pton(socket.AF_INET, variableValue)
- result = True
- except socket.error:
- result = False
- if(result is True):
- return "ok"
- else:
- return "Error-121"
- elif(variableType == "IPV4AddressWithMask"):
- if(variableValue is None or variableValue == ""):
- return "Error-119"
- str_options = variableValue.split('/')
- ipaddr = str_options[0]
- mask = str_options[1]
- try:
- socket.inet_pton(socket.AF_INET, ipaddr)
- if(checkInteger(mask) is True):
- result = True
- else:
- result = False
- except socket.error:
- result = False
- if(result is True):
- return "ok"
- else:
- return "Error-121"
-
- elif(variableType == "IPV6Address"):
- try:
- socket.inet_pton(socket.AF_INET6, variableValue)
- result = True
- except socket.error:
- result = False
- if(result is True):
- return "ok"
- else:
- return "Error-122"
-
- return retVal
-# EOM
-
-
-def disablePaging(remote_conn):
- remote_conn.send("terminal length 0\n")
- time.sleep(1)
- # Clear the buffer on the screen
- outputByte = remote_conn.recv(1000)
- output = outputByte.decode()
- return output
-# EOM
-
-
-def checkInteger(s):
- try:
- int(s)
- return True
- except ValueError:
- return False
-# EOM
-
-
-def checkFloat(s):
- try:
- float(s)
- return True
- except ValueError:
- return False
-# EOM
-
-
-def checkLong(s):
- try:
- int(s)
- return True
- except ValueError:
- return False
-
-
-def debugOutput(command):
- f = open('debugOutput.txt', 'a')
- f.write(str(command)) # python will convert \n to os.linesep
- f.close() # you can omit in most cases as the destructor will call it
-# EOM
diff --git a/lib/ansible/module_utils/network/cnos/cnos_devicerules.py b/lib/ansible/module_utils/network/cnos/cnos_devicerules.py
deleted file mode 100644
index f6c8f24ea7..0000000000
--- a/lib/ansible/module_utils/network/cnos/cnos_devicerules.py
+++ /dev/null
@@ -1,1921 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their
-# own license to the complete work.
-#
-# Copyright (C) 2017 Lenovo, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Contains device rule and methods
-# Lenovo Networking
-
-
def getRuleString(deviceType, variableId):
    """Return the validation rule for *variableId* on *deviceType*.

    On success the result is ``"<variableId>:<rule>"`` where the rule is
    looked up in the device-specific rule dictionary.  If the variable
    is not defined for that device the message
    ``"The variable <variableId> is not supported"`` is returned
    instead.  Unknown device types fall back to the default CNOS rules.
    """
    # One dispatch table replaces the original nine-way copy-pasted
    # if/elif chain; every branch performed the identical lookup on a
    # different module-level dictionary.
    device_rules = {
        'g8272_cnos': g8272_cnos,
        'g8296_cnos': g8296_cnos,
        'g8332_cnos': g8332_cnos,
        'NE1072T': NE1072T,
        'NE1032': NE1032,
        'NE1032T': NE1032T,
        'NE10032': NE10032,
        'NE2572': NE2572,
        'NE0152T': NE0152T,
    }
    rules = device_rules.get(deviceType, default_cnos)
    if variableId in rules:
        return variableId + ":" + rules[variableId]
    return "The variable " + variableId + " is not supported"
# EOM
-
-
# Fallback validation rules used by getRuleString() for any device type
# without its own table.  Each entry maps a configuration variable name
# to a rule string of the form '<RULE_TYPE>:<constraint>' (integer
# ranges, comma-separated option lists, address-family markers, ...).
# Backslash-continued values embed the continuation indentation in the
# string; the option checker compares with opt.strip(), so that
# whitespace is harmless.
default_cnos = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    'vlan_access_map_name': 'TEXT: ',
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
        keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    # NOTE(review): 'condeferation' looks like a typo for
    # 'confederation', but callers look up this exact key — confirm
    # before renaming.
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
        static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
        vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    # Default hardware profile assumes 32 ethernet ports.
    'ethernet_interface_value': 'INTEGER_VALUE:1-32',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
        ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    # NOTE(review): this key contains a space ('bfd_ multiplier');
    # looks like a typo, but lookups use the exact key — confirm before
    # renaming.
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,\
        interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
        meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
        non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,\
        trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
        arp,dhcp,ospf,port,port-unreachable,redirects,router,\
        unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
        authentication-key,bfd,cost,database-filter,dead-interval,\
        hello-interval,message-digest-key,mtu,mtu-ignore,network,\
        passive-interface,priority,retransmit-interval,shutdown,\
        transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
        timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
        trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
        mac-phy-status,management-address,max-frame-size,\
        port-description,port-protocol-vlan,port-vlan,power-mdi,\
        protocol-identity,system-capabilities,system-description,\
        system-name,vid-management,vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\
        input,output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
        cost,disable,enable,guard,link-type,mst,port,port-priority,\
        vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
        192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
        link-local,nd,neighbor',
    'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
        unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
        egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Validation rules for the Lenovo NE0152T switch.  Same schema as
# default_cnos; differs in the per-model values (52 ethernet ports and
# 10/100/1000/10000 interface speeds).
NE0152T = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    'vlan_access_map_name': 'TEXT: ',
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
        keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
        static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
        vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    # NE0152T has 52 ethernet ports.
    'ethernet_interface_value': 'INTEGER_VALUE:1-52',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-52',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
        ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    # NOTE(review): key contains a space ('bfd_ multiplier'); looks
    # like a typo, but lookups use the exact key — confirm before
    # renaming.
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,\
        interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
        meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
        non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,\
        trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
        arp,dhcp,ospf,port,port-unreachable,redirects,router,\
        unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
        authentication-key,bfd,cost,database-filter,dead-interval,\
        hello-interval,message-digest-key,mtu,mtu-ignore,network,\
        passive-interface,priority,retransmit-interval,shutdown,\
        transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
        timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
        trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
        mac-phy-status,management-address,max-frame-size,\
        port-description,port-protocol-vlan,port-vlan,power-mdi,\
        protocol-identity,system-capabilities,system-description,\
        system-name,vid-management,vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\
        input,output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
        cost,disable,enable,guard,link-type,mst,port,port-priority,\
        vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
        192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
        link-local,nd,neighbor',
    'interface_speed': 'TEXT_OPTIONS:10,100,1000,10000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
        unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
        egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Validation rules for the Lenovo NE2572 switch.  Same schema as
# default_cnos; differs in the per-model values (54 ethernet ports and
# 10G/25G/40G/50G/100G interface speeds).
NE2572 = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    'vlan_access_map_name': 'TEXT: ',
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
        keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
        static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
        vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    # NE2572 has 54 ethernet ports.
    'ethernet_interface_value': 'INTEGER_VALUE:1-54',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
        ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    # NOTE(review): key contains a space ('bfd_ multiplier'); looks
    # like a typo, but lookups use the exact key — confirm before
    # renaming.
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
        meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
        non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
        arp,dhcp,ospf,port,port-unreachable,redirects,router,\
        unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
        authentication-key,bfd,cost,database-filter,dead-interval,\
        hello-interval,message-digest-key,mtu,mtu-ignore,network,\
        passive-interface,priority,retransmit-interval,shutdown,\
        transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
        timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
        trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
        mac-phy-status,management-address,max-frame-size,\
        port-description,port-protocol-vlan,port-vlan,power-mdi,\
        protocol-identity,system-capabilities,system-description,\
        system-name,vid-management,vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
        output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
        cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
        192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
        link-local,nd,neighbor',
    'interface_speed': 'TEXT_OPTIONS:10000,100000,25000,40000,50000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
        unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
        egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
-NE1032T = {
- 'vlan_id': 'INTEGER_VALUE:1-3999',
- 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
- 'vlan_name': 'TEXT:',
- 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
- 'vlan_state': 'TEXT_OPTIONS:active,suspend',
- 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
- 'vlan_querier': 'IPV4Address:',
- 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
- 'vlan_query_interval': 'INTEGER_VALUE:1-18000',
- 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
- 'vlan_report_suppression': 'INTEGER_VALUE:1-25',
- 'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
- 'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
- 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
- 'vlan_snooping_version': 'INTEGER_VALUE:2-3',
- 'vlan_access_map_name': 'TEXT: ',
- 'vlan_ethernet_interface': 'TEXT:',
- 'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
- 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
- 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
- 'vlan_filter_name': 'TEXT:',
- 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
- 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
- 'vlag_instance': 'INTEGER_VALUE:1-64',
- 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
- 'vlag_priority': 'INTEGER_VALUE:0-65535',
- 'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
- 'vlag_tier_id': 'INTEGER_VALUE:1-512',
- 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
- keepalive-interval,peer-ip,retry-interval',
- 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
- 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
- 'vlag_retry_interval': 'INTEGER_VALUE:1-300',
- 'vlag_peerip': 'IPV4Address:',
- 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
- 'bgp_as_number': 'NO_VALIDATION:1-4294967295',
- 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
- 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
- 'cluster_id_as_ip': 'IPV4Address:',
- 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
- 'confederation_identifier': 'INTEGER_VALUE:1-65535',
- 'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
- 'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
- 'maxas_limit_as': 'INTEGER_VALUE:1-2000',
- 'neighbor_ipaddress': 'IPV4Address:',
- 'neighbor_as': 'NO_VALIDATION:1-4294967295',
- 'router_id': 'IPV4Address:',
- 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
- 'bgp_holdtime': 'INTEGER_VALUE:0-3600',
- 'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
- 'addrfamily_routemap_name': 'TEXT:',
- 'reachability_half_life': 'INTEGER_VALUE:1-45',
- 'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
- 'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
- 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
- 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
- 'distance_external_AS': 'INTEGER_VALUE:1-255',
- 'distance_internal_AS': 'INTEGER_VALUE:1-255',
- 'distance_local_routes': 'INTEGER_VALUE:1-255',
- 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
- 'maxpath_numbers': 'INTEGER_VALUE:2-32',
- 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
- 'network_ip_prefix_value': 'IPV4Address:',
- 'network_ip_prefix_mask': 'IPV4Address:',
- 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
- 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
- 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
- static',
- 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
- 'bgp_neighbor_af_filtername': 'TEXT:',
- 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
- 'bgp_neighbor_af_prefixname': 'TEXT:',
- 'bgp_neighbor_af_routemap': 'TEXT:',
- 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
- 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
- 'bgp_neighbor_description': 'TEXT:',
- 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
- 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
- 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
- 'bgp_neighbor_password': 'TEXT:',
- 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
- 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
- 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
- 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
- vlan',
- 'bgp_neighbor_update_ethernet': 'TEXT:',
- 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
- 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
- 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
- 'ethernet_interface_value': 'INTEGER_VALUE:1-32',
- 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
- 'ethernet_interface_string': 'TEXT:',
- 'loopback_interface_value': 'INTEGER_VALUE:0-7',
- 'mgmt_interface_value': 'INTEGER_VALUE:0-0',
- 'vlan_interface_value': 'INTEGER_VALUE:1-4094',
- 'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
- 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
- 'portchannel_interface_string': 'TEXT:',
- 'aggregation_group_no': 'INTEGER_VALUE:1-4096',
- 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
- 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
- ipv6,neighbor',
- 'bfd_interval': 'INTEGER_VALUE:50-999',
- 'bfd_minrx': 'INTEGER_VALUE:50-999',
- 'bfd_ multiplier': 'INTEGER_VALUE:3-50',
- 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
- 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
- meticulous-keyed-md5,meticulous-keyed-sha1,simple',
- 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
- 'bfd_key_chain': 'TEXT:',
- 'bfd_key_id': 'INTEGER_VALUE:0-255',
- 'bfd_key_name': 'TEXT:',
- 'bfd_neighbor_ip': 'TEXT:',
- 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
- non-persistent',
- 'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
- 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
- 'trunk_options': 'TEXT_OPTIONS:allowed,native',
- 'trunk_vlanid': 'INTEGER_VALUE:1-3999',
- 'portCh_description': 'TEXT:',
- 'duplex_option': 'TEXT_OPTIONS:auto,full,half',
- 'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
- 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
- arp,dhcp,ospf,port,port-unreachable,redirects,router,\
- unreachables',
- 'accessgroup_name': 'TEXT:',
- 'portchannel_ipv4': 'IPV4Address:',
- 'portchannel_ipv4_mask': 'TEXT:',
- 'arp_ipaddress': 'IPV4Address:',
- 'arp_macaddress': 'TEXT:',
- 'arp_timeout_value': 'INTEGER_VALUE:60-28800',
- 'relay_ipaddress': 'IPV4Address:',
- 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
- authentication-key,bfd,cost,database-filter,dead-interval,\
- hello-interval,message-digest-key,mtu,mtu-ignore,network,\
- passive-interface,priority,retransmit-interval,shutdown,\
- transmit-delay',
- 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
- 'ospf_id_ipaddres_value': 'IPV4Address:',
- 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
- timeout',
- 'port_priority': 'INTEGER_VALUE:1-65535',
- 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
- trap-notification',
- 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
- mac-phy-status,management-address,max-frame-size,\
- port-description,port-protocol-vlan,port-vlan,power-mdi,\
- protocol-identity,system-capabilities,system-description,\
- system-name,vid-management,vlan-name',
- 'load_interval_delay': 'INTEGER_VALUE:30-300',
- 'load_interval_counter': 'INTEGER_VALUE:1-3',
- 'mac_accessgroup_name': 'TEXT:',
- 'mac_address': 'TEXT:',
- 'microburst_threshold': 'NO_VALIDATION:1-4294967295',
- 'mtu_value': 'INTEGER_VALUE:64-9216',
- 'service_instance': 'NO_VALIDATION:1-4294967295',
- 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
- output,type',
- 'service_policy_name': 'TEXT:',
- 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
- cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
- 'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
- 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
- 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
- 192,224',
- 'portchannel_ipv6_neighbor_mac': 'TEXT:',
- 'portchannel_ipv6_neighbor_address': 'IPV6Address:',
- 'portchannel_ipv6_linklocal': 'IPV6Address:',
- 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
- 'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
- 'portchannel_ipv6_dhcp': 'IPV6Address:',
- 'portchannel_ipv6_address': 'IPV6Address:',
- 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
- link-local,nd,neighbor',
- 'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto',
- 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
- unicast',
- 'stormcontrol_level': 'FLOAT:',
- 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
- egress-only',
- 'vrrp_id': 'INTEGER_VALUE:1-255',
-}
# Validation-rule table for the Lenovo NE1032 switch (32 ethernet ports).
# Each entry maps a playbook variable name to a validation spec of the form
# '<CHECK>:<argument>', e.g. 'INTEGER_VALUE:<low>-<high>' or
# 'TEXT_OPTIONS:<comma-separated allowed values>'.  The spec strings are
# parsed elsewhere in module_utils — presumably split on ':' and ',' — so
# option lists must contain no stray whitespace.  Values previously written
# with a backslash line-continuation *inside* the string literal embedded the
# continuation line's leading whitespace into the option list; they are now
# single logical strings built with implicit concatenation.
# NOTE(review): misspelled keys ('condeferation_peers_as',
# 'nexthop_crtitical_delay', 'bfd_ multiplier', 'bgp_neighbor_af_occurances',
# 'ospf_id_ipaddres_value', ...) are kept verbatim: callers look rules up by
# exact name.
NE1032 = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    # was 'TEXT: ' — trailing blank removed to match every other TEXT rule
    'vlan_access_map_name': 'TEXT:',
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,'
                            'keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    # NE1032: 32 ethernet ports
    'ethernet_interface_value': 'INTEGER_VALUE:1-32',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,'
                        'meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,arp,dhcp,'
                              'ospf,port,port-unreachable,redirects,router,'
                              'unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,authentication-key,bfd,'
                       'cost,database-filter,dead-interval,hello-interval,'
                       'message-digest-key,mtu,mtu-ignore,network,'
                       'passive-interface,priority,retransmit-interval,'
                       'shutdown,transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,mac-phy-status,'
                        'management-address,max-frame-size,port-description,'
                        'port-protocol-vlan,port-vlan,power-mdi,'
                        'protocol-identity,system-capabilities,'
                        'system-description,system-name,vid-management,'
                        'vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,cost,disable,'
                             'enable,guard,link-type,mst,port,port-priority,'
                             'vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,link-local,nd,neighbor',
    'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Validation-rule table for the Lenovo NE1072T switch (54 ethernet ports).
# Each entry maps a playbook variable name to a validation spec of the form
# '<CHECK>:<argument>', e.g. 'INTEGER_VALUE:<low>-<high>' or
# 'TEXT_OPTIONS:<comma-separated allowed values>'.  The spec strings are
# parsed elsewhere in module_utils — presumably split on ':' and ',' — so
# option lists must contain no stray whitespace.  Values previously written
# with a backslash line-continuation *inside* the string literal embedded the
# continuation line's leading whitespace into the option list; they are now
# single logical strings built with implicit concatenation.
# NOTE(review): misspelled keys ('condeferation_peers_as',
# 'nexthop_crtitical_delay', 'bfd_ multiplier', ...) are kept verbatim:
# callers look rules up by exact name.
NE1072T = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    # was 'TEXT: ' — trailing blank removed to match every other TEXT rule
    'vlan_access_map_name': 'TEXT:',
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,'
                            'keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    # NE1072T: 54 ethernet ports (only range differs from the NE1032 table)
    'ethernet_interface_value': 'INTEGER_VALUE:1-54',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,'
                        'meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,arp,dhcp,'
                              'ospf,port,port-unreachable,redirects,router,'
                              'unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,authentication-key,bfd,'
                       'cost,database-filter,dead-interval,hello-interval,'
                       'message-digest-key,mtu,mtu-ignore,network,'
                       'passive-interface,priority,retransmit-interval,'
                       'shutdown,transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,mac-phy-status,'
                        'management-address,max-frame-size,port-description,'
                        'port-protocol-vlan,port-vlan,power-mdi,'
                        'protocol-identity,system-capabilities,'
                        'system-description,system-name,vid-management,'
                        'vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,cost,disable,'
                             'enable,guard,link-type,mst,port,port-priority,'
                             'vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,link-local,nd,neighbor',
    'interface_speed': 'TEXT_OPTIONS:1000,10000,100000,25000,40000,50000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
# Validation-rule table for the Lenovo NE10032 switch (32 ethernet ports,
# no 1G speed option).  Each entry maps a playbook variable name to a
# validation spec of the form '<CHECK>:<argument>', e.g.
# 'INTEGER_VALUE:<low>-<high>' or 'TEXT_OPTIONS:<comma-separated values>'.
# The spec strings are parsed elsewhere in module_utils — presumably split
# on ':' and ',' — so option lists must contain no stray whitespace.  Values
# previously written with a backslash line-continuation *inside* the string
# literal embedded the continuation line's leading whitespace into the option
# list; they are now single logical strings built with implicit concatenation.
# NOTE(review): misspelled keys ('condeferation_peers_as',
# 'nexthop_crtitical_delay', 'bfd_ multiplier', ...) are kept verbatim:
# callers look rules up by exact name.
NE10032 = {
    'vlan_id': 'INTEGER_VALUE:1-3999',
    'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
    'vlan_name': 'TEXT:',
    'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
    'vlan_state': 'TEXT_OPTIONS:active,suspend',
    'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
    'vlan_querier': 'IPV4Address:',
    'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
    'vlan_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
    'vlan_report_suppression': 'INTEGER_VALUE:1-25',
    'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
    'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
    'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
    'vlan_snooping_version': 'INTEGER_VALUE:2-3',
    # was 'TEXT: ' — trailing blank removed to match every other TEXT rule
    'vlan_access_map_name': 'TEXT:',
    'vlan_ethernet_interface': 'TEXT:',
    'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
    'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
    'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
    'vlan_filter_name': 'TEXT:',
    'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
    'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
    'vlag_instance': 'INTEGER_VALUE:1-64',
    'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
    'vlag_priority': 'INTEGER_VALUE:0-65535',
    'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
    'vlag_tier_id': 'INTEGER_VALUE:1-512',
    'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,'
                            'keepalive-interval,peer-ip,retry-interval',
    'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
    'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
    'vlag_retry_interval': 'INTEGER_VALUE:1-300',
    'vlag_peerip': 'IPV4Address:',
    'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
    'bgp_as_number': 'NO_VALIDATION:1-4294967295',
    'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
    'cluster_id_as_ip': 'IPV4Address:',
    'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
    'confederation_identifier': 'INTEGER_VALUE:1-65535',
    'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
    'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
    'maxas_limit_as': 'INTEGER_VALUE:1-2000',
    'neighbor_ipaddress': 'IPV4Address:',
    'neighbor_as': 'NO_VALIDATION:1-4294967295',
    'router_id': 'IPV4Address:',
    'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
    'bgp_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
    'addrfamily_routemap_name': 'TEXT:',
    'reachability_half_life': 'INTEGER_VALUE:1-45',
    'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
    'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
    'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
    'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
    'distance_external_AS': 'INTEGER_VALUE:1-255',
    'distance_internal_AS': 'INTEGER_VALUE:1-255',
    'distance_local_routes': 'INTEGER_VALUE:1-255',
    'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
    'maxpath_numbers': 'INTEGER_VALUE:2-32',
    'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
    'network_ip_prefix_value': 'IPV4Address:',
    'network_ip_prefix_mask': 'IPV4Address:',
    'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
    'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
    'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,static',
    'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
    'bgp_neighbor_af_filtername': 'TEXT:',
    'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
    'bgp_neighbor_af_prefixname': 'TEXT:',
    'bgp_neighbor_af_routemap': 'TEXT:',
    'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
    'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
    'bgp_neighbor_description': 'TEXT:',
    'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
    'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
    'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
    'bgp_neighbor_password': 'TEXT:',
    'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
    'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
    'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,vlan',
    'bgp_neighbor_update_ethernet': 'TEXT:',
    'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
    'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
    'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
    # NE10032: 32 ethernet ports
    'ethernet_interface_value': 'INTEGER_VALUE:1-32',
    'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
    'ethernet_interface_string': 'TEXT:',
    'loopback_interface_value': 'INTEGER_VALUE:0-7',
    'mgmt_interface_value': 'INTEGER_VALUE:0-0',
    'vlan_interface_value': 'INTEGER_VALUE:1-4094',
    'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
    'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
    'portchannel_interface_string': 'TEXT:',
    'aggregation_group_no': 'INTEGER_VALUE:1-4096',
    'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
    'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,ipv6,neighbor',
    'bfd_interval': 'INTEGER_VALUE:50-999',
    'bfd_minrx': 'INTEGER_VALUE:50-999',
    'bfd_ multiplier': 'INTEGER_VALUE:3-50',
    'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
    'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,'
                        'meticulous-keyed-md5,meticulous-keyed-sha1,simple',
    'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
    'bfd_key_chain': 'TEXT:',
    'bfd_key_id': 'INTEGER_VALUE:0-255',
    'bfd_key_name': 'TEXT:',
    'bfd_neighbor_ip': 'TEXT:',
    'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,non-persistent',
    'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
    'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
    'trunk_options': 'TEXT_OPTIONS:allowed,native',
    'trunk_vlanid': 'INTEGER_VALUE:1-3999',
    'portCh_description': 'TEXT:',
    'duplex_option': 'TEXT_OPTIONS:auto,full,half',
    'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
    'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,arp,dhcp,'
                              'ospf,port,port-unreachable,redirects,router,'
                              'unreachables',
    'accessgroup_name': 'TEXT:',
    'portchannel_ipv4': 'IPV4Address:',
    'portchannel_ipv4_mask': 'TEXT:',
    'arp_ipaddress': 'IPV4Address:',
    'arp_macaddress': 'TEXT:',
    'arp_timeout_value': 'INTEGER_VALUE:60-28800',
    'relay_ipaddress': 'IPV4Address:',
    'ip_ospf_options': 'TEXT_OPTIONS:authentication,authentication-key,bfd,'
                       'cost,database-filter,dead-interval,hello-interval,'
                       'message-digest-key,mtu,mtu-ignore,network,'
                       'passive-interface,priority,retransmit-interval,'
                       'shutdown,transmit-delay',
    'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
    'ospf_id_ipaddres_value': 'IPV4Address:',
    'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,timeout',
    'port_priority': 'INTEGER_VALUE:1-65535',
    'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,trap-notification',
    'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,mac-phy-status,'
                        'management-address,max-frame-size,port-description,'
                        'port-protocol-vlan,port-vlan,power-mdi,'
                        'protocol-identity,system-capabilities,'
                        'system-description,system-name,vid-management,'
                        'vlan-name',
    'load_interval_delay': 'INTEGER_VALUE:30-300',
    'load_interval_counter': 'INTEGER_VALUE:1-3',
    'mac_accessgroup_name': 'TEXT:',
    'mac_address': 'TEXT:',
    'microburst_threshold': 'NO_VALIDATION:1-4294967295',
    'mtu_value': 'INTEGER_VALUE:64-9216',
    'service_instance': 'NO_VALIDATION:1-4294967295',
    'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,output,type',
    'service_policy_name': 'TEXT:',
    'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,cost,disable,'
                             'enable,guard,link-type,mst,port,port-priority,'
                             'vlan',
    'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
    'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
    'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,192,224',
    'portchannel_ipv6_neighbor_mac': 'TEXT:',
    'portchannel_ipv6_neighbor_address': 'IPV6Address:',
    'portchannel_ipv6_linklocal': 'IPV6Address:',
    'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
    'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
    'portchannel_ipv6_dhcp': 'IPV6Address:',
    'portchannel_ipv6_address': 'IPV6Address:',
    'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,link-local,nd,neighbor',
    # NE10032 is 10G+ only: no '1000' speed option, unlike NE1032/NE1072T
    'interface_speed': 'TEXT_OPTIONS:10000,100000,25000,40000,50000,auto',
    'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,unicast',
    'stormcontrol_level': 'FLOAT:',
    'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,egress-only',
    'vrrp_id': 'INTEGER_VALUE:1-255',
}
-g8272_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999',
- 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
- 'vlan_name': 'TEXT:',
- 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
- 'vlan_state': 'TEXT_OPTIONS:active,suspend',
- 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
- 'vlan_querier': 'IPV4Address:',
- 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
- 'vlan_query_interval': 'INTEGER_VALUE:1-18000',
- 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
- 'vlan_report_suppression': 'INTEGER_VALUE:1-25',
- 'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
- 'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
- 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
- 'vlan_snooping_version': 'INTEGER_VALUE:2-3',
- 'vlan_access_map_name': 'TEXT: ',
- 'vlan_ethernet_interface': 'TEXT:',
- 'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
- 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
- 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
- 'vlan_filter_name': 'TEXT:',
- 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
- 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
- 'vlag_instance': 'INTEGER_VALUE:1-64',
- 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
- 'vlag_priority': 'INTEGER_VALUE:0-65535',
- 'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
- 'vlag_tier_id': 'INTEGER_VALUE:1-512',
- 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
- keepalive-interval,peer-ip,retry-interval',
- 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
- 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
- 'vlag_retry_interval': 'INTEGER_VALUE:1-300',
- 'vlag_peerip': 'IPV4Address:',
- 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
- 'bgp_as_number': 'NO_VALIDATION:1-4294967295',
- 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
- 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
- 'cluster_id_as_ip': 'IPV4Address:',
- 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
- 'confederation_identifier': 'INTEGER_VALUE:1-65535',
- 'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
- 'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
- 'maxas_limit_as': 'INTEGER_VALUE:1-2000',
- 'neighbor_ipaddress': 'IPV4Address:',
- 'neighbor_as': 'NO_VALIDATION:1-4294967295',
- 'router_id': 'IPV4Address:',
- 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
- 'bgp_holdtime': 'INTEGER_VALUE:0-3600',
- 'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
- 'addrfamily_routemap_name': 'TEXT:',
- 'reachability_half_life': 'INTEGER_VALUE:1-45',
- 'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
- 'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
- 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
- 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
- 'distance_external_AS': 'INTEGER_VALUE:1-255',
- 'distance_internal_AS': 'INTEGER_VALUE:1-255',
- 'distance_local_routes': 'INTEGER_VALUE:1-255',
- 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
- 'maxpath_numbers': 'INTEGER_VALUE:2-32',
- 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
- 'network_ip_prefix_value': 'IPV4Address:',
- 'network_ip_prefix_mask': 'IPV4Address:',
- 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
- 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
- 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
- static',
- 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
- 'bgp_neighbor_af_filtername': 'TEXT:',
- 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
- 'bgp_neighbor_af_prefixname': 'TEXT:',
- 'bgp_neighbor_af_routemap': 'TEXT:',
- 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
- 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
- 'bgp_neighbor_description': 'TEXT:',
- 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
- 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
- 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
- 'bgp_neighbor_password': 'TEXT:',
- 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
- 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
- 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
- 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
- vlan',
- 'bgp_neighbor_update_ethernet': 'TEXT:',
- 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
- 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
- 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
- 'ethernet_interface_value': 'INTEGER_VALUE:1-54',
- 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-54',
- 'ethernet_interface_string': 'TEXT:',
- 'loopback_interface_value': 'INTEGER_VALUE:0-7',
- 'mgmt_interface_value': 'INTEGER_VALUE:0-0',
- 'vlan_interface_value': 'INTEGER_VALUE:1-4094',
- 'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
- 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
- 'portchannel_interface_string': 'TEXT:',
- 'aggregation_group_no': 'INTEGER_VALUE:1-4096',
- 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
- 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
- ipv6,neighbor',
- 'bfd_interval': 'INTEGER_VALUE:50-999',
- 'bfd_minrx': 'INTEGER_VALUE:50-999',
- 'bfd_ multiplier': 'INTEGER_VALUE:3-50',
- 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
- 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
- meticulous-keyed-md5,meticulous-keyed-sha1,simple',
- 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
- 'bfd_key_chain': 'TEXT:',
- 'bfd_key_id': 'INTEGER_VALUE:0-255',
- 'bfd_key_name': 'TEXT:',
- 'bfd_neighbor_ip': 'TEXT:',
- 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
- non-persistent',
- 'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
- 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
- 'trunk_options': 'TEXT_OPTIONS:allowed,native',
- 'trunk_vlanid': 'INTEGER_VALUE:1-3999',
- 'portCh_description': 'TEXT:',
- 'duplex_option': 'TEXT_OPTIONS:auto,full,half',
- 'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
- 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
- arp,dhcp,ospf,port,port-unreachable,redirects,router,\
- unreachables',
- 'accessgroup_name': 'TEXT:',
- 'portchannel_ipv4': 'IPV4Address:',
- 'portchannel_ipv4_mask': 'TEXT:',
- 'arp_ipaddress': 'IPV4Address:',
- 'arp_macaddress': 'TEXT:',
- 'arp_timeout_value': 'INTEGER_VALUE:60-28800',
- 'relay_ipaddress': 'IPV4Address:',
- 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
- authentication-key,bfd,cost,database-filter,dead-interval,\
- hello-interval,message-digest-key,mtu,mtu-ignore,network,\
- passive-interface,priority,retransmit-interval,shutdown,\
- transmit-delay',
- 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
- 'ospf_id_ipaddres_value': 'IPV4Address:',
- 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
- timeout',
- 'port_priority': 'INTEGER_VALUE:1-65535',
- 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
- trap-notification',
- 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
- mac-phy-status,management-address,max-frame-size,\
- port-description,port-protocol-vlan,port-vlan,power-mdi,\
- protocol-identity,system-capabilities,system-description,\
- system-name,vid-management,vlan-name',
- 'load_interval_delay': 'INTEGER_VALUE:30-300',
- 'load_interval_counter': 'INTEGER_VALUE:1-3',
- 'mac_accessgroup_name': 'TEXT:',
- 'mac_address': 'TEXT:',
- 'microburst_threshold': 'NO_VALIDATION:1-4294967295',
- 'mtu_value': 'INTEGER_VALUE:64-9216',
- 'service_instance': 'NO_VALIDATION:1-4294967295',
- 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,input,\
- output,type',
- 'service_policy_name': 'TEXT:',
- 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
- cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
- 'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
- 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
- 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
- 192,224',
- 'portchannel_ipv6_neighbor_mac': 'TEXT:',
- 'portchannel_ipv6_neighbor_address': 'IPV6Address:',
- 'portchannel_ipv6_linklocal': 'IPV6Address:',
- 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
- 'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
- 'portchannel_ipv6_dhcp': 'IPV6Address:',
- 'portchannel_ipv6_address': 'IPV6Address:',
- 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
- link-local,nd,neighbor',
- 'interface_speed': 'TEXT_OPTIONS:1000,10000,40000',
- 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
- unicast',
- 'stormcontrol_level': 'FLOAT:',
- 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
- egress-only',
- 'vrrp_id': 'INTEGER_VALUE:1-255',
- }
-g8296_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999',
- 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
- 'vlan_name': 'TEXT:',
- 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
- 'vlan_state': 'TEXT_OPTIONS:active,suspend',
- 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
- 'vlan_querier': 'IPV4Address:',
- 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
- 'vlan_query_interval': 'INTEGER_VALUE:1-18000',
- 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
- 'vlan_report_suppression': 'INTEGER_VALUE:1-25',
- 'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
- 'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
- 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
- 'vlan_snooping_version': 'INTEGER_VALUE:2-3',
- 'vlan_access_map_name': 'TEXT: ',
- 'vlan_ethernet_interface': 'TEXT:',
- 'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
- 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
- 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
- 'vlan_filter_name': 'TEXT:',
- 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
- 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
- 'vlag_instance': 'INTEGER_VALUE:1-128',
- 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
- 'vlag_priority': 'INTEGER_VALUE:0-65535',
- 'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
- 'vlag_tier_id': 'INTEGER_VALUE:1-512',
- 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
- keepalive-interval,peer-ip,retry-interval',
- 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
- 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
- 'vlag_retry_interval': 'INTEGER_VALUE:1-300',
- 'vlag_peerip': 'IPV4Address:',
- 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
- 'bgp_as_number': 'NO_VALIDATION:1-4294967295',
- 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
- 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
- 'cluster_id_as_ip': 'IPV4Address:',
- 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
- 'confederation_identifier': 'INTEGER_VALUE:1-65535',
- 'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
- 'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
- 'maxas_limit_as': 'INTEGER_VALUE:1-2000',
- 'neighbor_ipaddress': 'IPV4Address:',
- 'neighbor_as': 'NO_VALIDATION:1-4294967295',
- 'router_id': 'IPV4Address:',
- 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
- 'bgp_holdtime': 'INTEGER_VALUE:0-3600',
- 'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
- 'addrfamily_routemap_name': 'TEXT:',
- 'reachability_half_life': 'INTEGER_VALUE:1-45',
- 'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
- 'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
- 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
- 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
- 'distance_external_AS': 'INTEGER_VALUE:1-255',
- 'distance_internal_AS': 'INTEGER_VALUE:1-255',
- 'distance_local_routes': 'INTEGER_VALUE:1-255',
- 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
- 'maxpath_numbers': 'INTEGER_VALUE:2-32',
- 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
- 'network_ip_prefix_value': 'IPV4Address:',
- 'network_ip_prefix_mask': 'IPV4Address:',
- 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
- 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
- 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
- static',
- 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
- 'bgp_neighbor_af_filtername': 'TEXT:',
- 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
- 'bgp_neighbor_af_prefixname': 'TEXT:',
- 'bgp_neighbor_af_routemap': 'TEXT:',
- 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
- 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
- 'bgp_neighbor_description': 'TEXT:',
- 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
- 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
- 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
- 'bgp_neighbor_password': 'TEXT:',
- 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
- 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
- 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
- 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
- vlan',
- 'bgp_neighbor_update_ethernet': 'TEXT:',
- 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
- 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
- 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
- 'ethernet_interface_value': 'INTEGER_VALUE:1-96',
- 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-96',
- 'ethernet_interface_string': 'TEXT:',
- 'loopback_interface_value': 'INTEGER_VALUE:0-7',
- 'mgmt_interface_value': 'INTEGER_VALUE:0-0',
- 'vlan_interface_value': 'INTEGER_VALUE:1-4094',
- 'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
- 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
- 'portchannel_interface_string': 'TEXT:',
- 'aggregation_group_no': 'INTEGER_VALUE:1-4096',
- 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
- 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
- ipv6,neighbor',
- 'bfd_interval': 'INTEGER_VALUE:50-999',
- 'bfd_minrx': 'INTEGER_VALUE:50-999',
- 'bfd_ multiplier': 'INTEGER_VALUE:3-50',
- 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
- 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
- meticulous-keyed-md5,meticulous-keyed-sha1,simple',
- 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
- 'bfd_key_chain': 'TEXT:',
- 'bfd_key_id': 'INTEGER_VALUE:0-255',
- 'bfd_key_name': 'TEXT:',
- 'bfd_neighbor_ip': 'TEXT:',
- 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
- non-persistent',
- 'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
- 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
- 'trunk_options': 'TEXT_OPTIONS:allowed,native',
- 'trunk_vlanid': 'INTEGER_VALUE:1-3999',
- 'portCh_description': 'TEXT:',
- 'duplex_option': 'TEXT_OPTIONS:auto,full,half',
- 'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
- 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,\
- arp,dhcp,ospf,port,port-unreachable,redirects,router,\
- unreachables',
- 'accessgroup_name': 'TEXT:',
- 'portchannel_ipv4': 'IPV4Address:',
- 'portchannel_ipv4_mask': 'TEXT:',
- 'arp_ipaddress': 'IPV4Address:',
- 'arp_macaddress': 'TEXT:',
- 'arp_timeout_value': 'INTEGER_VALUE:60-28800',
- 'relay_ipaddress': 'IPV4Address:',
- 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
- authentication-key,bfd,cost,database-filter,dead-interval,\
- hello-interval,message-digest-key,mtu,mtu-ignore,network,\
- passive-interface,priority,retransmit-interval,shutdown,\
- transmit-delay',
- 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
- 'ospf_id_ipaddres_value': 'IPV4Address:',
- 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
- timeout',
- 'port_priority': 'INTEGER_VALUE:1-65535',
- 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
- trap-notification',
- 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
- mac-phy-status,management-address,max-frame-size,\
- port-description,port-protocol-vlan,port-vlan,power-mdi,\
- protocol-identity,system-capabilities,system-description,\
- system-name,vid-management,vlan-name',
- 'load_interval_delay': 'INTEGER_VALUE:30-300',
- 'load_interval_counter': 'INTEGER_VALUE:1-3',
- 'mac_accessgroup_name': 'TEXT:',
- 'mac_address': 'TEXT:',
- 'microburst_threshold': 'NO_VALIDATION:1-4294967295',
- 'mtu_value': 'INTEGER_VALUE:64-9216',
- 'service_instance': 'NO_VALIDATION:1-4294967295',
- 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\
- input,output,type',
- 'service_policy_name': 'TEXT:',
- 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
- cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
- 'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
- 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
- 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
- 192,224',
- 'portchannel_ipv6_neighbor_mac': 'TEXT:',
- 'portchannel_ipv6_neighbor_address': 'IPV6Address:',
- 'portchannel_ipv6_linklocal': 'IPV6Address:',
- 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
- 'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
- 'portchannel_ipv6_dhcp': 'IPV6Address:',
- 'portchannel_ipv6_address': 'IPV6Address:',
- 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
- link-local,nd,neighbor',
- 'interface_speed': 'TEXT_OPTIONS:1000,10000,40000,auto',
- 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
- unicast',
- 'stormcontrol_level': 'FLOAT:',
- 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
- egress-only',
- 'vrrp_id': 'INTEGER_VALUE:1-255',
- }
-g8332_cnos = {'vlan_id': 'INTEGER_VALUE:1-3999',
- 'vlan_id_range': 'INTEGER_VALUE_RANGE:1-3999',
- 'vlan_name': 'TEXT:',
- 'vlan_flood': 'TEXT_OPTIONS:ipv4,ipv6',
- 'vlan_state': 'TEXT_OPTIONS:active,suspend',
- 'vlan_last_member_query_interval': 'INTEGER_VALUE:1-25',
- 'vlan_querier': 'IPV4Address:',
- 'vlan_querier_timeout': 'INTEGER_VALUE:1-65535',
- 'vlan_query_interval': 'INTEGER_VALUE:1-18000',
- 'vlan_query_max_response_time': 'INTEGER_VALUE:1-25',
- 'vlan_report_suppression': 'INTEGER_VALUE:1-25',
- 'vlan_robustness_variable': 'INTEGER_VALUE:1-7',
- 'vlan_startup_query_count': 'INTEGER_VALUE:1-10',
- 'vlan_startup_query_interval': 'INTEGER_VALUE:1-18000',
- 'vlan_snooping_version': 'INTEGER_VALUE:2-3',
- 'vlan_access_map_name': 'TEXT: ',
- 'vlan_ethernet_interface': 'TEXT:',
- 'vlan_portagg_number': 'INTEGER_VALUE:1-4096',
- 'vlan_accessmap_action': 'TEXT_OPTIONS:drop,forward,redirect',
- 'vlan_dot1q_tag': 'MATCH_TEXT_OR_EMPTY:egress-only',
- 'vlan_filter_name': 'TEXT:',
- 'vlag_auto_recovery': 'INTEGER_VALUE:240-3600',
- 'vlag_config_consistency': 'TEXT_OPTIONS:disable,strict',
- 'vlag_instance': 'INTEGER_VALUE:1-128',
- 'vlag_port_aggregation': 'INTEGER_VALUE:1-4096',
- 'vlag_priority': 'INTEGER_VALUE:0-65535',
- 'vlag_startup_delay': 'INTEGER_VALUE:0-3600',
- 'vlag_tier_id': 'INTEGER_VALUE:1-512',
- 'vlag_hlthchk_options': 'TEXT_OPTIONS:keepalive-attempts,\
- keepalive-interval,peer-ip,retry-interval',
- 'vlag_keepalive_attempts': 'INTEGER_VALUE:1-24',
- 'vlag_keepalive_interval': 'INTEGER_VALUE:2-300',
- 'vlag_retry_interval': 'INTEGER_VALUE:1-300',
- 'vlag_peerip': 'IPV4Address:',
- 'vlag_peerip_vrf': 'TEXT_OPTIONS:default,management',
- 'bgp_as_number': 'NO_VALIDATION:1-4294967295',
- 'bgp_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
- 'bgp_bgp_local_count': 'INTEGER_VALUE:2-64',
- 'cluster_id_as_ip': 'IPV4Address:',
- 'cluster_id_as_number': 'NO_VALIDATION:1-4294967295',
- 'confederation_identifier': 'INTEGER_VALUE:1-65535',
- 'condeferation_peers_as': 'INTEGER_VALUE:1-65535',
- 'stalepath_delay_value': 'INTEGER_VALUE:1-3600',
- 'maxas_limit_as': 'INTEGER_VALUE:1-2000',
- 'neighbor_ipaddress': 'IPV4Address:',
- 'neighbor_as': 'NO_VALIDATION:1-4294967295',
- 'router_id': 'IPV4Address:',
- 'bgp_keepalive_interval': 'INTEGER_VALUE:0-3600',
- 'bgp_holdtime': 'INTEGER_VALUE:0-3600',
- 'bgp_aggregate_prefix': 'IPV4AddressWithMask:',
- 'addrfamily_routemap_name': 'TEXT:',
- 'reachability_half_life': 'INTEGER_VALUE:1-45',
- 'start_reuse_route_value': 'INTEGER_VALUE:1-20000',
- 'start_suppress_route_value': 'INTEGER_VALUE:1-20000',
- 'max_duration_to_suppress_route': 'INTEGER_VALUE:1-255',
- 'unreachability_halftime_for_penalty': 'INTEGER_VALUE:1-45',
- 'distance_external_AS': 'INTEGER_VALUE:1-255',
- 'distance_internal_AS': 'INTEGER_VALUE:1-255',
- 'distance_local_routes': 'INTEGER_VALUE:1-255',
- 'maxpath_option': 'TEXT_OPTIONS:ebgp,ibgp',
- 'maxpath_numbers': 'INTEGER_VALUE:2-32',
- 'network_ip_prefix_with_mask': 'IPV4AddressWithMask:',
- 'network_ip_prefix_value': 'IPV4Address:',
- 'network_ip_prefix_mask': 'IPV4Address:',
- 'nexthop_crtitical_delay': 'NO_VALIDATION:1-4294967295',
- 'nexthop_noncrtitical_delay': 'NO_VALIDATION:1-4294967295',
- 'addrfamily_redistribute_option': 'TEXT_OPTIONS:direct,ospf,\
- static',
- 'bgp_neighbor_af_occurances': 'INTEGER_VALUE:1-10',
- 'bgp_neighbor_af_filtername': 'TEXT:',
- 'bgp_neighbor_af_maxprefix': 'INTEGER_VALUE:1-15870',
- 'bgp_neighbor_af_prefixname': 'TEXT:',
- 'bgp_neighbor_af_routemap': 'TEXT:',
- 'bgp_neighbor_address_family': 'TEXT_OPTIONS:ipv4,ipv6',
- 'bgp_neighbor_connection_retrytime': 'INTEGER_VALUE:1-65535',
- 'bgp_neighbor_description': 'TEXT:',
- 'bgp_neighbor_maxhopcount': 'INTEGER_VALUE:1-255',
- 'bgp_neighbor_local_as': 'NO_VALIDATION:1-4294967295',
- 'bgp_neighbor_maxpeers': 'INTEGER_VALUE:1-96',
- 'bgp_neighbor_password': 'TEXT:',
- 'bgp_neighbor_timers_Keepalive': 'INTEGER_VALUE:0-3600',
- 'bgp_neighbor_timers_holdtime': 'INTEGER_VALUE:0-3600',
- 'bgp_neighbor_ttl_hops': 'INTEGER_VALUE:1-254',
- 'bgp_neighbor_update_options': 'TEXT_OPTIONS:ethernet,loopback,\
- vlan',
- 'bgp_neighbor_update_ethernet': 'TEXT:',
- 'bgp_neighbor_update_loopback': 'INTEGER_VALUE:0-7',
- 'bgp_neighbor_update_vlan': 'INTEGER_VALUE:1-4094',
- 'bgp_neighbor_weight': 'INTEGER_VALUE:0-65535',
- 'ethernet_interface_value': 'INTEGER_VALUE:1-32',
- 'ethernet_interface_range': 'INTEGER_VALUE_RANGE:1-32',
- 'ethernet_interface_string': 'TEXT:',
- 'loopback_interface_value': 'INTEGER_VALUE:0-7',
- 'mgmt_interface_value': 'INTEGER_VALUE:0-0',
- 'vlan_interface_value': 'INTEGER_VALUE:1-4094',
- 'portchannel_interface_value': 'INTEGER_VALUE:1-4096',
- 'portchannel_interface_range': 'INTEGER_VALUE_RANGE:1-4096',
- 'portchannel_interface_string': 'TEXT:',
- 'aggregation_group_no': 'INTEGER_VALUE:1-4096',
- 'aggregation_group_mode': 'TEXT_OPTIONS:active,on,passive',
- 'bfd_options': 'TEXT_OPTIONS:authentication,echo,interval,ipv4,\
- ipv6,neighbor',
- 'bfd_interval': 'INTEGER_VALUE:50-999',
- 'bfd_minrx': 'INTEGER_VALUE:50-999',
- 'bfd_ multiplier': 'INTEGER_VALUE:3-50',
- 'bfd_ipv4_options': 'TEXT_OPTIONS:authentication,echo,interval',
- 'bfd_auth_options': 'TEXT_OPTIONS:keyed-md5,keyed-sha1,\
- meticulous-keyed-md5,meticulous-keyed-sha1,simple',
- 'bfd_key_options': 'TEXT_OPTIONS:key-chain,key-id',
- 'bfd_key_chain': 'TEXT:',
- 'bfd_key_id': 'INTEGER_VALUE:0-255',
- 'bfd_key_name': 'TEXT:',
- 'bfd_neighbor_ip': 'TEXT:',
- 'bfd_neighbor_options': 'TEXT_OPTIONS:admin-down,multihop,\
- non-persistent',
- 'bfd_access_vlan': 'INTEGER_VALUE:1-3999',
- 'bfd_bridgeport_mode': 'TEXT_OPTIONS:access,dot1q-tunnel,trunk',
- 'trunk_options': 'TEXT_OPTIONS:allowed,native',
- 'trunk_vlanid': 'INTEGER_VALUE:1-3999',
- 'portCh_description': 'TEXT:',
- 'duplex_option': 'TEXT_OPTIONS:auto,full,half',
- 'flowcontrol_options': 'TEXT_OPTIONS:receive,send',
- 'portchannel_ip_options': 'TEXT_OPTIONS:access-group,address,arp,\
- dhcp,ospf,port,port-unreachable,redirects,router,unreachables',
- 'accessgroup_name': 'TEXT:',
- 'portchannel_ipv4': 'IPV4Address:',
- 'portchannel_ipv4_mask': 'TEXT:',
- 'arp_ipaddress': 'IPV4Address:',
- 'arp_macaddress': 'TEXT:',
- 'arp_timeout_value': 'INTEGER_VALUE:60-28800',
- 'relay_ipaddress': 'IPV4Address:',
- 'ip_ospf_options': 'TEXT_OPTIONS:authentication,\
- authentication-key,bfd,cost,database-filter,dead-interval,\
- hello-interval,message-digest-key,mtu,mtu-ignore,network,\
- passive-interface,priority,retransmit-interval,shutdown,\
- transmit-delay',
- 'ospf_id_decimal_value': 'NO_VALIDATION:1-4294967295',
- 'ospf_id_ipaddres_value': 'IPV4Address:',
- 'lacp_options': 'TEXT_OPTIONS:port-priority,suspend-individual,\
- timeout',
- 'port_priority': 'INTEGER_VALUE:1-65535',
- 'lldp_options': 'TEXT_OPTIONS:receive,tlv-select,transmit,\
- trap-notification',
- 'lldp_tlv_options': 'TEXT_OPTIONS:link-aggregation,\
- mac-phy-status,management-address,max-frame-size,\
- port-description,port-protocol-vlan,port-vlan,power-mdi,\
- protocol-identity,system-capabilities,system-description,\
- system-name,vid-management,vlan-name',
- 'load_interval_delay': 'INTEGER_VALUE:30-300',
- 'load_interval_counter': 'INTEGER_VALUE:1-3',
- 'mac_accessgroup_name': 'TEXT:',
- 'mac_address': 'TEXT:',
- 'microburst_threshold': 'NO_VALIDATION:1-4294967295',
- 'mtu_value': 'INTEGER_VALUE:64-9216',
- 'service_instance': 'NO_VALIDATION:1-4294967295',
- 'service_policy_options': 'TEXT_OPTIONS:copp-system-policy,\
- input,output,type',
- 'service_policy_name': 'TEXT:',
- 'spanning_tree_options': 'TEXT_OPTIONS:bpdufilter,bpduguard,\
- cost,disable,enable,guard,link-type,mst,port,port-priority,vlan',
- 'spanning_tree_cost': 'NO_VALIDATION:1-200000000',
- 'spanning_tree_interfacerange': 'INTEGER_VALUE_RANGE:1-3999',
- 'spanning_tree_portpriority': 'TEXT_OPTIONS:0,32,64,96,128,160,\
- 192,224',
- 'portchannel_ipv6_neighbor_mac': 'TEXT:',
- 'portchannel_ipv6_neighbor_address': 'IPV6Address:',
- 'portchannel_ipv6_linklocal': 'IPV6Address:',
- 'portchannel_ipv6_dhcp_vlan': 'INTEGER_VALUE:1-4094',
- 'portchannel_ipv6_dhcp_ethernet': 'TEXT:',
- 'portchannel_ipv6_dhcp': 'IPV6Address:',
- 'portchannel_ipv6_address': 'IPV6Address:',
- 'portchannel_ipv6_options': 'TEXT_OPTIONS:address,dhcp,\
- link-local,nd,neighbor',
- 'interface_speed': 'TEXT_OPTIONS:1000,10000,40000,50000,auto',
- 'stormcontrol_options': 'TEXT_OPTIONS:broadcast,multicast,\
- unicast',
- 'stormcontrol_level': 'FLOAT:',
- 'portchannel_dot1q_tag': 'TEXT_OPTIONS:disable,enable,\
- egress-only',
- 'vrrp_id': 'INTEGER_VALUE:1-255',
- }
diff --git a/lib/ansible/module_utils/network/cnos/cnos_errorcodes.py b/lib/ansible/module_utils/network/cnos/cnos_errorcodes.py
deleted file mode 100644
index 3a83af00fc..0000000000
--- a/lib/ansible/module_utils/network/cnos/cnos_errorcodes.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their own
-# license to the complete work.
-#
-# Copyright (C) 2017 Lenovo, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Contains error codes and methods
-# Lenovo Networking
-
-errorDict = {0: 'Success',
- 1: 'NOK',
- 101: 'Device Response Timed out',
- 102: 'Command Not supported - Use CLI command',
- 103: 'Invalid Context',
- 104: 'Command Value Not Supported as of Now. Use vlan Id only',
- 105: 'Invalid interface Range',
- 106: 'Please provide Enable Password.',
- 108: '',
- 109: '',
- 110: 'Invalid protocol option',
- 111: 'The Value is not Integer',
- 112: 'The Value is not Float',
- 113: 'Value is not in Range',
- 114: 'Range value is not Integer',
- 115: 'Value is not in Options',
- 116: 'The Value is not Long',
- 117: 'Range value is not Long',
- 118: 'The Value cannot be empty',
- 119: 'The Value is not String',
- 120: 'The Value is not Matching',
- 121: 'The Value is not IPV4 Address',
- 122: 'The Value is not IPV6 Address',
- 123: '',
- 124: '',
- 125: '',
- 126: '',
- 127: '',
- 128: '',
- 129: '',
- 130: 'Invalid Access Map Name',
- 131: 'Invalid Vlan Dot1q Tag',
- 132: 'Invalid Vlan filter value',
- 133: 'Invalid Vlan Range Value',
- 134: 'Invalid Vlan Id',
- 135: 'Invalid Vlan Access Map Action',
- 136: 'Invalid Vlan Access Map Name',
- 137: 'Invalid Access List',
- 138: 'Invalid Vlan Access Map parameter',
- 139: 'Invalid Vlan Name',
- 140: 'Invalid Vlan Flood value,',
- 141: 'Invalid Vlan State Value',
- 142: 'Invalid Vlan Last Member query Interval',
- 143: 'Invalid Querier IP address',
- 144: 'Invalid Querier Time out',
- 145: 'Invalid Query Interval',
- 146: 'Invalid Vlan query max response time',
- 147: 'Invalid vlan robustness variable',
- 148: 'Invalid Vlan Startup Query count',
- 149: 'Invalid vlan Startup Query Interval',
- 150: 'Invalid Vlan snooping version',
- 151: 'Invalid Vlan Ethernet Interface',
- 152: 'Invalid Vlan Port Tag Number',
- 153: 'Invalid mrouter option',
- 154: 'Invalid Vlan Option',
- 155: '',
- 156: '',
- 157: '',
- 158: '',
- 159: '',
- 160: 'Invalid Vlag Auto Recovery Value',
- 161: 'Invalid Vlag Config Consistency Value',
- 162: 'Invalid Vlag Port Aggregation Number',
- 163: 'Invalid Vlag Priority Value',
- 164: 'Invalid Vlag Startup delay value',
- 165: 'Invalid Vlag Trie Id',
- 166: 'Invalid Vlag Instance Option',
- 167: 'Invalid Vlag Keep Alive Attempts',
- 168: 'Invalid Vlag Keep Alive Interval',
- 169: 'Invalid Vlag Retry Interval',
- 170: 'Invalid Vlag Peer Ip VRF Value',
- 171: 'Invalid Vlag Health Check Options',
- 172: 'Invalid Vlag Option',
- 173: '',
- 174: '',
- 175: '',
- 176: 'Invalid BGP As Number',
- 177: 'Invalid Routing protocol option',
- 178: 'Invalid BGP Address Family',
- 179: 'Invalid AS Path options',
- 180: 'Invalid BGP med options',
- 181: 'Invalid Best Path option',
- 182: 'Invalid BGP Local count number',
- 183: 'Cluster Id has to either IP or AS Number',
- 184: 'Invalid confederation identifier',
- 185: 'Invalid Confederation Peer AS Value',
- 186: 'Invalid Confederation Option',
- 187: 'Invalid state path relay value',
- 188: 'Invalid Maxas Limit AS Value',
- 189: 'Invalid Neighbor IP Address or Neighbor AS Number',
- 190: 'Invalid Router Id',
- 191: 'Invalid BGP Keep Alive Interval',
- 192: 'Invalid BGP Hold time',
- 193: 'Invalid BGP Option',
- 194: 'Invalid BGP Address Family option',
- 195: 'Invalid BGP Address Family Redistribution option. ',
- 196: 'Invalid BGP Address Family Route Map Name',
- 197: 'Invalid Next Hop Critical Delay',
- 198: 'Invalid Next Hop Non Critical Delay',
- 199: 'Invalid Multipath Number Value',
- 200: 'Invalid Aggegation Group Mode',
- 201: 'Invalid Aggregation Group No',
- 202: 'Invalid BFD Access Vlan',
- 203: 'Invalid CFD Bridgeport Mode',
- 204: 'Invalid Trunk Option',
- 205: 'Invalid BFD Option',
- 206: 'Invalid Portchannel description',
- 207: 'Invalid Portchannel duplex option',
- 208: 'Invalid Flow control option state',
- 209: 'Invalid Flow control option',
- 210: 'Invalid LACP Port priority',
- 211: 'Invalid LACP Time out options',
- 212: 'Invalid LACP Command options',
- 213: 'Invalid LLDP TLV Option',
- 214: 'Invalid LLDP Option',
- 215: 'Invalid Load interval delay',
- 216: 'Invalid Load interval Counter Number',
- 217: 'Invalid Load Interval option',
- 218: 'Invalid Mac Access Group Name',
- 219: 'Invalid Mac Address',
- 220: 'Invalid Microburst threshold value',
- 221: 'Invalid MTU Value',
- 222: 'Invalid Service instance value',
- 223: 'Invalid service policy name',
- 224: 'Invalid service policy options',
- 225: 'Invalid Interface speed value',
- 226: 'Invalid Storm control level value',
- 227: 'Invalid Storm control option',
- 228: 'Invalid Portchannel dot1q tag',
- 229: 'Invalid VRRP Id Value',
- 230: 'Invalid VRRP Options',
- 231: 'Invalid portchannel source interface option',
- 232: 'Invalid portchannel load balance options',
- 233: 'Invalid Portchannel configuration attribute',
- 234: 'Invalid BFD Interval Value',
- 235: 'Invalid BFD minrx Value',
- 236: 'Invalid BFD multiplier Value',
- 237: 'Invalid Key Chain Value',
- 238: 'Invalid key name option',
- 239: 'Invalid key id value',
- 240: 'Invalid Key Option',
- 241: 'Invalid authentication option',
- 242: 'Invalid destination Ip',
- 243: 'Invalid source Ip',
- 244: 'Invalid IP Option',
- 245: 'Invalid Access group option',
- 246: 'Invalid Access group name',
- 247: 'Invalid ARP MacAddress Value',
- 248: 'Invalid ARP timeout value',
- 249: 'Invalid ARP Option',
- 250: 'Invalid dhcp request option',
- 251: 'Invalid dhcp Client option',
- 252: 'Invalid relay Ip Address',
- 253: 'Invalid dhcp Option',
- 254: 'Invalid OSPF Option',
- 255: 'Invalid OSPF Id IP Address Value',
- 256: 'Invalid Ip Router Option',
- 257: 'Invalid Spanning tree bpdufilter Options',
- 258: 'Invalid Spanning tree bpduguard Options',
- 259: 'Invalid Spanning tree cost Options',
- 260: 'Invalid Spanning tree guard Options',
- 261: 'Invalid Spanning tree link-type Options',
- 262: 'Invalid Spanning tree link-type Options',
- 263: 'Invalid Spanning tree options',
- 264: 'Port-priority in increments of 32 is required',
- 265: 'Invalid Spanning tree vlan options',
- 266: 'Invalid IPv6 option',
- 267: 'Invalid IPV6 neighbor IP Address',
- 268: 'Invalid IPV6 neighbor mac address',
- 269: 'Invalid IPV6 dhcp option',
- 270: 'Invalid IPV6 relay address option',
- 271: 'Invalid IPV6 Ethernet option',
- 272: 'Invalid IPV6 Vlan option',
- 273: 'Invalid IPV6 Link Local option',
- 274: 'Invalid IPV6 dhcp option',
- 275: 'Invalid IPV6 Address',
- 276: 'Invalid IPV6 Address option',
- 277: 'Invalid BFD neighbor options',
- 278: 'Invalid Secondary option',
- 289: 'Invalid PortChannel IPV4 address',
- 290: 'Invalid Max Path Options',
- 291: 'Invalid Distance Local Route value',
- 292: 'Invalid Distance Internal AS value',
- 293: 'Invalid Distance External AS value',
- 294: 'Invalid BGP Reachability Half Life',
- 295: 'Invalid BGP Dampening parameter',
- 296: 'Invalid BGP Aggregate Prefix value',
- 297: 'Invalid BGP Aggregate Prefix Option',
- 298: 'Invalid BGP Address Family Route Map Name',
- 299: 'Invalid BGP Net IP Mask Value',
- 300: 'Invalid BGP Net IP Prefix Value',
- 301: 'Invalid BGP Neighbor configuration option',
- 302: 'Invalid BGP Neighbor Weight Value',
- 303: 'Invalid Neigbor update source option',
- 304: 'Invalid Ethernet slot/chassis number',
- 305: 'Invalid Loopback Interface number',
- 306: 'Invalid vlan id',
- 307: 'Invalid Number of hops',
- 308: 'Invalid Neighbor Keepalive interval',
- 309: 'Invalid Neighbor timer hold time',
- 310: 'Invalid neighbor password ',
- 311: 'Invalid Max peer limit',
- 312: 'Invalid Local AS Number',
- 313: 'Invalid maximum hop count',
- 314: 'Invalid neighbor description',
- 315: 'Invalid Neighbor connect timer value',
- 316: 'Invalid Neighbor address family option',
- 317: 'Invalid neighbor address family option',
- 318: 'Invalid route-map name',
- 319: 'Invalid route-map',
- 320: 'Invalid Name of a prefix list',
- 321: 'Invalid Filter incoming option',
- 322: 'Invalid AS path access-list name',
- 323: 'Invalid Filter route option',
- 324: 'Invalid route-map name',
- 325: 'Invalid Number of occurrences of AS number',
- 326: 'Invalid Prefix Limit'}
-
-
-def getErrorString(errorCode):
- retVal = errorDict[int(errorCode)]
- return retVal
-# EOM
diff --git a/lib/ansible/module_utils/network/edgeos/__init__.py b/lib/ansible/module_utils/network/edgeos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/edgeos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/edgeos/edgeos.py b/lib/ansible/module_utils/network/edgeos/edgeos.py
deleted file mode 100644
index 7d5a8430c2..0000000000
--- a/lib/ansible/module_utils/network/edgeos/edgeos.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2018 Red Hat Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-import json
-from ansible.module_utils._text import to_text
-from ansible.module_utils.network.common.utils import to_list
-from ansible.module_utils.connection import Connection, ConnectionError
-
-_DEVICE_CONFIGS = None
-
-
-def get_connection(module):
- if hasattr(module, '_edgeos_connection'):
- return module._edgeos_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get('network_api')
- if network_api == 'cliconf':
- module._edgeos_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg='Invalid connection type %s' % network_api)
-
- return module._edgeos_connection
-
-
-def get_capabilities(module):
- if hasattr(module, '_edgeos_capabilities'):
- return module._edgeos_capabilities
-
- capabilities = Connection(module._socket_path).get_capabilities()
- module._edgeos_capabilities = json.loads(capabilities)
- return module._edgeos_capabilities
-
-
-def get_config(module):
- global _DEVICE_CONFIGS
-
- if _DEVICE_CONFIGS is not None:
- return _DEVICE_CONFIGS
- else:
- connection = get_connection(module)
- out = connection.get_config()
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- _DEVICE_CONFIGS = cfg
- return cfg
-
-
-def run_commands(module, commands, check_rc=True):
- responses = list()
- connection = get_connection(module)
-
- for cmd in to_list(commands):
- if isinstance(cmd, dict):
- command = cmd['command']
- prompt = cmd['prompt']
- answer = cmd['answer']
- else:
- command = cmd
- prompt = None
- answer = None
-
- try:
- out = connection.get(command, prompt, answer)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
- try:
- out = to_text(out, errors='surrogate_or_strict')
- except UnicodeError:
- module.fail_json(msg=u'Failed to decode output from %s: %s' %
- (cmd, to_text(out)))
-
- responses.append(out)
-
- return responses
-
-
-def load_config(module, commands, commit=False, comment=None):
- connection = get_connection(module)
-
- try:
- out = connection.edit_config(commands)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
- diff = None
- if module._diff:
- out = connection.get('compare')
- out = to_text(out, errors='surrogate_or_strict')
-
- if not out.startswith('No changes'):
- out = connection.get('show')
- diff = to_text(out, errors='surrogate_or_strict').strip()
-
- if commit:
- try:
- out = connection.commit(comment)
- except ConnectionError:
- connection.discard_changes()
- module.fail_json(msg='commit failed: %s' % out)
-
- if not commit:
- connection.discard_changes()
- else:
- connection.get('exit')
-
- if diff:
- return diff
diff --git a/lib/ansible/module_utils/network/edgeswitch/__init__.py b/lib/ansible/module_utils/network/edgeswitch/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/edgeswitch/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/edgeswitch/edgeswitch.py b/lib/ansible/module_utils/network/edgeswitch/edgeswitch.py
deleted file mode 100644
index dafcc9fd0d..0000000000
--- a/lib/ansible/module_utils/network/edgeswitch/edgeswitch.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2018 Red Hat Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-import json
-import re
-
-from copy import deepcopy
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import Connection, ConnectionError
-from ansible.module_utils.network.common.utils import remove_default_spec
-
-_DEVICE_CONFIGS = {}
-
-
-def build_aggregate_spec(element_spec, required, *extra_spec):
- aggregate_spec = deepcopy(element_spec)
- for elt in required:
- aggregate_spec[elt] = dict(required=True)
- remove_default_spec(aggregate_spec)
- argument_spec = dict(
- aggregate=dict(type='list', elements='dict', options=aggregate_spec)
- )
- argument_spec.update(element_spec)
- for elt in extra_spec:
- argument_spec.update(elt)
- return argument_spec
-
-
-def map_params_to_obj(module):
- obj = []
- aggregate = module.params.get('aggregate')
- if aggregate:
- for item in aggregate:
- for key in item:
- if item.get(key) is None:
- item[key] = module.params[key]
-
- d = item.copy()
- obj.append(d)
- else:
- obj.append(module.params)
-
- return obj
-
-
-def get_connection(module):
- if hasattr(module, '_edgeswitch_connection'):
- return module._edgeswitch_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get('network_api')
- if network_api == 'cliconf':
- module._edgeswitch_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg='Invalid connection type %s' % network_api)
-
- return module._edgeswitch_connection
-
-
-def get_capabilities(module):
- if hasattr(module, '_edgeswitch_capabilities'):
- return module._edgeswitch_capabilities
- try:
- capabilities = Connection(module._socket_path).get_capabilities()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- module._edgeswitch_capabilities = json.loads(capabilities)
- return module._edgeswitch_capabilities
-
-
-def get_defaults_flag(module):
- connection = get_connection(module)
- try:
- out = connection.get_defaults_flag()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- return to_text(out, errors='surrogate_then_replace').strip()
-
-
-def get_config(module, flags=None):
- flag_str = ' '.join(to_list(flags))
-
- try:
- return _DEVICE_CONFIGS[flag_str]
- except KeyError:
- connection = get_connection(module)
- try:
- out = connection.get_config(flags=flags)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- _DEVICE_CONFIGS[flag_str] = cfg
- return cfg
-
-
-def get_interfaces_config(module):
- config = get_config(module)
- lines = config.split('\n')
- interfaces = {}
- interface = None
- for line in lines:
- if line == 'exit':
- if interface:
- interfaces[interface[0]] = interface
- interface = None
- elif interface:
- interface.append(line)
- else:
- match = re.match(r'^interface (.*)$', line)
- if match:
- interface = list()
- interface.append(line)
-
- return interfaces
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
- try:
- return connection.run_commands(commands=commands, check_rc=check_rc)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def load_config(module, commands):
- connection = get_connection(module)
-
- try:
- resp = connection.edit_config(commands)
- return resp.get('response')
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
diff --git a/lib/ansible/module_utils/network/edgeswitch/edgeswitch_interface.py b/lib/ansible/module_utils/network/edgeswitch/edgeswitch_interface.py
deleted file mode 100644
index 793d0e0831..0000000000
--- a/lib/ansible/module_utils/network/edgeswitch/edgeswitch_interface.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2018 Red Hat Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-import re
-
-
-class InterfaceConfiguration:
- def __init__(self):
- self.commands = []
- self.merged = False
-
- def has_same_commands(self, interface):
- len1 = len(self.commands)
- len2 = len(interface.commands)
- return len1 == len2 and len1 == len(frozenset(self.commands).intersection(interface.commands))
-
-
-def merge_interfaces(interfaces):
- """ to reduce commands generated by an edgeswitch module
- we take interfaces one by one and we try to merge them with neighbors if everyone has same commands to run
- """
- merged = {}
-
- for i, interface in interfaces.items():
- if interface.merged:
- continue
- interface.merged = True
-
- match = re.match(r'(\d+)\/(\d+)', i)
- group = int(match.group(1))
- start = int(match.group(2))
- end = start
-
- while True:
- try:
- start = start - 1
- key = '{0}/{1}'.format(group, start)
- neighbor = interfaces[key]
- if not neighbor.merged and interface.has_same_commands(neighbor):
- neighbor.merged = True
- else:
- break
- except KeyError:
- break
- start = start + 1
-
- while True:
- try:
- end = end + 1
- key = '{0}/{1}'.format(group, end)
- neighbor = interfaces[key]
- if not neighbor.merged and interface.has_same_commands(neighbor):
- neighbor.merged = True
- else:
- break
- except KeyError:
- break
- end = end - 1
-
- if end == start:
- key = '{0}/{1}'.format(group, start)
- else:
- key = '{0}/{1}-{2}/{3}'.format(group, start, group, end)
-
- merged[key] = interface
- return merged
diff --git a/lib/ansible/module_utils/network/enos/__init__.py b/lib/ansible/module_utils/network/enos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/enos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/enos/enos.py b/lib/ansible/module_utils/network/enos/enos.py
deleted file mode 100644
index 1cb97f2821..0000000000
--- a/lib/ansible/module_utils/network/enos/enos.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their own
-# license to the complete work.
-#
-# Copyright (C) 2017 Lenovo.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Contains utility methods
-# Lenovo Networking
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, EntityCollection
-from ansible.module_utils.connection import Connection, exec_command
-from ansible.module_utils.connection import ConnectionError
-
-_DEVICE_CONFIGS = {}
-_CONNECTION = None
-
-enos_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
- 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
- 'timeout': dict(type='int'),
- 'context': dict(),
- 'passwords': dict()
-}
-
-enos_argument_spec = {
- 'provider': dict(type='dict', options=enos_provider_spec),
-}
-
-command_spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
-}
-
-
-def get_provider_argspec():
- return enos_provider_spec
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_connection(module):
- global _CONNECTION
- if _CONNECTION:
- return _CONNECTION
- _CONNECTION = Connection(module._socket_path)
-
- context = None
- try:
- context = module.params['context']
- except KeyError:
- context = None
-
- if context:
- if context == 'system':
- command = 'changeto system'
- else:
- command = 'changeto context %s' % context
- _CONNECTION.get(command)
-
- return _CONNECTION
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- passwords = None
- try:
- passwords = module.params['passwords']
- except KeyError:
- passwords = None
- if passwords:
- cmd = 'more system:running-config'
- else:
- cmd = 'show running-config '
- cmd += ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- conn = get_connection(module)
- out = conn.get(cmd)
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
-
-
-def to_commands(module, commands):
- if not isinstance(commands, list):
- raise AssertionError('argument must be of type <list>')
-
- transform = EntityCollection(module, command_spec)
- commands = transform(commands)
-
- for index, item in enumerate(commands):
- if module.check_mode and not item['command'].startswith('show'):
- module.warn('only show commands are supported when using check '
- 'mode, not executing `%s`' % item['command'])
-
- return commands
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
-
- commands = to_commands(module, to_list(commands))
-
- responses = list()
-
- for cmd in commands:
- out = connection.get(**cmd)
- responses.append(to_text(out, errors='surrogate_then_replace'))
-
- return responses
-
-
-def load_config(module, config):
- try:
- conn = get_connection(module)
- conn.get('enable')
- conn.edit_config(config)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def get_defaults_flag(module):
- rc, out, err = exec_command(module, 'show running-config ?')
- out = to_text(out, errors='surrogate_then_replace')
-
- commands = set()
- for line in out.splitlines():
- if line:
- commands.add(line.strip().split()[0])
-
- if 'all' in commands:
- return 'all'
- else:
- return 'full'
diff --git a/lib/ansible/module_utils/network/eric_eccli/__init__.py b/lib/ansible/module_utils/network/eric_eccli/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/eric_eccli/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/eric_eccli/eric_eccli.py b/lib/ansible/module_utils/network/eric_eccli/eric_eccli.py
deleted file mode 100644
index ed4e0a08a9..0000000000
--- a/lib/ansible/module_utils/network/eric_eccli/eric_eccli.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-# Copyright (c) 2019 Ericsson AB.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import Connection, ConnectionError
-
-_DEVICE_CONFIGS = {}
-
-
-def get_connection(module):
- if hasattr(module, '_eric_eccli_connection'):
- return module._eric_eccli_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get('network_api')
- if network_api == 'cliconf':
- module._eric_eccli_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg='Invalid connection type %s' % network_api)
-
- return module._eric_eccli_connection
-
-
-def get_capabilities(module):
- if hasattr(module, '_eric_eccli_capabilities'):
- return module._eric_eccli_capabilities
- try:
- capabilities = Connection(module._socket_path).get_capabilities()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- module._eric_eccli_capabilities = json.loads(capabilities)
- return module._eric_eccli_capabilities
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
- try:
- return connection.run_commands(commands=commands, check_rc=check_rc)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
diff --git a/lib/ansible/module_utils/network/exos/__init__.py b/lib/ansible/module_utils/network/exos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/argspec/__init__.py b/lib/ansible/module_utils/network/exos/argspec/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/argspec/facts/__init__.py b/lib/ansible/module_utils/network/exos/argspec/facts/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/facts/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/argspec/facts/facts.py b/lib/ansible/module_utils/network/exos/argspec/facts/facts.py
deleted file mode 100644
index 4ab2e934ea..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/facts/facts.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The arg spec for the exos facts module.
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class FactsArgs(object): # pylint: disable=R0903
- """ The arg spec for the exos facts module
- """
-
- def __init__(self, **kwargs):
- pass
-
- argument_spec = {
- 'gather_subset': dict(default=['!config'], type='list'),
- 'gather_network_resources': dict(type='list'),
- }
diff --git a/lib/ansible/module_utils/network/exos/argspec/l2_interfaces/__init__.py b/lib/ansible/module_utils/network/exos/argspec/l2_interfaces/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/l2_interfaces/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/argspec/l2_interfaces/l2_interfaces.py b/lib/ansible/module_utils/network/exos/argspec/l2_interfaces/l2_interfaces.py
deleted file mode 100644
index 3c6f250811..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/l2_interfaces/l2_interfaces.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#############################################
-# WARNING #
-#############################################
-#
-# This file is auto generated by the resource
-# module builder playbook.
-#
-# Do not edit this file manually.
-#
-# Changes to this file will be over written
-# by the resource module builder.
-#
-# Changes should be made in the model used to
-# generate this file or in the resource module
-# builder template.
-#
-#############################################
-"""
-The arg spec for the exos_l2_interfaces module
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class L2_interfacesArgs(object): # pylint: disable=R0903
- """The arg spec for the exos_l2_interfaces module
- """
- def __init__(self, **kwargs):
- pass
-
- argument_spec = {
- 'config': {
- 'elements': 'dict',
- 'options': {
- 'access': {'options': {'vlan': {'type': 'int'}},
- 'type': 'dict'},
- 'name': {'required': True, 'type': 'str'},
- 'trunk': {'options': {'native_vlan': {'type': 'int'}, 'trunk_allowed_vlans': {'type': 'list'}},
- 'type': 'dict'}},
- 'type': 'list'},
- 'state': {'choices': ['merged', 'replaced', 'overridden', 'deleted'], 'default': 'merged', 'type': 'str'}
- } # pylint: disable=C0301
diff --git a/lib/ansible/module_utils/network/exos/argspec/lldp_global/__init__.py b/lib/ansible/module_utils/network/exos/argspec/lldp_global/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/lldp_global/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/argspec/lldp_global/lldp_global.py b/lib/ansible/module_utils/network/exos/argspec/lldp_global/lldp_global.py
deleted file mode 100644
index 4106c53428..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/lldp_global/lldp_global.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#############################################
-# WARNING #
-#############################################
-#
-# This file is auto generated by the resource
-# module builder playbook.
-#
-# Do not edit this file manually.
-#
-# Changes to this file will be over written
-# by the resource module builder.
-#
-# Changes should be made in the model used to
-# generate this file or in the resource module
-# builder template.
-#
-#############################################
-
-"""
-The arg spec for the exos_lldp_global module
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class Lldp_globalArgs(object): # pylint: disable=R0903
- """The arg spec for the exos_lldp_global module
- """
-
- def __init__(self, **kwargs):
- pass
-
- argument_spec = {
- 'config': {
- 'options': {
- 'interval': {'default': 30, 'type': 'int'},
- 'tlv_select': {
- 'options': {
- 'management_address': {'type': 'bool'},
- 'port_description': {'type': 'bool'},
- 'system_capabilities': {'type': 'bool'},
- 'system_description': {
- 'default': True,
- 'type': 'bool'},
- 'system_name': {'default': True, 'type': 'bool'}},
- 'type': 'dict'}},
- 'type': 'dict'},
- 'state': {
- 'choices': ['merged', 'replaced', 'deleted'],
- 'default': 'merged',
- 'type': 'str'}} # pylint: disable=C0301
diff --git a/lib/ansible/module_utils/network/exos/argspec/lldp_interfaces/__init__.py b/lib/ansible/module_utils/network/exos/argspec/lldp_interfaces/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/lldp_interfaces/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/argspec/lldp_interfaces/lldp_interfaces.py b/lib/ansible/module_utils/network/exos/argspec/lldp_interfaces/lldp_interfaces.py
deleted file mode 100644
index c2a981f919..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/lldp_interfaces/lldp_interfaces.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#############################################
-# WARNING #
-#############################################
-#
-# This file is auto generated by the resource
-# module builder playbook.
-#
-# Do not edit this file manually.
-#
-# Changes to this file will be over written
-# by the resource module builder.
-#
-# Changes should be made in the model used to
-# generate this file or in the resource module
-# builder template.
-#
-#############################################
-
-"""
-The arg spec for the exos_lldp_interfaces module
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class Lldp_interfacesArgs(object): # pylint: disable=R0903
- """The arg spec for the exos_lldp_interfaces module
- """
-
- def __init__(self, **kwargs):
- pass
-
- argument_spec = {
- 'config': {
- 'elements': 'dict',
- 'options': {
- 'enabled': {'type': 'bool'},
- 'name': {'required': True, 'type': 'str'}},
- 'type': 'list'},
- 'state': {
- 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
- 'default': 'merged',
- 'type': 'str'}} # pylint: disable=C0301
diff --git a/lib/ansible/module_utils/network/exos/argspec/vlans/__init__.py b/lib/ansible/module_utils/network/exos/argspec/vlans/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/vlans/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/argspec/vlans/vlans.py b/lib/ansible/module_utils/network/exos/argspec/vlans/vlans.py
deleted file mode 100644
index 538a155a7d..0000000000
--- a/lib/ansible/module_utils/network/exos/argspec/vlans/vlans.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#############################################
-# WARNING #
-#############################################
-#
-# This file is auto generated by the resource
-# module builder playbook.
-#
-# Do not edit this file manually.
-#
-# Changes to this file will be over written
-# by the resource module builder.
-#
-# Changes should be made in the model used to
-# generate this file or in the resource module
-# builder template.
-#
-#############################################
-
-"""
-The arg spec for the exos_vlans module
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class VlansArgs(object): # pylint: disable=R0903
- """The arg spec for the exos_vlans module
- """
-
- def __init__(self, **kwargs):
- pass
-
- argument_spec = {
- 'config': {
- 'elements': 'dict',
- 'options': {
- 'name': {'type': 'str'},
- 'state': {
- 'choices': ['active', 'suspend'],
- 'default': 'active',
- 'type': 'str'},
- 'vlan_id': {'required': True, 'type': 'int'}},
- 'type': 'list'},
- 'state': {
- 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
- 'default': 'merged',
- 'type': 'str'}} # pylint: disable=C0301
diff --git a/lib/ansible/module_utils/network/exos/config/__init__.py b/lib/ansible/module_utils/network/exos/config/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/config/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/config/l2_interfaces/__init__.py b/lib/ansible/module_utils/network/exos/config/l2_interfaces/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/config/l2_interfaces/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/config/l2_interfaces/l2_interfaces.py b/lib/ansible/module_utils/network/exos/config/l2_interfaces/l2_interfaces.py
deleted file mode 100644
index 3644ac4501..0000000000
--- a/lib/ansible/module_utils/network/exos/config/l2_interfaces/l2_interfaces.py
+++ /dev/null
@@ -1,294 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The exos_l2_interfaces class
-It is in this file where the current configuration (as dict)
-is compared to the provided configuration (as dict) and the command set
-necessary to bring the current configuration to it's desired end-state is
-created
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import json
-from copy import deepcopy
-from ansible.module_utils.network.common.cfg.base import ConfigBase
-from ansible.module_utils.network.common.utils import to_list, dict_diff
-from ansible.module_utils.network.exos.facts.facts import Facts
-from ansible.module_utils.network.exos.exos import send_requests
-
-
class L2_interfaces(ConfigBase):
    """
    The exos_l2_interfaces class

    Compares the current layer-2 interface configuration (gathered as
    facts) with the desired configuration and generates the RESTCONF
    requests needed to bring the device to the desired end-state.
    """

    # Facts subsets: skip the default/legacy fact gathering entirely.
    gather_subset = [
        '!all',
        '!min',
    ]

    # Only the l2_interfaces network resource is needed for this module.
    gather_network_resources = [
        'l2_interfaces',
    ]

    # Request template: trunk mode with a native VLAN. Deep-copied before
    # use because the nested dicts are mutated per request.
    L2_INTERFACE_NATIVE = {
        "data": {
            "openconfig-vlan:config": {
                "interface-mode": "TRUNK",
                "native-vlan": None,
                "trunk-vlans": []
            }
        },
        "method": "PATCH",
        "path": None
    }

    # Request template: trunk mode without a native VLAN.
    L2_INTERFACE_TRUNK = {
        "data": {
            "openconfig-vlan:config": {
                "interface-mode": "TRUNK",
                "trunk-vlans": []
            }
        },
        "method": "PATCH",
        "path": None
    }

    # Request template: access mode with a single access VLAN.
    L2_INTERFACE_ACCESS = {
        "data": {
            "openconfig-vlan:config": {
                "interface-mode": "ACCESS",
                "access-vlan": None
            }
        },
        "method": "PATCH",
        "path": None
    }

    # RESTCONF base path; the interface name is appended to it.
    L2_PATH = "/rest/restconf/data/openconfig-interfaces:interfaces/interface="

    def __init__(self, module):
        super(L2_interfaces, self).__init__(module)

    def get_l2_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        l2_interfaces_facts = facts['ansible_network_resources'].get(
            'l2_interfaces')
        if not l2_interfaces_facts:
            return []
        return l2_interfaces_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        requests = list()

        existing_l2_interfaces_facts = self.get_l2_interfaces_facts()
        requests.extend(self.set_config(existing_l2_interfaces_facts))
        if requests:
            # Requests are only sent to the device outside check mode, but
            # 'changed' is reported either way.
            if not self._module.check_mode:
                send_requests(self._module, requests=requests)
            result['changed'] = True
        result['requests'] = requests

        # Re-gather facts so 'after' reflects the device post-change.
        changed_l2_interfaces_facts = self.get_l2_interfaces_facts()

        result['before'] = existing_l2_interfaces_facts
        if result['changed']:
            result['after'] = changed_l2_interfaces_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_l2_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
        collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_l2_interfaces_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        # 'state' is validated by the arg spec, so exactly one branch runs.
        state = self._module.params['state']
        if state == 'overridden':
            requests = self._state_overridden(want, have)
        elif state == 'deleted':
            requests = self._state_deleted(want, have)
        elif state == 'merged':
            requests = self._state_merged(want, have)
        elif state == 'replaced':
            requests = self._state_replaced(want, have)
        return requests

    def _state_replaced(self, want, have):
        """ The request generator when state is replaced

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        for w in want:
            for h in have:
                if w["name"] == h["name"]:
                    # Only emit a request when want and have actually differ.
                    if dict_diff(w, h):
                        l2_request = self._update_patch_request(w, h)
                        l2_request["data"] = json.dumps(l2_request["data"])
                        requests.append(l2_request)
                    break

        return requests

    def _state_overridden(self, want, have):
        """ The request generator when state is overridden

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        # Interfaces matched against 'want'; anything left over is reset.
        have_copy = []
        for w in want:
            for h in have:
                if w["name"] == h["name"]:
                    if dict_diff(w, h):
                        l2_request = self._update_patch_request(w, h)
                        l2_request["data"] = json.dumps(l2_request["data"])
                        requests.append(l2_request)
                    have_copy.append(h)
                    break

        # Interfaces not mentioned in 'want' are reverted to defaults.
        for h in have:
            if h not in have_copy:
                l2_delete = self._update_delete_request(h)
                if l2_delete["path"]:
                    l2_delete["data"] = json.dumps(l2_delete["data"])
                    requests.append(l2_delete)

        return requests

    def _state_merged(self, want, have):
        """ The request generator when state is merged

        :rtype: A list
        :returns: the requests necessary to merge the provided into
                  the current configuration
        """
        requests = []
        for w in want:
            for h in have:
                if w["name"] == h["name"]:
                    # Note the reversed argument order vs. _state_replaced:
                    # merged only reacts to keys in 'want' missing from 'have'.
                    if dict_diff(h, w):
                        l2_request = self._update_patch_request(w, h)
                        l2_request["data"] = json.dumps(l2_request["data"])
                        requests.append(l2_request)
                    break

        return requests

    def _state_deleted(self, want, have):
        """ The request generator when state is deleted

        :rtype: A list
        :returns: the requests necessary to remove the current configuration
                  of the provided objects
        """
        requests = []
        if want:
            # Delete (reset) only the interfaces named in 'want'.
            for w in want:
                for h in have:
                    if w["name"] == h["name"]:
                        l2_delete = self._update_delete_request(h)
                        if l2_delete["path"]:
                            l2_delete["data"] = json.dumps(l2_delete["data"])
                            requests.append(l2_delete)
                        break

        else:
            # No 'want' given: reset every configured interface.
            for h in have:
                l2_delete = self._update_delete_request(h)
                if l2_delete["path"]:
                    l2_delete["data"] = json.dumps(l2_delete["data"])
                    requests.append(l2_delete)

        return requests

    def _update_patch_request(self, want, have):
        # Build the PATCH request for one interface. Every VLAN referenced
        # in 'want' must already exist on the device, otherwise fail_json.

        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, ['vlans', ])
        vlans_facts = facts['ansible_network_resources'].get('vlans')

        vlan_id = []

        for vlan in vlans_facts:
            vlan_id.append(vlan['vlan_id'])

        # NOTE(review): if 'want' has neither "access" nor "trunk",
        # l2_request is never bound and the final return raises
        # UnboundLocalError — verify the arg spec guarantees one of them.
        if want.get("access"):
            if want["access"]["vlan"] in vlan_id:
                l2_request = deepcopy(self.L2_INTERFACE_ACCESS)
                l2_request["data"]["openconfig-vlan:config"]["access-vlan"] = want["access"]["vlan"]
                l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
            else:
                self._module.fail_json(msg="VLAN %s does not exist" % (want["access"]["vlan"]))

        elif want.get("trunk"):
            if want["trunk"]["native_vlan"]:
                if want["trunk"]["native_vlan"] in vlan_id:
                    l2_request = deepcopy(self.L2_INTERFACE_NATIVE)
                    l2_request["data"]["openconfig-vlan:config"]["native-vlan"] = want["trunk"]["native_vlan"]
                    l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
                    for vlan in want["trunk"]["trunk_allowed_vlans"]:
                        if int(vlan) in vlan_id:
                            l2_request["data"]["openconfig-vlan:config"]["trunk-vlans"].append(int(vlan))
                        else:
                            self._module.fail_json(msg="VLAN %s does not exist" % (vlan))
                else:
                    self._module.fail_json(msg="VLAN %s does not exist" % (want["trunk"]["native_vlan"]))
            else:
                l2_request = deepcopy(self.L2_INTERFACE_TRUNK)
                l2_request["path"] = self.L2_PATH + str(want["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
                for vlan in want["trunk"]["trunk_allowed_vlans"]:
                    if int(vlan) in vlan_id:
                        l2_request["data"]["openconfig-vlan:config"]["trunk-vlans"].append(int(vlan))
                    else:
                        self._module.fail_json(msg="VLAN %s does not exist" % (vlan))
        return l2_request

    def _update_delete_request(self, have):
        # "Deleting" an interface's L2 config means resetting it to the
        # factory default: access mode on VLAN 1.

        l2_request = deepcopy(self.L2_INTERFACE_ACCESS)

        # Skip interfaces already at the default (access + VLAN 1); for all
        # others the path stays None and callers drop the request.
        if have["access"] and have["access"]["vlan"] != 1 or have["trunk"] or not have["access"]:
            l2_request["data"]["openconfig-vlan:config"]["access-vlan"] = 1
            l2_request["path"] = self.L2_PATH + str(have["name"]) + "/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"

        return l2_request
diff --git a/lib/ansible/module_utils/network/exos/config/lldp_global/__init__.py b/lib/ansible/module_utils/network/exos/config/lldp_global/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/config/lldp_global/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/config/lldp_global/lldp_global.py b/lib/ansible/module_utils/network/exos/config/lldp_global/lldp_global.py
deleted file mode 100644
index b466ccb125..0000000000
--- a/lib/ansible/module_utils/network/exos/config/lldp_global/lldp_global.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The exos_lldp_global class
-It is in this file where the current configuration (as dict)
-is compared to the provided configuration (as dict) and the command set
-necessary to bring the current configuration to it's desired end-state is
-created
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible.module_utils.network.common.cfg.base import ConfigBase
-from ansible.module_utils.network.common.utils import to_list
-from ansible.module_utils.network.exos.facts.facts import Facts
-from ansible.module_utils.network.exos.exos import send_requests
-
-import json
-from copy import deepcopy
-
-
class Lldp_global(ConfigBase):
    """
    The exos_lldp_global class

    Compares the current global LLDP configuration (gathered as facts)
    with the desired configuration and generates the RESTCONF requests
    needed to bring the device to the desired end-state.
    """

    # Facts subsets: skip the default/legacy fact gathering entirely.
    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lldp_global',
    ]

    # Device defaults used when state=deleted restores factory settings.
    LLDP_DEFAULT_INTERVAL = 30
    # True  => TLV advertised by default
    # False => TLV suppressed by default
    LLDP_DEFAULT_TLV = {
        'system_name': True,
        'system_description': True,
        'system_capabilities': False,
        'port_description': False,
        'management_address': False
    }
    # PUT (full replace) request template; deep-copied before mutation.
    LLDP_REQUEST = {
        "data": {"openconfig-lldp:config": {}},
        "method": "PUT",
        "path": "/rest/restconf/data/openconfig-lldp:lldp/config"
    }

    def __init__(self, module):
        super(Lldp_global, self).__init__(module)

    def get_lldp_global_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        lldp_global_facts = facts['ansible_network_resources'].get('lldp_global')
        if not lldp_global_facts:
            return {}
        return lldp_global_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        requests = list()

        existing_lldp_global_facts = self.get_lldp_global_facts()
        requests.extend(self.set_config(existing_lldp_global_facts))
        if requests:
            # Requests are only sent to the device outside check mode, but
            # 'changed' is reported either way.
            if not self._module.check_mode:
                send_requests(self._module, requests)
            result['changed'] = True
        result['requests'] = requests

        # Re-gather facts so 'after' reflects the device post-change.
        changed_lldp_global_facts = self.get_lldp_global_facts()

        result['before'] = existing_lldp_global_facts
        if result['changed']:
            result['after'] = changed_lldp_global_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_lldp_global_facts):
        """ Collect the configuration from the args passed to the module,
        collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_lldp_global_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        # Note: unlike the interface modules, lldp_global supports no
        # 'overridden' state — global config has nothing to override.
        state = self._module.params['state']

        if state == 'deleted':
            requests = self._state_deleted(want, have)
        elif state == 'merged':
            requests = self._state_merged(want, have)
        elif state == 'replaced':
            requests = self._state_replaced(want, have)

        return requests

    def _state_replaced(self, want, have):
        """ The request generator when state is replaced

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        # Replace == reset to defaults, then merge the wanted values.
        requests = []
        requests.extend(self._state_deleted(want, have))
        requests.extend(self._state_merged(want, have))
        return requests

    def _state_merged(self, want, have):
        """ The request generator when state is merged

        :rtype: A list
        :returns: the requests necessary to merge the provided into
                  the current configuration
        """
        requests = []

        request = deepcopy(self.LLDP_REQUEST)
        self._update_lldp_config_body_if_diff(want, have, request)

        # Only emit the request if something was actually added to the body.
        if len(request["data"]["openconfig-lldp:config"]):
            request["data"] = json.dumps(request["data"])
            requests.append(request)

        return requests

    def _state_deleted(self, want, have):
        """ The request generator when state is deleted

        :rtype: A list
        :returns: the requests necessary to remove the current configuration
                  of the provided objects
        """
        requests = []

        request = deepcopy(self.LLDP_REQUEST)
        if want:
            self._update_lldp_config_body_if_diff(want, have, request)
        else:
            # No 'want': restore both interval and TLV set to the defaults,
            # but only where the device differs from them.
            if self.LLDP_DEFAULT_INTERVAL != have['interval']:
                request["data"]["openconfig-lldp:config"].update(
                    {"hello-timer": self.LLDP_DEFAULT_INTERVAL})

            if have['tlv_select'] != self.LLDP_DEFAULT_TLV:
                request["data"]["openconfig-lldp:config"].update(
                    {"suppress-tlv-advertisement": [key.upper() for key, value in self.LLDP_DEFAULT_TLV.items() if not value]})
                # Sorted for a deterministic request body.
                request["data"]["openconfig-lldp:config"]["suppress-tlv-advertisement"].sort()
        if len(request["data"]["openconfig-lldp:config"]):
            request["data"] = json.dumps(request["data"])
            requests.append(request)

        return requests

    def _update_lldp_config_body_if_diff(self, want, have, request):
        # Mutates 'request' in place, adding only the keys where 'want'
        # differs from 'have'.
        if want.get('interval'):
            if want['interval'] != have['interval']:
                request["data"]["openconfig-lldp:config"].update(
                    {"hello-timer": want['interval']})
        if want.get('tlv_select'):
            # Create list of TLVs to be suppressed which aren't already
            want_suppress = [key.upper() for key, value in want["tlv_select"].items() if have["tlv_select"][key] != value and value is False]
            if want_suppress:
                # Add previously suppressed TLVs to the list as we are doing a PUT op
                want_suppress.extend([key.upper() for key, value in have["tlv_select"].items() if value is False])
                request["data"]["openconfig-lldp:config"].update(
                    {"suppress-tlv-advertisement": want_suppress})
                # Sorted for a deterministic request body.
                request["data"]["openconfig-lldp:config"]["suppress-tlv-advertisement"].sort()
diff --git a/lib/ansible/module_utils/network/exos/config/lldp_interfaces/__init__.py b/lib/ansible/module_utils/network/exos/config/lldp_interfaces/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/config/lldp_interfaces/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/config/lldp_interfaces/lldp_interfaces.py b/lib/ansible/module_utils/network/exos/config/lldp_interfaces/lldp_interfaces.py
deleted file mode 100644
index c04e33405b..0000000000
--- a/lib/ansible/module_utils/network/exos/config/lldp_interfaces/lldp_interfaces.py
+++ /dev/null
@@ -1,243 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The exos_lldp_interfaces class
-It is in this file where the current configuration (as dict)
-is compared to the provided configuration (as dict) and the command set
-necessary to bring the current configuration to it's desired end-state is
-created
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import json
-from copy import deepcopy
-from ansible.module_utils.network.common.cfg.base import ConfigBase
-from ansible.module_utils.network.common.utils import to_list, dict_diff
-from ansible.module_utils.network.exos.facts.facts import Facts
-from ansible.module_utils.network.exos.exos import send_requests
-
-
class Lldp_interfaces(ConfigBase):
    """
    The exos_lldp_interfaces class

    Compares the current per-interface LLDP configuration (gathered as
    facts) with the desired configuration and generates the RESTCONF
    requests needed to bring the device to the desired end-state.
    """

    # Facts subsets: skip the default/legacy fact gathering entirely.
    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lldp_interfaces',
    ]

    # PATCH request template for one interface; deep-copied before mutation.
    LLDP_INTERFACE = {
        "data": {
            "openconfig-lldp:config": {
                "name": None,
                "enabled": True
            }
        },
        "method": "PATCH",
        "path": None
    }

    # RESTCONF base path; the interface name is appended to it.
    LLDP_PATH = "/rest/restconf/data/openconfig-lldp:lldp/interfaces/interface="

    def __init__(self, module):
        super(Lldp_interfaces, self).__init__(module)

    def get_lldp_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        lldp_interfaces_facts = facts['ansible_network_resources'].get(
            'lldp_interfaces')
        if not lldp_interfaces_facts:
            return []
        return lldp_interfaces_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        requests = list()

        existing_lldp_interfaces_facts = self.get_lldp_interfaces_facts()
        requests.extend(self.set_config(existing_lldp_interfaces_facts))
        if requests:
            # Requests are only sent to the device outside check mode, but
            # 'changed' is reported either way.
            if not self._module.check_mode:
                send_requests(self._module, requests=requests)
            result['changed'] = True
        result['requests'] = requests

        # Re-gather facts so 'after' reflects the device post-change.
        changed_lldp_interfaces_facts = self.get_lldp_interfaces_facts()

        result['before'] = existing_lldp_interfaces_facts
        if result['changed']:
            result['after'] = changed_lldp_interfaces_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_lldp_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
        collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_lldp_interfaces_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        # 'state' is validated by the arg spec, so exactly one branch runs.
        state = self._module.params['state']
        if state == 'overridden':
            requests = self._state_overridden(want, have)
        elif state == 'deleted':
            requests = self._state_deleted(want, have)
        elif state == 'merged':
            requests = self._state_merged(want, have)
        elif state == 'replaced':
            requests = self._state_replaced(want, have)
        return requests

    def _state_replaced(self, want, have):
        """ The request generator when state is replaced

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []

        for w in want:
            for h in have:
                if w['name'] == h['name']:
                    # path stays None when want == have; drop those requests.
                    lldp_request = self._update_patch_request(w, h)
                    if lldp_request["path"]:
                        lldp_request["data"] = json.dumps(lldp_request["data"])
                        requests.append(lldp_request)

        return requests

    def _state_overridden(self, want, have):
        """ The request generator when state is overridden

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        # Interfaces matched against 'want'; anything left over is reset.
        have_copy = []
        for w in want:
            for h in have:
                if w['name'] == h['name']:
                    lldp_request = self._update_patch_request(w, h)
                    if lldp_request["path"]:
                        lldp_request["data"] = json.dumps(lldp_request["data"])
                        requests.append(lldp_request)
                    have_copy.append(h)

        # Interfaces not mentioned in 'want' are re-enabled (the default)
        # if LLDP is currently disabled on them.
        for h in have:
            if h not in have_copy:
                if not h['enabled']:
                    lldp_delete = self._update_delete_request(h)
                    if lldp_delete["path"]:
                        lldp_delete["data"] = json.dumps(lldp_delete["data"])
                        requests.append(lldp_delete)

        return requests

    def _state_merged(self, want, have):
        """ The request generator when state is merged

        :rtype: A list
        :returns: the requests necessary to merge the provided into
                  the current configuration
        """
        requests = []
        for w in want:
            for h in have:
                if w['name'] == h['name']:
                    lldp_request = self._update_patch_request(w, h)
                    if lldp_request["path"]:
                        lldp_request["data"] = json.dumps(lldp_request["data"])
                        requests.append(lldp_request)

        return requests

    def _state_deleted(self, want, have):
        """ The request generator when state is deleted

        :rtype: A list
        :returns: the requests necessary to remove the current configuration
                  of the provided objects
        """
        requests = []
        if want:
            # Re-enable LLDP (the default) on the named interfaces only.
            for w in want:
                for h in have:
                    if w['name'] == h['name']:
                        if not h['enabled']:
                            lldp_delete = self._update_delete_request(h)
                            if lldp_delete["path"]:
                                lldp_delete["data"] = json.dumps(
                                    lldp_delete["data"])
                                requests.append(lldp_delete)
        else:
            # No 'want': re-enable LLDP on every interface where disabled.
            for h in have:
                if not h['enabled']:
                    lldp_delete = self._update_delete_request(h)
                    if lldp_delete["path"]:
                        lldp_delete["data"] = json.dumps(lldp_delete["data"])
                        requests.append(lldp_delete)

        return requests

    def _update_patch_request(self, want, have):
        # Build a PATCH request for one interface; 'path' is only set when
        # the enabled flag actually differs, so callers can filter on it.

        lldp_request = deepcopy(self.LLDP_INTERFACE)

        if have['enabled'] != want['enabled']:
            lldp_request["data"]["openconfig-lldp:config"]["name"] = want[
                'name']
            lldp_request["data"]["openconfig-lldp:config"]["enabled"] = want[
                'enabled']
            lldp_request["path"] = self.LLDP_PATH + str(
                want['name']) + "/config"

        return lldp_request

    def _update_delete_request(self, have):
        # "Deleting" per-interface LLDP config means restoring the default:
        # enabled = True.

        lldp_delete = deepcopy(self.LLDP_INTERFACE)

        lldp_delete["data"]["openconfig-lldp:config"]["name"] = have['name']
        lldp_delete["data"]["openconfig-lldp:config"]["enabled"] = True
        lldp_delete["path"] = self.LLDP_PATH + str(have['name']) + "/config"

        return lldp_delete
diff --git a/lib/ansible/module_utils/network/exos/config/vlans/__init__.py b/lib/ansible/module_utils/network/exos/config/vlans/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/config/vlans/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/config/vlans/vlans.py b/lib/ansible/module_utils/network/exos/config/vlans/vlans.py
deleted file mode 100644
index efe615a19c..0000000000
--- a/lib/ansible/module_utils/network/exos/config/vlans/vlans.py
+++ /dev/null
@@ -1,277 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The exos_vlans class
-It is in this file where the current configuration (as dict)
-is compared to the provided configuration (as dict) and the command set
-necessary to bring the current configuration to it's desired end-state is
-created
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import json
-from copy import deepcopy
-from ansible.module_utils.network.common.cfg.base import ConfigBase
-from ansible.module_utils.network.common.utils import to_list, dict_diff
-from ansible.module_utils.network.exos.facts.facts import Facts
-from ansible.module_utils.network.exos.exos import send_requests
-from ansible.module_utils.network.exos.utils.utils import search_obj_in_list
-
-
class Vlans(ConfigBase):
    """
    The exos_vlans class

    Compares the current VLAN configuration (gathered as facts) with the
    desired configuration and generates the RESTCONF requests needed to
    bring the device to the desired end-state.
    """

    # Facts subsets: skip the default/legacy fact gathering entirely.
    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'vlans',
    ]

    # POST template: create a VLAN that does not yet exist on the device.
    VLAN_POST = {
        "data": {"openconfig-vlan:vlans": []},
        "method": "POST",
        "path": "/rest/restconf/data/openconfig-vlan:vlans/"
    }

    # PATCH template: update existing VLANs; bodies are appended to "vlan".
    VLAN_PATCH = {
        "data": {"openconfig-vlan:vlans": {"vlan": []}},
        "method": "PATCH",
        "path": "/rest/restconf/data/openconfig-vlan:vlans/"
    }

    # DELETE template: path is filled in per VLAN, no body needed.
    VLAN_DELETE = {
        "method": "DELETE",
        "path": None
    }

    # Base path for per-VLAN DELETE; the vlan-id is appended to it.
    DEL_PATH = "/rest/restconf/data/openconfig-vlan:vlans/vlan="

    # Per-VLAN config body; name/status/vlan-id are filled in per request.
    REQUEST_BODY = {
        "config": {"name": None, "status": "ACTIVE", "tpid": "oc-vlan-types:TPID_0x8100", "vlan-id": None}
    }

    def __init__(self, module):
        super(Vlans, self).__init__(module)

    def get_vlans_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        vlans_facts = facts['ansible_network_resources'].get('vlans')
        if not vlans_facts:
            return []
        return vlans_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        requests = list()

        existing_vlans_facts = self.get_vlans_facts()
        requests.extend(self.set_config(existing_vlans_facts))
        if requests:
            # Requests are only sent to the device outside check mode, but
            # 'changed' is reported either way.
            if not self._module.check_mode:
                send_requests(self._module, requests=requests)
            result['changed'] = True
        result['requests'] = requests

        # Re-gather facts so 'after' reflects the device post-change.
        changed_vlans_facts = self.get_vlans_facts()

        result['before'] = existing_vlans_facts
        if result['changed']:
            result['after'] = changed_vlans_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_vlans_facts):
        """ Collect the configuration from the args passed to the module,
        collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_vlans_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        # 'state' is validated by the arg spec, so exactly one branch runs.
        state = self._module.params['state']
        if state == 'overridden':
            requests = self._state_overridden(want, have)
        elif state == 'deleted':
            requests = self._state_deleted(want, have)
        elif state == 'merged':
            requests = self._state_merged(want, have)
        elif state == 'replaced':
            requests = self._state_replaced(want, have)
        return requests

    def _state_replaced(self, want, have):
        """ The request generator when state is replaced

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        # All PATCH bodies are batched into a single request.
        request_patch = deepcopy(self.VLAN_PATCH)

        for w in want:
            if w.get('vlan_id'):
                h = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                if h:
                    # Existing VLAN: patch only if it differs from 'want'.
                    if dict_diff(w, h):
                        request_body = self._update_patch_request(w)
                        request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body)
                else:
                    # Unknown VLAN: create it with its own POST request.
                    request_post = self._update_post_request(w)
                    requests.append(request_post)

        if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]):
            request_patch["data"] = json.dumps(request_patch["data"])
            requests.append(request_patch)

        return requests

    def _state_overridden(self, want, have):
        """ The request generator when state is overridden

        :rtype: A list
        :returns: the requests necessary to migrate the current configuration
                  to the desired configuration
        """
        requests = []
        request_patch = deepcopy(self.VLAN_PATCH)

        # VLANs matched against 'want'; anything left over is deleted.
        have_copy = []
        for w in want:
            if w.get('vlan_id'):
                h = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                if h:
                    if dict_diff(w, h):
                        request_body = self._update_patch_request(w)
                        request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body)
                    have_copy.append(h)
                else:
                    request_post = self._update_post_request(w)
                    requests.append(request_post)

        # VLAN 1 is the default VLAN and is never deleted.
        for h in have:
            if h not in have_copy and h['vlan_id'] != 1:
                request_delete = self._update_delete_request(h)
                requests.append(request_delete)

        if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]):
            request_patch["data"] = json.dumps(request_patch["data"])
            requests.append(request_patch)

        return requests

    def _state_merged(self, want, have):
        """ The requests generator when state is merged

        :rtype: A list
        :returns: the requests necessary to merge the provided into
                  the current configuration
        """
        requests = []

        request_patch = deepcopy(self.VLAN_PATCH)

        for w in want:
            if w.get('vlan_id'):
                h = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                if h:
                    if dict_diff(w, h):
                        request_body = self._update_patch_request(w)
                        request_patch["data"]["openconfig-vlan:vlans"]["vlan"].append(request_body)
                else:
                    request_post = self._update_post_request(w)
                    requests.append(request_post)

        if len(request_patch["data"]["openconfig-vlan:vlans"]["vlan"]):
            request_patch["data"] = json.dumps(request_patch["data"])
            requests.append(request_patch)
        return requests

    def _state_deleted(self, want, have):
        """ The requests generator when state is deleted

        :rtype: A list
        :returns: the requests necessary to remove the current configuration
                  of the provided objects
        """
        requests = []

        if want:
            # Delete only the VLANs named in 'want' that actually exist.
            for w in want:
                if w.get('vlan_id'):
                    h = search_obj_in_list(w['vlan_id'], have, 'vlan_id')
                    if h:
                        request_delete = self._update_delete_request(h)
                        requests.append(request_delete)

        else:
            if not have:
                return requests
            # No 'want': delete every VLAN except the default VLAN 1.
            for h in have:
                if h['vlan_id'] == 1:
                    continue
                else:
                    request_delete = self._update_delete_request(h)
                    requests.append(request_delete)

        return requests

    def _update_vlan_config_body(self, want, request):
        # Fill a REQUEST_BODY copy from 'want'; the module's "suspend"
        # state maps to the openconfig "SUSPENDED" status value.
        request["config"]["name"] = want["name"]
        request["config"]["status"] = "SUSPENDED" if want["state"] == "suspend" else want["state"].upper()
        request["config"]["vlan-id"] = want["vlan_id"]
        return request

    def _update_patch_request(self, want):
        # Returns just the per-VLAN body; the caller batches bodies into
        # one PATCH request.
        request_body = deepcopy(self.REQUEST_BODY)
        request_body = self._update_vlan_config_body(want, request_body)
        return request_body

    def _update_post_request(self, want):
        # Returns a complete, serialized POST request creating one VLAN.
        request_post = deepcopy(self.VLAN_POST)
        request_body = deepcopy(self.REQUEST_BODY)
        request_body = self._update_vlan_config_body(want, request_body)
        request_post["data"]["openconfig-vlan:vlans"].append(request_body)
        request_post["data"] = json.dumps(request_post["data"])
        return request_post

    def _update_delete_request(self, have):
        # Returns a DELETE request targeting one VLAN by its vlan-id.
        request_delete = deepcopy(self.VLAN_DELETE)
        request_delete["path"] = self.DEL_PATH + str(have['vlan_id'])
        return request_delete
diff --git a/lib/ansible/module_utils/network/exos/exos.py b/lib/ansible/module_utils/network/exos/exos.py
deleted file mode 100644
index 7c5c8a8b18..0000000000
--- a/lib/ansible/module_utils/network/exos/exos.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2016 Red Hat Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-import json
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.common._collections_compat import Mapping
-from ansible.module_utils.connection import Connection, ConnectionError
-
-_DEVICE_CONNECTION = None
-
-
-class Cli:
- def __init__(self, module):
- self._module = module
- self._device_configs = {}
- self._connection = None
-
- def get_capabilities(self):
- """Returns platform info of the remove device
- """
- connection = self._get_connection()
- return json.loads(connection.get_capabilities())
-
- def _get_connection(self):
- if not self._connection:
- self._connection = Connection(self._module._socket_path)
- return self._connection
-
- def get_config(self, flags=None):
- """Retrieves the current config from the device or cache
- """
- flags = [] if flags is None else flags
- if self._device_configs == {}:
- connection = self._get_connection()
- try:
- out = connection.get_config(flags=flags)
- except ConnectionError as exc:
- self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
- return self._device_configs
-
- def run_commands(self, commands, check_rc=True):
- """Runs list of commands on remote device and returns results
- """
- connection = self._get_connection()
- try:
- response = connection.run_commands(commands=commands, check_rc=check_rc)
- except ConnectionError as exc:
- self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- return response
-
- def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
- conn = self._get_connection()
- try:
- diff = conn.get_diff(candidate=candidate, running=running, diff_match=diff_match,
- diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
- except ConnectionError as exc:
- self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- return diff
-
-
-class HttpApi:
- def __init__(self, module):
- self._module = module
- self._device_configs = {}
- self._connection_obj = None
-
- def get_capabilities(self):
- """Returns platform info of the remove device
- """
- try:
- capabilities = self._connection.get_capabilities()
- except ConnectionError as exc:
- self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
-
- return json.loads(capabilities)
-
- @property
- def _connection(self):
- if not self._connection_obj:
- self._connection_obj = Connection(self._module._socket_path)
- return self._connection_obj
-
- def get_config(self, flags=None):
- """Retrieves the current config from the device or cache
- """
- flags = [] if flags is None else flags
- if self._device_configs == {}:
- try:
- out = self._connection.get_config(flags=flags)
- except ConnectionError as exc:
- self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
- return self._device_configs
-
- def run_commands(self, commands, check_rc=True):
- """Runs list of commands on remote device and returns results
- """
- try:
- response = self._connection.run_commands(commands=commands, check_rc=check_rc)
- except ConnectionError as exc:
- self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- return response
-
- def send_requests(self, requests):
- """Send a list of http requests to remote device and return results
- """
- if requests is None:
- raise ValueError("'requests' value is required")
-
- responses = list()
- for req in to_list(requests):
- try:
- response = self._connection.send_request(**req)
- except ConnectionError as exc:
- self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- responses.append(response)
- return responses
-
- def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
- try:
- diff = self._connection.get_diff(candidate=candidate, running=running, diff_match=diff_match,
- diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
- except ConnectionError as exc:
- self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- return diff
-
-
-def get_capabilities(module):
- conn = get_connection(module)
- return conn.get_capabilities()
-
-
-def get_connection(module):
- global _DEVICE_CONNECTION
- if not _DEVICE_CONNECTION:
- connection_proxy = Connection(module._socket_path)
- cap = json.loads(connection_proxy.get_capabilities())
- if cap['network_api'] == 'cliconf':
- conn = Cli(module)
- elif cap['network_api'] == 'exosapi':
- conn = HttpApi(module)
- else:
- module.fail_json(msg='Invalid connection type %s' % cap['network_api'])
- _DEVICE_CONNECTION = conn
- return _DEVICE_CONNECTION
-
-
-def get_config(module, flags=None):
- flags = None if flags is None else flags
- conn = get_connection(module)
- return conn.get_config(flags)
-
-
-def load_config(module, commands):
- conn = get_connection(module)
- return conn.run_commands(to_command(module, commands))
-
-
-def run_commands(module, commands, check_rc=True):
- conn = get_connection(module)
- return conn.run_commands(to_command(module, commands), check_rc=check_rc)
-
-
-def to_command(module, commands):
- transform = ComplexList(dict(
- command=dict(key=True),
- output=dict(default='text'),
- prompt=dict(type='list'),
- answer=dict(type='list'),
- sendonly=dict(type='bool', default=False),
- check_all=dict(type='bool', default=False),
- ), module)
- return transform(to_list(commands))
-
-
-def send_requests(module, requests):
- conn = get_connection(module)
- return conn.send_requests(to_request(module, requests))
-
-
-def to_request(module, requests):
- transform = ComplexList(dict(
- path=dict(key=True),
- method=dict(),
- data=dict(type='dict'),
- ), module)
- return transform(to_list(requests))
-
-
-def get_diff(module, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
- conn = get_connection(module)
- return conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
diff --git a/lib/ansible/module_utils/network/exos/facts/__init__.py b/lib/ansible/module_utils/network/exos/facts/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/facts/facts.py b/lib/ansible/module_utils/network/exos/facts/facts.py
deleted file mode 100644
index 6c932b7601..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/facts.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The facts class for exos
-this file validates each subset of facts and selectively
-calls the appropriate facts gathering function
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible.module_utils.network.exos.argspec.facts.facts import FactsArgs
-from ansible.module_utils.network.common.facts.facts import FactsBase
-from ansible.module_utils.network.exos.facts.lldp_global.lldp_global import Lldp_globalFacts
-from ansible.module_utils.network.exos.facts.vlans.vlans import VlansFacts
-from ansible.module_utils.network.exos.facts.legacy.base import Default, Hardware, Interfaces, Config
-from ansible.module_utils.network.exos.facts.lldp_interfaces.lldp_interfaces import Lldp_interfacesFacts
-from ansible.module_utils.network.exos.facts.l2_interfaces.l2_interfaces import L2_interfacesFacts
-
-FACT_LEGACY_SUBSETS = dict(
- default=Default,
- hardware=Hardware,
- interfaces=Interfaces,
- config=Config)
-
-FACT_RESOURCE_SUBSETS = dict(
- lldp_global=Lldp_globalFacts,
- vlans=VlansFacts,
- lldp_interfaces=Lldp_interfacesFacts,
- l2_interfaces=L2_interfacesFacts,
-)
-
-
-class Facts(FactsBase):
- """ The fact class for exos
- """
-
- VALID_LEGACY_GATHER_SUBSETS = frozenset(FACT_LEGACY_SUBSETS.keys())
- VALID_RESOURCE_SUBSETS = frozenset(FACT_RESOURCE_SUBSETS.keys())
-
- def __init__(self, module):
- super(Facts, self).__init__(module)
-
- def get_facts(self, legacy_facts_type=None, resource_facts_type=None, data=None):
- """ Collect the facts for exos
-
- :param legacy_facts_type: List of legacy facts types
- :param resource_facts_type: List of resource fact types
- :param data: previously collected conf
- :rtype: dict
- :return: the facts gathered
- """
- if self.VALID_RESOURCE_SUBSETS:
- self.get_network_resources_facts(FACT_RESOURCE_SUBSETS, resource_facts_type, data)
-
- if self.VALID_LEGACY_GATHER_SUBSETS:
- self.get_network_legacy_facts(FACT_LEGACY_SUBSETS, legacy_facts_type)
-
- return self.ansible_facts, self._warnings
diff --git a/lib/ansible/module_utils/network/exos/facts/l2_interfaces/__init__.py b/lib/ansible/module_utils/network/exos/facts/l2_interfaces/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/l2_interfaces/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/facts/l2_interfaces/l2_interfaces.py b/lib/ansible/module_utils/network/exos/facts/l2_interfaces/l2_interfaces.py
deleted file mode 100644
index 84e4b6fadc..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/l2_interfaces/l2_interfaces.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The exos l2_interfaces fact class
-It is in this file the configuration is collected from the device
-for a given resource, parsed, and the facts tree is populated
-based on the configuration.
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import re
-from copy import deepcopy
-
-from ansible.module_utils.network.common import utils
-from ansible.module_utils.network.exos.argspec.l2_interfaces.l2_interfaces import L2_interfacesArgs
-from ansible.module_utils.network.exos.exos import send_requests
-
-
-class L2_interfacesFacts(object):
- """ The exos l2_interfaces fact class
- """
- def __init__(self, module, subspec='config', options='options'):
- self._module = module
- self.argument_spec = L2_interfacesArgs.argument_spec
- spec = deepcopy(self.argument_spec)
- if subspec:
- if options:
- facts_argument_spec = spec[subspec][options]
- else:
- facts_argument_spec = spec[subspec]
- else:
- facts_argument_spec = spec
-
- self.generated_spec = utils.generate_dict(facts_argument_spec)
-
- def populate_facts(self, connection, ansible_facts, data=None):
- """ Populate the facts for l2_interfaces
- :param connection: the device connection
- :param ansible_facts: Facts dictionary
- :param data: previously collected conf
- :rtype: dictionary
- :returns: facts
- """
-
- if not data:
- request = [{
- "path": "/rest/restconf/data/openconfig-interfaces:interfaces",
- "method": "GET"
- }]
- data = send_requests(self._module, requests=request)
-
- objs = []
- if data:
- for d in data[0]["openconfig-interfaces:interfaces"]["interface"]:
- obj = self.render_config(self.generated_spec, d)
- if obj:
- objs.append(obj)
-
- ansible_facts['ansible_network_resources'].pop('l2_interfaces', None)
- facts = {}
- if objs:
- params = utils.validate_config(self.argument_spec, {'config': objs})
- facts['l2_interfaces'] = params['config']
-
- ansible_facts['ansible_network_resources'].update(facts)
- return ansible_facts
-
- def render_config(self, spec, conf):
- """
- Render config as dictionary structure and delete keys
- from spec for null values
-
- :param spec: The facts tree, generated from the argspec
- :param conf: The configuration
- :rtype: dictionary
- :returns: The generated config
- """
- config = deepcopy(spec)
- if conf["config"]["type"] == "ethernetCsmacd":
- conf_dict = conf["openconfig-if-ethernet:ethernet"]["openconfig-vlan:switched-vlan"]["config"]
- config["name"] = conf["name"]
- if conf_dict["interface-mode"] == "ACCESS":
- config["access"]["vlan"] = conf_dict.get("access-vlan")
- else:
- if 'native-vlan' in conf_dict:
- config["trunk"]["native_vlan"] = conf_dict.get("native-vlan")
- config["trunk"]["trunk_allowed_vlans"] = conf_dict.get("trunk-vlans")
- return utils.remove_empties(config)
diff --git a/lib/ansible/module_utils/network/exos/facts/legacy/__init__.py b/lib/ansible/module_utils/network/exos/facts/legacy/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/legacy/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/facts/legacy/base.py b/lib/ansible/module_utils/network/exos/facts/legacy/base.py
deleted file mode 100644
index 886fc86314..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/legacy/base.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-"""
-The exos legacy fact class
-It is in this file the configuration is collected from the device
-for a given resource, parsed, and the facts tree is populated
-based on the configuration.
-"""
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-import re
-import json
-
-from ansible.module_utils.network.exos.exos import run_commands
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import iteritems
-
-
-class FactsBase(object):
-
- COMMANDS = list()
-
- def __init__(self, module):
- self.module = module
- self.facts = dict()
- self.warnings = list()
- self.responses = None
-
- def populate(self):
- self.responses = run_commands(self.module, self.COMMANDS)
-
- def run(self, cmd):
- return run_commands(self.module, cmd)
-
-
-class Default(FactsBase):
-
- COMMANDS = [
- 'show version',
- 'show switch'
- ]
-
- def populate(self):
- super(Default, self).populate()
- data = self.responses[0]
- if data:
- self.facts['version'] = self.parse_version(data)
- self.facts['serialnum'] = self.parse_serialnum(data)
-
- data = self.responses[1]
- if data:
- self.facts['model'] = self.parse_model(data)
- self.facts['hostname'] = self.parse_hostname(data)
-
- def parse_version(self, data):
- match = re.search(r'Image\s+: ExtremeXOS version (\S+)', data)
- if match:
- return match.group(1)
-
- def parse_model(self, data):
- match = re.search(r'System Type:\s+(.*$)', data, re.M)
- if match:
- return match.group(1)
-
- def parse_hostname(self, data):
- match = re.search(r'SysName:\s+(\S+)', data, re.M)
- if match:
- return match.group(1)
-
- def parse_serialnum(self, data):
- match = re.search(r'Switch\s+: \S+ (\S+)', data, re.M)
- if match:
- return match.group(1)
- # For stack, return serial number of the first switch in the stack.
- match = re.search(r'Slot-\d+\s+: \S+ (\S+)', data, re.M)
- if match:
- return match.group(1)
- # Handle unique formatting for VM
- match = re.search(r'Switch\s+: PN:\S+\s+SN:(\S+)', data, re.M)
- if match:
- return match.group(1)
-
-
-class Hardware(FactsBase):
-
- COMMANDS = [
- 'show memory'
- ]
-
- def populate(self):
- super(Hardware, self).populate()
- data = self.responses[0]
- if data:
- self.facts['memtotal_mb'] = int(round(int(self.parse_memtotal(data)) / 1024, 0))
- self.facts['memfree_mb'] = int(round(int(self.parse_memfree(data)) / 1024, 0))
-
- def parse_memtotal(self, data):
- match = re.search(r' Total DRAM \(KB\): (\d+)', data, re.M)
- if match:
- return match.group(1)
- # Handle unique formatting for VM
- match = re.search(r' Total \s+\(KB\): (\d+)', data, re.M)
- if match:
- return match.group(1)
-
- def parse_memfree(self, data):
- match = re.search(r' Free\s+\(KB\): (\d+)', data, re.M)
- if match:
- return match.group(1)
-
-
-class Config(FactsBase):
-
- COMMANDS = ['show configuration detail']
-
- def populate(self):
- super(Config, self).populate()
- data = self.responses[0]
- if data:
- self.facts['config'] = data
-
-
-class Interfaces(FactsBase):
-
- COMMANDS = [
- 'show switch',
- {'command': 'show port config', 'output': 'json'},
- {'command': 'show port description', 'output': 'json'},
- {'command': 'show vlan detail', 'output': 'json'},
- {'command': 'show lldp neighbors', 'output': 'json'}
- ]
-
- def populate(self):
- super(Interfaces, self).populate()
-
- self.facts['all_ipv4_addresses'] = list()
- self.facts['all_ipv6_addresses'] = list()
-
- data = self.responses[0]
- if data:
- sysmac = self.parse_sysmac(data)
-
- data = self.responses[1]
- if data:
- self.facts['interfaces'] = self.populate_interfaces(data, sysmac)
-
- data = self.responses[2]
- if data:
- self.populate_interface_descriptions(data)
-
- data = self.responses[3]
- if data:
- self.populate_vlan_interfaces(data, sysmac)
-
- data = self.responses[4]
- if data:
- self.facts['neighbors'] = self.parse_neighbors(data)
-
- def parse_sysmac(self, data):
- match = re.search(r'System MAC:\s+(\S+)', data, re.M)
- if match:
- return match.group(1)
-
- def populate_interfaces(self, interfaces, sysmac):
- facts = dict()
- for elem in interfaces:
- intf = dict()
-
- if 'show_ports_config' not in elem:
- continue
-
- key = str(elem['show_ports_config']['port'])
-
- if elem['show_ports_config']['linkState'] == 2:
- # Link state is "not present", don't include
- continue
-
- intf['type'] = 'Ethernet'
- intf['macaddress'] = sysmac
- intf['bandwidth_configured'] = str(elem['show_ports_config']['speedCfg'])
- intf['bandwidth'] = str(elem['show_ports_config']['speedActual'])
- intf['duplex_configured'] = elem['show_ports_config']['duplexCfg']
- intf['duplex'] = elem['show_ports_config']['duplexActual']
- if elem['show_ports_config']['linkState'] == 1:
- intf['lineprotocol'] = 'up'
- else:
- intf['lineprotocol'] = 'down'
- if elem['show_ports_config']['portState'] == 1:
- intf['operstatus'] = 'up'
- else:
- intf['operstatus'] = 'admin down'
-
- facts[key] = intf
- return facts
-
- def populate_interface_descriptions(self, data):
- for elem in data:
- if 'show_ports_description' not in elem:
- continue
- key = str(elem['show_ports_description']['port'])
-
- if 'descriptionString' in elem['show_ports_description']:
- desc = elem['show_ports_description']['descriptionString']
- self.facts['interfaces'][key]['description'] = desc
-
- def populate_vlan_interfaces(self, data, sysmac):
- for elem in data:
- if 'vlanProc' in elem:
- key = elem['vlanProc']['name1']
- if key not in self.facts['interfaces']:
- intf = dict()
- intf['type'] = 'VLAN'
- intf['macaddress'] = sysmac
- self.facts['interfaces'][key] = intf
-
- if elem['vlanProc']['ipAddress'] != '0.0.0.0':
- self.facts['interfaces'][key]['ipv4'] = list()
- addr = elem['vlanProc']['ipAddress']
- subnet = elem['vlanProc']['maskForDisplay']
- ipv4 = dict(address=addr, subnet=subnet)
- self.add_ip_address(addr, 'ipv4')
- self.facts['interfaces'][key]['ipv4'].append(ipv4)
-
- if 'rtifIpv6Address' in elem:
- key = elem['rtifIpv6Address']['rtifName']
- if key not in self.facts['interfaces']:
- intf = dict()
- intf['type'] = 'VLAN'
- intf['macaddress'] = sysmac
- self.facts['interfaces'][key] = intf
- self.facts['interfaces'][key]['ipv6'] = list()
- addr, subnet = elem['rtifIpv6Address']['ipv6_address_mask'].split('/')
- ipv6 = dict(address=addr, subnet=subnet)
- self.add_ip_address(addr, 'ipv6')
- self.facts['interfaces'][key]['ipv6'].append(ipv6)
-
- def add_ip_address(self, address, family):
- if family == 'ipv4':
- if address not in self.facts['all_ipv4_addresses']:
- self.facts['all_ipv4_addresses'].append(address)
- else:
- if address not in self.facts['all_ipv6_addresses']:
- self.facts['all_ipv6_addresses'].append(address)
-
- def parse_neighbors(self, data):
- facts = dict()
- for elem in data:
- if 'lldpPortNbrInfoShort' not in elem:
- continue
- intf = str(elem['lldpPortNbrInfoShort']['port'])
- if intf not in facts:
- facts[intf] = list()
- fact = dict()
- fact['host'] = elem['lldpPortNbrInfoShort']['nbrSysName']
- fact['port'] = str(elem['lldpPortNbrInfoShort']['nbrPortID'])
- facts[intf].append(fact)
- return facts
diff --git a/lib/ansible/module_utils/network/exos/facts/lldp_global/__init__.py b/lib/ansible/module_utils/network/exos/facts/lldp_global/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/lldp_global/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/facts/lldp_global/lldp_global.py b/lib/ansible/module_utils/network/exos/facts/lldp_global/lldp_global.py
deleted file mode 100644
index c6dad453dd..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/lldp_global/lldp_global.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The exos lldp_global fact class
-It is in this file the configuration is collected from the device
-for a given resource, parsed, and the facts tree is populated
-based on the configuration.
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import re
-from copy import deepcopy
-
-from ansible.module_utils.network.common import utils
-from ansible.module_utils.network.exos.argspec.lldp_global.lldp_global \
- import Lldp_globalArgs
-from ansible.module_utils.network.exos.exos import send_requests
-
-
-class Lldp_globalFacts(object):
- """ The exos lldp_global fact class
- """
-
- TLV_SELECT_OPTIONS = [
- "SYSTEM_NAME",
- "SYSTEM_DESCRIPTION",
- "SYSTEM_CAPABILITIES",
- "MANAGEMENT_ADDRESS",
- "PORT_DESCRIPTION"]
-
- def __init__(self, module, subspec='config', options='options'):
- self._module = module
- self.argument_spec = Lldp_globalArgs.argument_spec
- spec = deepcopy(self.argument_spec)
- if subspec:
- if options:
- facts_argument_spec = spec[subspec][options]
- else:
- facts_argument_spec = spec[subspec]
- else:
- facts_argument_spec = spec
-
- self.generated_spec = utils.generate_dict(facts_argument_spec)
-
- def populate_facts(self, connection, ansible_facts, data=None):
- """ Populate the facts for lldp_global
- :param connection: the device connection
- :param ansible_facts: Facts dictionary
- :param data: previously collected conf
- :rtype: dictionary
- :returns: facts
- """
- if not data:
- request = {
- "path": "/rest/restconf/data/openconfig-lldp:lldp/config/",
- "method": "GET",
- }
- data = send_requests(self._module, request)
-
- obj = {}
- if data:
- lldp_obj = self.render_config(self.generated_spec, data[0])
- if lldp_obj:
- obj = lldp_obj
-
- ansible_facts['ansible_network_resources'].pop('lldp_global', None)
- facts = {}
-
- params = utils.validate_config(self.argument_spec, {'config': obj})
- facts['lldp_global'] = params['config']
-
- ansible_facts['ansible_network_resources'].update(facts)
- return ansible_facts
-
- def render_config(self, spec, conf):
- """
- Render config as dictionary structure and delete keys
- from spec for null values
-
- :param spec: The facts tree, generated from the argspec
- :param conf: The configuration
- :rtype: dictionary
- :returns: The generated config
- """
- config = deepcopy(spec)
- config['interval'] = conf["openconfig-lldp:config"]["hello-timer"]
-
- for item in self.TLV_SELECT_OPTIONS:
- config["tlv_select"][item.lower()] = (
- False if (item in conf["openconfig-lldp:config"]["suppress-tlv-advertisement"])
- else True)
-
- return utils.remove_empties(config)
diff --git a/lib/ansible/module_utils/network/exos/facts/lldp_interfaces/__init__.py b/lib/ansible/module_utils/network/exos/facts/lldp_interfaces/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/lldp_interfaces/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/facts/lldp_interfaces/lldp_interfaces.py b/lib/ansible/module_utils/network/exos/facts/lldp_interfaces/lldp_interfaces.py
deleted file mode 100644
index 702070a201..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/lldp_interfaces/lldp_interfaces.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The exos lldp_interfaces fact class
-It is in this file the configuration is collected from the device
-for a given resource, parsed, and the facts tree is populated
-based on the configuration.
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import re
-from copy import deepcopy
-
-from ansible.module_utils.network.common import utils
-from ansible.module_utils.network.exos.argspec.lldp_interfaces.lldp_interfaces import Lldp_interfacesArgs
-from ansible.module_utils.network.exos.exos import send_requests
-
-
-class Lldp_interfacesFacts(object):
- """ The exos lldp_interfaces fact class
- """
-
- def __init__(self, module, subspec='config', options='options'):
- self._module = module
- self.argument_spec = Lldp_interfacesArgs.argument_spec
- spec = deepcopy(self.argument_spec)
- if subspec:
- if options:
- facts_argument_spec = spec[subspec][options]
- else:
- facts_argument_spec = spec[subspec]
- else:
- facts_argument_spec = spec
-
- self.generated_spec = utils.generate_dict(facts_argument_spec)
-
- def populate_facts(self, connection, ansible_facts, data=None):
- """ Populate the facts for lldp_interfaces
- :param connection: the device connection
- :param ansible_facts: Facts dictionary
- :param data: previously collected conf
- :rtype: dictionary
- :returns: facts
- """
-
- if not data:
- request = [{
- "path": "/rest/restconf/data/openconfig-lldp:lldp/interfaces?depth=4",
- "method": "GET"
- }]
- data = send_requests(self._module, requests=request)
-
- objs = []
- if data:
- for d in data[0]["openconfig-lldp:interfaces"]["interface"]:
- obj = self.render_config(self.generated_spec, d["config"])
- if obj:
- objs.append(obj)
-
- ansible_facts['ansible_network_resources'].pop('lldp_interfaces', None)
- facts = {}
- if objs:
- params = utils.validate_config(self.argument_spec, {'config': objs})
- facts['lldp_interfaces'] = params['config']
-
- ansible_facts['ansible_network_resources'].update(facts)
- return ansible_facts
-
- def render_config(self, spec, conf):
- """
- Render config as dictionary structure and delete keys
- from spec for null values
-
- :param spec: The facts tree, generated from the argspec
- :param conf: The configuration
- :rtype: dictionary
- :returns: The generated config
- """
- config = deepcopy(spec)
-
- config["name"] = conf["name"]
- config["enabled"] = bool(conf["enabled"])
-
- return utils.remove_empties(config)
diff --git a/lib/ansible/module_utils/network/exos/facts/vlans/__init__.py b/lib/ansible/module_utils/network/exos/facts/vlans/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/vlans/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/facts/vlans/vlans.py b/lib/ansible/module_utils/network/exos/facts/vlans/vlans.py
deleted file mode 100644
index 55f211f732..0000000000
--- a/lib/ansible/module_utils/network/exos/facts/vlans/vlans.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#
-# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-"""
-The exos vlans fact class
-It is in this file the configuration is collected from the device
-for a given resource, parsed, and the facts tree is populated
-based on the configuration.
-"""
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import re
-from copy import deepcopy
-
-from ansible.module_utils.network.common import utils
-from ansible.module_utils.network.exos.argspec.vlans.vlans import VlansArgs
-from ansible.module_utils.network.exos.exos import send_requests
-
-
-class VlansFacts(object):
- """ The exos vlans fact class
- """
-
- def __init__(self, module, subspec='config', options='options'):
- self._module = module
- self.argument_spec = VlansArgs.argument_spec
- spec = deepcopy(self.argument_spec)
- if subspec:
- if options:
- facts_argument_spec = spec[subspec][options]
- else:
- facts_argument_spec = spec[subspec]
- else:
- facts_argument_spec = spec
-
- self.generated_spec = utils.generate_dict(facts_argument_spec)
-
- def populate_facts(self, connection, ansible_facts, data=None):
- """ Populate the facts for vlans
- :param connection: the device connection
- :param ansible_facts: Facts dictionary
- :param data: previously collected conf
- :rtype: dictionary
- :returns: facts
- """
-
- if not data:
- request = [{
- "path": "/rest/restconf/data/openconfig-vlan:vlans?depth=5",
- "method": "GET"
- }]
- data = send_requests(self._module, requests=request)
-
- objs = []
- if data:
- for d in data[0]["openconfig-vlan:vlans"]["vlan"]:
- obj = self.render_config(self.generated_spec, d["config"])
- if obj:
- objs.append(obj)
-
- ansible_facts['ansible_network_resources'].pop('vlans', None)
- facts = {}
- if objs:
- params = utils.validate_config(self.argument_spec, {'config': objs})
- facts['vlans'] = params['config']
-
- ansible_facts['ansible_network_resources'].update(facts)
- return ansible_facts
-
- def render_config(self, spec, conf):
- """
- Render config as dictionary structure and delete keys
- from spec for null values
-
- :param spec: The facts tree, generated from the argspec
- :param conf: The configuration
- :rtype: dictionary
- :returns: The generated config
- """
- config = deepcopy(spec)
-
- config["name"] = conf["name"]
- config["state"] = "suspend" if conf["status"] == "SUSPENDED" else conf["status"].lower()
- config["vlan_id"] = conf["vlan-id"]
-
- return utils.remove_empties(config)
diff --git a/lib/ansible/module_utils/network/exos/utils/__init__.py b/lib/ansible/module_utils/network/exos/utils/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/exos/utils/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/exos/utils/utils.py b/lib/ansible/module_utils/network/exos/utils/utils.py
deleted file mode 100644
index d40f81714c..0000000000
--- a/lib/ansible/module_utils/network/exos/utils/utils.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-def search_obj_in_list(item, lst, key):
- for o in lst:
- if o[key] == item:
- return o
- return None
diff --git a/lib/ansible/module_utils/network/f5/__init__.py b/lib/ansible/module_utils/network/f5/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/f5/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/f5/iworkflow.py b/lib/ansible/module_utils/network/f5/iworkflow.py
deleted file mode 100644
index a7de5c1c41..0000000000
--- a/lib/ansible/module_utils/network/f5/iworkflow.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2017 F5 Networks Inc.
-# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-import time
-
-try:
- from f5.iworkflow import ManagementRoot
- from icontrol.exceptions import iControlUnexpectedHTTPError
- HAS_F5SDK = True
-except ImportError:
- HAS_F5SDK = False
-
-try:
- from library.module_utils.network.f5.common import F5BaseClient
- from library.module_utils.network.f5.common import F5ModuleError
-except ImportError:
- from ansible.module_utils.network.f5.common import F5BaseClient
- from ansible.module_utils.network.f5.common import F5ModuleError
-
-
-class F5Client(F5BaseClient):
- @property
- def api(self):
- exc = None
- if self._client:
- return self._client
- for x in range(0, 3):
- try:
- server = self.params['provider']['server'] or self.params['server']
- user = self.params['provider']['user'] or self.params['user']
- password = self.params['provider']['password'] or self.params['password']
- server_port = self.params['provider']['server_port'] or self.params['server_port'] or 443
- validate_certs = self.params['provider']['validate_certs'] or self.params['validate_certs']
-
- result = ManagementRoot(
- server,
- user,
- password,
- port=server_port,
- verify=validate_certs,
- token='local'
- )
- self._client = result
- return self._client
- except Exception as ex:
- exc = ex
- time.sleep(3)
- error = 'Unable to connect to {0} on port {1}.'.format(self.params['server'], self.params['server_port'])
- if exc is not None:
- error += ' The reported error was "{0}".'.format(str(exc))
- raise F5ModuleError(error)
diff --git a/lib/ansible/module_utils/network/f5/legacy.py b/lib/ansible/module_utils/network/f5/legacy.py
deleted file mode 100644
index bb2189c2bb..0000000000
--- a/lib/ansible/module_utils/network/f5/legacy.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2017 F5 Networks Inc.
-# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-try:
- import bigsuds
- bigsuds_found = True
-except ImportError:
- bigsuds_found = False
-
-
-from ansible.module_utils.basic import env_fallback
-
-
-def f5_argument_spec():
- return dict(
- server=dict(
- type='str',
- required=True,
- fallback=(env_fallback, ['F5_SERVER'])
- ),
- user=dict(
- type='str',
- required=True,
- fallback=(env_fallback, ['F5_USER'])
- ),
- password=dict(
- type='str',
- aliases=['pass', 'pwd'],
- required=True,
- no_log=True,
- fallback=(env_fallback, ['F5_PASSWORD'])
- ),
- validate_certs=dict(
- default='yes',
- type='bool',
- fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
- ),
- server_port=dict(
- type='int',
- default=443,
- fallback=(env_fallback, ['F5_SERVER_PORT'])
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- partition=dict(
- type='str',
- default='Common',
- fallback=(env_fallback, ['F5_PARTITION'])
- )
- )
-
-
-def f5_parse_arguments(module):
- if not bigsuds_found:
- module.fail_json(msg="the python bigsuds module is required")
-
- if module.params['validate_certs']:
- import ssl
- if not hasattr(ssl, 'SSLContext'):
- module.fail_json(
- msg="bigsuds does not support verifying certificates with python < 2.7.9. "
- "Either update python or set validate_certs=False on the task")
-
- return (
- module.params['server'],
- module.params['user'],
- module.params['password'],
- module.params['state'],
- module.params['partition'],
- module.params['validate_certs'],
- module.params['server_port']
- )
-
-
-def bigip_api(bigip, user, password, validate_certs, port=443):
- try:
- if bigsuds.__version__ >= '1.0.4':
- api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs, port=port)
- elif bigsuds.__version__ == '1.0.3':
- api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
- else:
- api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
- except TypeError:
- # bigsuds < 1.0.3, no verify param
- if validate_certs:
- # Note: verified we have SSLContext when we parsed params
- api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
- else:
- import ssl
- if hasattr(ssl, 'SSLContext'):
- # Really, you should never do this. It disables certificate
- # verification *globally*. But since older bigip libraries
- # don't give us a way to toggle verification we need to
- # disable it at the global level.
- # From https://www.python.org/dev/peps/pep-0476/#id29
- ssl._create_default_https_context = ssl._create_unverified_context
- api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
-
- return api
-
-
-# Fully Qualified name (with the partition)
-def fq_name(partition, name):
- if name is not None and not name.startswith('/'):
- return '/%s/%s' % (partition, name)
- return name
-
-
-# Fully Qualified name (with partition) for a list
-def fq_list_names(partition, list_names):
- if list_names is None:
- return None
- return map(lambda x: fq_name(partition, x), list_names)
diff --git a/lib/ansible/module_utils/network/f5/urls.py b/lib/ansible/module_utils/network/f5/urls.py
deleted file mode 100644
index c3fc857117..0000000000
--- a/lib/ansible/module_utils/network/f5/urls.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2017, F5 Networks Inc.
-# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-import re
-
-try:
- from library.module_utils.network.f5.common import F5ModuleError
-except ImportError:
- from ansible.module_utils.network.f5.common import F5ModuleError
-
-_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
-_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
-
-
-def check_header_validity(header):
- """Verifies that header value is a string which doesn't contain
- leading whitespace or return characters.
-
- NOTE: This is a slightly modified version of the original function
- taken from the requests library:
- http://docs.python-requests.org/en/master/_modules/requests/utils/
-
- :param header: string containing ':'.
- """
- try:
- name, value = header.split(':')
- except ValueError:
- raise F5ModuleError('Invalid header format: {0}'.format(header))
- if name == '':
- raise F5ModuleError('Invalid header format: {0}'.format(header))
-
- if isinstance(value, bytes):
- pat = _CLEAN_HEADER_REGEX_BYTE
- else:
- pat = _CLEAN_HEADER_REGEX_STR
- try:
- if not pat.match(value):
- raise F5ModuleError("Invalid return character or leading space in header: %s" % name)
- except TypeError:
- raise F5ModuleError("Value for header {%s: %s} must be of type str or "
- "bytes, not %s" % (name, value, type(value)))
-
-
-def build_service_uri(base_uri, partition, name):
- """Build the proper uri for a service resource.
- This follows the scheme:
- <base_uri>/~<partition>~<<name>.app>~<name>
- :param base_uri: str -- base uri of the REST endpoint
- :param partition: str -- partition for the service
- :param name: str -- name of the service
- :returns: str -- uri to access the service
- """
- name = name.replace('/', '~')
- return '%s~%s~%s.app~%s' % (base_uri, partition, name, name)
-
-
-def parseStats(entry):
- if 'description' in entry:
- return entry['description']
- elif 'value' in entry:
- return entry['value']
- elif 'entries' in entry or 'nestedStats' in entry and 'entries' in entry['nestedStats']:
- if 'entries' in entry:
- entries = entry['entries']
- else:
- entries = entry['nestedStats']['entries']
- result = None
-
- for name in entries:
- entry = entries[name]
- if 'https://localhost' in name:
- name = name.split('/')
- name = name[-1]
- if result and isinstance(result, list):
- result.append(parseStats(entry))
- elif result and isinstance(result, dict):
- result[name] = parseStats(entry)
- else:
- try:
- int(name)
- result = list()
- result.append(parseStats(entry))
- except ValueError:
- result = dict()
- result[name] = parseStats(entry)
- else:
- if '.' in name:
- names = name.split('.')
- key = names[0]
- value = names[1]
- if result is None:
- # result can be None if this branch is reached first
- #
- # For example, the mgmt/tm/net/trunk/NAME/stats API
- # returns counters.bitsIn before anything else.
- result = dict()
- result[key] = dict()
- elif key not in result:
- result[key] = dict()
- elif result[key] is None:
- result[key] = dict()
- result[key][value] = parseStats(entry)
- else:
- if result and isinstance(result, list):
- result.append(parseStats(entry))
- elif result and isinstance(result, dict):
- result[name] = parseStats(entry)
- else:
- try:
- int(name)
- result = list()
- result.append(parseStats(entry))
- except ValueError:
- result = dict()
- result[name] = parseStats(entry)
- return result
diff --git a/lib/ansible/module_utils/network/fortianalyzer/__init__.py b/lib/ansible/module_utils/network/fortianalyzer/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/fortianalyzer/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/fortianalyzer/common.py b/lib/ansible/module_utils/network/fortianalyzer/common.py
deleted file mode 100644
index 546f71aa12..0000000000
--- a/lib/ansible/module_utils/network/fortianalyzer/common.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2017 Fortinet, Inc
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-# BEGIN STATIC DATA AND MESSAGES
-class FAZMethods:
- GET = "get"
- SET = "set"
- EXEC = "exec"
- EXECUTE = "exec"
- UPDATE = "update"
- ADD = "add"
- DELETE = "delete"
- REPLACE = "replace"
- CLONE = "clone"
- MOVE = "move"
-
-
-BASE_HEADERS = {
- 'Content-Type': 'application/json',
- 'Accept': 'application/json'
-}
-
-
-# FAZ RETURN CODES
-FAZ_RC = {
- "faz_return_codes": {
- 0: {
- "msg": "OK",
- "changed": True,
- "stop_on_success": True
- },
- -100000: {
- "msg": "Module returned without actually running anything. "
- "Check parameters, and please contact the authors if needed.",
- "failed": True
- },
- -2: {
- "msg": "Object already exists.",
- "skipped": True,
- "changed": False,
- "good_codes": [0, -2]
- },
- -6: {
- "msg": "Invalid Url. Sometimes this can happen because the path is mapped to a hostname or object that"
- " doesn't exist. Double check your input object parameters."
- },
- -3: {
- "msg": "Object doesn't exist.",
- "skipped": True,
- "changed": False,
- "good_codes": [0, -3]
- },
- -10131: {
- "msg": "Object dependency failed. Do all named objects in parameters exist?",
- "changed": False,
- "skipped": True
- },
- -9998: {
- "msg": "Duplicate object. Try using mode='set', if using add. STOPPING. Use 'ignore_errors=yes' in playbook "
- "to override and mark successful.",
- },
- -20042: {
- "msg": "Device Unreachable.",
- "skipped": True
- },
- -10033: {
- "msg": "Duplicate object. Try using mode='set', if using add.",
- "changed": False,
- "skipped": True
- },
- -10000: {
- "msg": "Duplicate object. Try using mode='set', if using add.",
- "changed": False,
- "skipped": True
- },
- -20010: {
- "msg": "Device already added to FortiAnalyzer. Serial number already in use.",
- "good_codes": [0, -20010],
- "changed": False,
- "stop_on_failure": False
- },
- -20002: {
- "msg": "Invalid Argument -- Does this Device exist on FortiAnalyzer?",
- "changed": False,
- "skipped": True,
- }
- }
-}
-
-DEFAULT_RESULT_OBJ = (-100000, {"msg": "Nothing Happened. Check that handle_response is being called!"})
-FAIL_SOCKET_MSG = {"msg": "Socket Path Empty! The persistent connection manager is messed up. "
- "Try again in a few moments."}
-
-
-# BEGIN ERROR EXCEPTIONS
-class FAZBaseException(Exception):
- """Wrapper to catch the unexpected"""
-
- def __init__(self, msg=None, *args, **kwargs):
- if msg is None:
- msg = "An exception occurred within the fortianalyzer.py httpapi connection plugin."
- super(FAZBaseException, self).__init__(msg, *args)
-
-# END ERROR CLASSES
-
-
-# BEGIN CLASSES
-class FAZCommon(object):
-
- @staticmethod
- def format_request(method, url, *args, **kwargs):
- """
- Formats the payload from the module, into a payload the API handler can use.
-
- :param url: Connection URL to access
- :type url: string
- :param method: The preferred API Request method (GET, ADD, POST, etc....)
- :type method: basestring
- :param kwargs: The payload dictionary from the module to be converted.
-
- :return: Properly formatted dictionary payload for API Request via Connection Plugin.
- :rtype: dict
- """
-
- params = [{"url": url}]
- if args:
- for arg in args:
- params[0].update(arg)
- if kwargs:
- keylist = list(kwargs)
- for k in keylist:
- kwargs[k.replace("__", "-")] = kwargs.pop(k)
- if method == "get" or method == "clone":
- params[0].update(kwargs)
- else:
- if kwargs.get("data", False):
- params[0]["data"] = kwargs["data"]
- else:
- params[0]["data"] = kwargs
- return params
-
- @staticmethod
- def split_comma_strings_into_lists(obj):
- """
- Splits a CSV String into a list. Also takes a dictionary, and converts any CSV strings in any key, to a list.
-
- :param obj: object in CSV format to be parsed.
- :type obj: str or dict
-
- :return: A list containing the CSV items.
- :rtype: list
- """
- return_obj = ()
- if isinstance(obj, dict):
- if len(obj) > 0:
- for k, v in obj.items():
- if isinstance(v, str):
- new_list = list()
- if "," in v:
- new_items = v.split(",")
- for item in new_items:
- new_list.append(item.strip())
- obj[k] = new_list
- return_obj = obj
- elif isinstance(obj, str):
- return_obj = obj.replace(" ", "").split(",")
-
- return return_obj
-
- @staticmethod
- def cidr_to_netmask(cidr):
- """
- Converts a CIDR Network string to full blown IP/Subnet format in decimal format.
- Decided not to use the IP Address module to keep includes to a minimum.
-
- :param cidr: String object in CIDR format to be processed
- :type cidr: str
-
- :return: A string object that looks like this "x.x.x.x/y.y.y.y"
- :rtype: str
- """
- if isinstance(cidr, str):
- cidr = int(cidr)
- mask = (0xffffffff >> (32 - cidr)) << (32 - cidr)
- return (str((0xff000000 & mask) >> 24) + '.'
- + str((0x00ff0000 & mask) >> 16) + '.'
- + str((0x0000ff00 & mask) >> 8) + '.'
- + str((0x000000ff & mask)))
-
- @staticmethod
- def paramgram_child_list_override(list_overrides, paramgram, module):
- """
- If a list of items was provided to a "parent" paramgram attribute, the paramgram needs to be rewritten.
- The child keys of the desired attribute need to be deleted, and then that "parent" key's contents are replaced
- with the list of items that was provided.
-
- :param list_overrides: Contains the response from the FortiAnalyzer.
- :type list_overrides: list
- :param paramgram: Contains the paramgram passed to the modules' local modify function.
- :type paramgram: dict
- :param module: Contains the Ansible Module Object being used by the module.
- :type module: classObject
-
- :return: A new "paramgram" refactored to allow for multiple entries being added.
- :rtype: dict
- """
- if len(list_overrides) > 0:
- for list_variable in list_overrides:
- try:
- list_variable = list_variable.replace("-", "_")
- override_data = module.params[list_variable]
- if override_data:
- del paramgram[list_variable]
- paramgram[list_variable] = override_data
- except BaseException as e:
- raise FAZBaseException("Error occurred merging custom lists for the paramgram parent: " + str(e))
- return paramgram
-
- @staticmethod
- def syslog(module, msg):
- try:
- module.log(msg=msg)
- except BaseException:
- pass
-
-
-# RECURSIVE FUNCTIONS START
-def prepare_dict(obj):
- """
- Removes any keys from a dictionary that are only specific to our use in the module. FortiAnalyzer will reject
- requests with these empty/None keys in it.
-
- :param obj: Dictionary object to be processed.
- :type obj: dict
-
- :return: Processed dictionary.
- :rtype: dict
- """
-
- list_of_elems = ["mode", "adom", "host", "username", "password"]
-
- if isinstance(obj, dict):
- obj = dict((key, prepare_dict(value)) for (key, value) in obj.items() if key not in list_of_elems)
- return obj
-
-
-def scrub_dict(obj):
- """
- Removes any keys from a dictionary that are EMPTY -- this includes parent keys. FortiAnalyzer doesn't
- like empty keys in dictionaries
-
- :param obj: Dictionary object to be processed.
- :type obj: dict
-
- :return: Processed dictionary.
- :rtype: dict
- """
-
- if isinstance(obj, dict):
- return dict((k, scrub_dict(v)) for k, v in obj.items() if v and scrub_dict(v))
- else:
- return obj
diff --git a/lib/ansible/module_utils/network/fortianalyzer/fortianalyzer.py b/lib/ansible/module_utils/network/fortianalyzer/fortianalyzer.py
deleted file mode 100644
index a018c0c940..0000000000
--- a/lib/ansible/module_utils/network/fortianalyzer/fortianalyzer.py
+++ /dev/null
@@ -1,477 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2017 Fortinet, Inc
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-from ansible.module_utils.network.fortianalyzer.common import FAZ_RC
-from ansible.module_utils.network.fortianalyzer.common import FAZBaseException
-from ansible.module_utils.network.fortianalyzer.common import FAZCommon
-from ansible.module_utils.network.fortianalyzer.common import scrub_dict
-from ansible.module_utils.network.fortianalyzer.common import FAZMethods
-
-
-# ACTIVE BUG WITH OUR DEBUG IMPORT CALL - BECAUSE IT'S UNDER MODULE_UTILITIES
-# WHEN module_common.recursive_finder() runs under the module loader, it looks for this namespace debug import
-# and because it's not there, it always fails, regardless of it being under a try/catch here.
-# we're going to move it to a different namespace.
-# # check for debug lib
-# try:
-# from ansible.module_utils.network.fortianalyzer.fortianalyzer_debug import debug_dump
-# HAS_FAZ_DEBUG = True
-# except:
-# HAS_FAZ_DEBUG = False
-
-
-# BEGIN HANDLER CLASSES
-class FortiAnalyzerHandler(object):
- def __init__(self, conn, module):
- self._conn = conn
- self._module = module
- self._tools = FAZCommon
- self._uses_workspace = None
- self._uses_adoms = None
- self._locked_adom_list = list()
- self._lock_info = None
-
- self.workspace_check()
- if self._uses_workspace:
- self.get_lock_info(adom=self._module.paramgram["adom"])
-
- def process_request(self, url, datagram, method):
- """
- Formats and Runs the API Request via Connection Plugin. Streamlined for use from Modules.
-
- :param url: Connection URL to access
- :type url: string
- :param datagram: The prepared payload for the API Request in dictionary format
- :type datagram: dict
- :param method: The preferred API Request method (GET, ADD, POST, etc....)
- :type method: basestring
-
- :return: Dictionary containing results of the API Request via Connection Plugin.
- :rtype: dict
- """
- try:
- adom = self._module.paramgram["adom"]
- if self.uses_workspace and adom not in self._locked_adom_list and method != FAZMethods.GET:
- self.lock_adom(adom=adom)
- except BaseException as err:
- raise FAZBaseException(err)
-
- data = self._tools.format_request(method, url, **datagram)
- response = self._conn.send_request(method, data)
-
- try:
- adom = self._module.paramgram["adom"]
- if self.uses_workspace and adom in self._locked_adom_list \
- and response[0] == 0 and method != FAZMethods.GET:
- self.commit_changes(adom=adom)
- except BaseException as err:
- raise FAZBaseException(err)
-
- # if HAS_FAZ_DEBUG:
- # try:
- # debug_dump(response, datagram, self._module.paramgram, url, method)
- # except BaseException:
- # pass
-
- return response
-
- def workspace_check(self):
- """
- Checks FortiAnalyzer for the use of Workspace mode.
- """
- url = "/cli/global/system/global"
- data = {"fields": ["workspace-mode", "adom-status"]}
- resp_obj = self.process_request(url, data, FAZMethods.GET)
- try:
- if resp_obj[1]["workspace-mode"] in ["workflow", "normal"]:
- self.uses_workspace = True
- elif resp_obj[1]["workspace-mode"] == "disabled":
- self.uses_workspace = False
- except KeyError:
- self.uses_workspace = False
- except BaseException as err:
- raise FAZBaseException(msg="Couldn't determine workspace-mode in the plugin. Error: " + str(err))
- try:
- if resp_obj[1]["adom-status"] in [1, "enable"]:
- self.uses_adoms = True
- else:
- self.uses_adoms = False
- except KeyError:
- self.uses_adoms = False
- except BaseException as err:
- raise FAZBaseException(msg="Couldn't determine adom-status in the plugin. Error: " + str(err))
-
- def run_unlock(self):
- """
- Checks for ADOM status, if locked, it will unlock
- """
- for adom_locked in self._locked_adom_list:
- self.unlock_adom(adom_locked)
-
- def lock_adom(self, adom=None):
- """
- Locks an ADOM for changes
- """
- if not adom or adom == "root":
- url = "/dvmdb/adom/root/workspace/lock"
- else:
- if adom.lower() == "global":
- url = "/dvmdb/global/workspace/lock/"
- else:
- url = "/dvmdb/adom/{adom}/workspace/lock/".format(adom=adom)
- datagram = {}
- data = self._tools.format_request(FAZMethods.EXEC, url, **datagram)
- resp_obj = self._conn.send_request(FAZMethods.EXEC, data)
- code = resp_obj[0]
- if code == 0 and resp_obj[1]["status"]["message"].lower() == "ok":
- self.add_adom_to_lock_list(adom)
- else:
- lockinfo = self.get_lock_info(adom=adom)
- self._module.fail_json(msg=("An error occurred trying to lock the adom. Error: "
- + str(resp_obj) + ", LOCK INFO: " + str(lockinfo)))
- return resp_obj
-
- def unlock_adom(self, adom=None):
- """
- Unlocks an ADOM after changes
- """
- if not adom or adom == "root":
- url = "/dvmdb/adom/root/workspace/unlock"
- else:
- if adom.lower() == "global":
- url = "/dvmdb/global/workspace/unlock/"
- else:
- url = "/dvmdb/adom/{adom}/workspace/unlock/".format(adom=adom)
- datagram = {}
- data = self._tools.format_request(FAZMethods.EXEC, url, **datagram)
- resp_obj = self._conn.send_request(FAZMethods.EXEC, data)
- code = resp_obj[0]
- if code == 0 and resp_obj[1]["status"]["message"].lower() == "ok":
- self.remove_adom_from_lock_list(adom)
- else:
- self._module.fail_json(msg=("An error occurred trying to unlock the adom. Error: " + str(resp_obj)))
- return resp_obj
-
- def get_lock_info(self, adom=None):
- """
- Gets ADOM lock info so it can be displayed with the error messages. Or if determined to be locked by ansible
- for some reason, then unlock it.
- """
- if not adom or adom == "root":
- url = "/dvmdb/adom/root/workspace/lockinfo"
- else:
- if adom.lower() == "global":
- url = "/dvmdb/global/workspace/lockinfo/"
- else:
- url = "/dvmdb/adom/{adom}/workspace/lockinfo/".format(adom=adom)
- datagram = {}
- data = self._tools.format_request(FAZMethods.GET, url, **datagram)
- resp_obj = self._conn.send_request(FAZMethods.GET, data)
- code = resp_obj[0]
- if code != 0:
- self._module.fail_json(msg=("An error occurred trying to get the ADOM Lock Info. Error: " + str(resp_obj)))
- elif code == 0:
- self._lock_info = resp_obj[1]
- return resp_obj
-
- def commit_changes(self, adom=None, aux=False):
- """
- Commits changes to an ADOM
- """
- if not adom or adom == "root":
- url = "/dvmdb/adom/root/workspace/commit"
- else:
- if aux:
- url = "/pm/config/adom/{adom}/workspace/commit".format(adom=adom)
- else:
- if adom.lower() == "global":
- url = "/dvmdb/global/workspace/commit/"
- else:
- url = "/dvmdb/adom/{adom}/workspace/commit".format(adom=adom)
- datagram = {}
- data = self._tools.format_request(FAZMethods.EXEC, url, **datagram)
- resp_obj = self._conn.send_request(FAZMethods.EXEC, data)
- code = resp_obj[0]
- if code != 0:
- self._module.fail_json(msg=("An error occurred trying to commit changes to the adom. Error: "
- + str(resp_obj)))
-
- def govern_response(self, module, results, msg=None, good_codes=None,
- stop_on_fail=None, stop_on_success=None, skipped=None,
- changed=None, unreachable=None, failed=None, success=None, changed_if_success=None,
- ansible_facts=None):
- """
- This function will attempt to apply default values to canned responses from FortiAnalyzer we know of.
- This saves time, and turns the response in the module into a "one-liner", while still giving us...
- the flexibility to directly use return_response in modules if we have to. This function saves repeated code.
-
- :param module: The Ansible Module CLASS object, used to run fail/exit json
- :type module: object
- :param msg: An overridable custom message from the module that called this.
- :type msg: string
- :param results: A dictionary object containing an API call results
- :type results: dict
- :param good_codes: A list of exit codes considered successful from FortiAnalyzer
- :type good_codes: list
- :param stop_on_fail: If true, stops playbook run when return code is NOT IN good codes (default: true)
- :type stop_on_fail: boolean
- :param stop_on_success: If true, stops playbook run when return code is IN good codes (default: false)
- :type stop_on_success: boolean
- :param changed: If True, tells Ansible that object was changed (default: false)
- :type changed: boolean
- :param skipped: If True, tells Ansible that object was skipped (default: false)
- :type skipped: boolean
- :param unreachable: If True, tells Ansible that object was unreachable (default: false)
- :type unreachable: boolean
- :param failed: If True, tells Ansible that execution was a failure. Overrides good_codes. (default: false)
- :type failed: boolean
- :param success: If True, tells Ansible that execution was a success. Overrides good_codes. (default: false)
- :type success: boolean
- :param changed_if_success: If True, defaults "changed" to True on success unless explicitly overridden
- :type changed_if_success: boolean
- :param ansible_facts: A prepared dictionary of ansible facts from the execution.
- :type ansible_facts: dict
- """
- if module is None and results is None:
- raise FAZBaseException("govern_response() was called without a module and/or results tuple! Fix!")
- # Get the Return code from results
- try:
- rc = results[0]
- except BaseException:
- raise FAZBaseException("govern_response() was called without the return code at results[0]")
-
- # init a few items
- rc_data = None
-
- # Get the default values for the said return code.
- try:
- rc_codes = FAZ_RC.get('faz_return_codes')
- rc_data = rc_codes.get(rc)
- except BaseException:
- pass
-
- if not rc_data:
- rc_data = {}
- # ONLY add to overrides if not none -- This is very important that the keys aren't added at this stage
- # if they are empty. And there aren't that many, so let's just do a few if then statements.
- if good_codes is not None:
- rc_data["good_codes"] = good_codes
- if stop_on_fail is not None:
- rc_data["stop_on_fail"] = stop_on_fail
- if stop_on_success is not None:
- rc_data["stop_on_success"] = stop_on_success
- if skipped is not None:
- rc_data["skipped"] = skipped
- if changed is not None:
- rc_data["changed"] = changed
- if unreachable is not None:
- rc_data["unreachable"] = unreachable
- if failed is not None:
- rc_data["failed"] = failed
- if success is not None:
- rc_data["success"] = success
- if changed_if_success is not None:
- rc_data["changed_if_success"] = changed_if_success
- if results is not None:
- rc_data["results"] = results
- if msg is not None:
- rc_data["msg"] = msg
- if ansible_facts is None:
- rc_data["ansible_facts"] = {}
- else:
- rc_data["ansible_facts"] = ansible_facts
-
- return self.return_response(module=module,
- results=results,
- msg=rc_data.get("msg", "NULL"),
- good_codes=rc_data.get("good_codes", (0,)),
- stop_on_fail=rc_data.get("stop_on_fail", True),
- stop_on_success=rc_data.get("stop_on_success", False),
- skipped=rc_data.get("skipped", False),
- changed=rc_data.get("changed", False),
- changed_if_success=rc_data.get("changed_if_success", False),
- unreachable=rc_data.get("unreachable", False),
- failed=rc_data.get("failed", False),
- success=rc_data.get("success", False),
- ansible_facts=rc_data.get("ansible_facts", dict()))
-
- def return_response(self, module, results, msg="NULL", good_codes=(0,),
- stop_on_fail=True, stop_on_success=False, skipped=False,
- changed=False, unreachable=False, failed=False, success=False, changed_if_success=True,
- ansible_facts=()):
- """
- This function controls the logout and error reporting after an method or function runs. The exit_json for
- ansible comes from logic within this function. If this function returns just the msg, it means to continue
- execution on the playbook. It is called from the ansible module, or from the self.govern_response function.
-
- :param module: The Ansible Module CLASS object, used to run fail/exit json
- :type module: object
- :param msg: An overridable custom message from the module that called this.
- :type msg: string
- :param results: A dictionary object containing an API call results
- :type results: dict
- :param good_codes: A list of exit codes considered successful from FortiAnalyzer
- :type good_codes: list
- :param stop_on_fail: If true, stops playbook run when return code is NOT IN good codes (default: true)
- :type stop_on_fail: boolean
- :param stop_on_success: If true, stops playbook run when return code is IN good codes (default: false)
- :type stop_on_success: boolean
- :param changed: If True, tells Ansible that object was changed (default: false)
- :type skipped: boolean
- :param skipped: If True, tells Ansible that object was skipped (default: false)
- :type skipped: boolean
- :param unreachable: If True, tells Ansible that object was unreachable (default: false)
- :type unreachable: boolean
- :param failed: If True, tells Ansible that execution was a failure. Overrides good_codes. (default: false)
- :type unreachable: boolean
- :param success: If True, tells Ansible that execution was a success. Overrides good_codes. (default: false)
- :type unreachable: boolean
- :param changed_if_success: If True, defaults to changed if successful if you specify or not"
- :type changed_if_success: boolean
- :param ansible_facts: A prepared dictionary of ansible facts from the execution.
- :type ansible_facts: dict
-
- :return: A string object that contains an error message
- :rtype: str
- """
-
- # VALIDATION ERROR
- if (len(results) == 0) or (failed and success) or (changed and unreachable):
- module.exit_json(msg="Handle_response was called with no results, or conflicting failed/success or "
- "changed/unreachable parameters. Fix the exit code on module. "
- "Generic Failure", failed=True)
-
- # IDENTIFY SUCCESS/FAIL IF NOT DEFINED
- if not failed and not success:
- if len(results) > 0:
- if results[0] not in good_codes:
- failed = True
- elif results[0] in good_codes:
- success = True
-
- if len(results) > 0:
- # IF NO MESSAGE WAS SUPPLIED, GET IT FROM THE RESULTS, IF THAT DOESN'T WORK, THEN WRITE AN ERROR MESSAGE
- if msg == "NULL":
- try:
- msg = results[1]['status']['message']
- except BaseException:
- msg = "No status message returned at results[1][status][message], " \
- "and none supplied to msg parameter for handle_response."
-
- if failed:
- # BECAUSE SKIPPED/FAILED WILL OFTEN OCCUR ON CODES THAT DON'T GET INCLUDED, THEY ARE CONSIDERED FAILURES
- # HOWEVER, THEY ARE MUTUALLY EXCLUSIVE, SO IF IT IS MARKED SKIPPED OR UNREACHABLE BY THE MODULE LOGIC
- # THEN REMOVE THE FAILED FLAG SO IT DOESN'T OVERRIDE THE DESIRED STATUS OF SKIPPED OR UNREACHABLE.
- if failed and skipped:
- failed = False
- if failed and unreachable:
- failed = False
- if stop_on_fail:
- if self._uses_workspace:
- try:
- self.run_unlock()
- except BaseException as err:
- raise FAZBaseException(msg=("Couldn't unlock ADOM! Error: " + str(err)))
- module.exit_json(msg=msg, failed=failed, changed=changed, unreachable=unreachable, skipped=skipped,
- results=results[1], ansible_facts=ansible_facts, rc=results[0],
- invocation={"module_args": ansible_facts["ansible_params"]})
- elif success:
- if changed_if_success:
- changed = True
- success = False
- if stop_on_success:
- if self._uses_workspace:
- try:
- self.run_unlock()
- except BaseException as err:
- raise FAZBaseException(msg=("Couldn't unlock ADOM! Error: " + str(err)))
- module.exit_json(msg=msg, success=success, changed=changed, unreachable=unreachable,
- skipped=skipped, results=results[1], ansible_facts=ansible_facts, rc=results[0],
- invocation={"module_args": ansible_facts["ansible_params"]})
- return msg
-
- @staticmethod
- def construct_ansible_facts(response, ansible_params, paramgram, *args, **kwargs):
- """
- Constructs a dictionary to return to ansible facts, containing various information about the execution.
-
- :param response: Contains the response from the FortiAnalyzer.
- :type response: dict
- :param ansible_params: Contains the parameters Ansible was called with.
- :type ansible_params: dict
- :param paramgram: Contains the paramgram passed to the modules' local modify function.
- :type paramgram: dict
- :param args: Free-form arguments that could be added.
- :param kwargs: Free-form keyword arguments that could be added.
-
- :return: A dictionary containing lots of information to append to Ansible Facts.
- :rtype: dict
- """
-
- facts = {
- "response": response,
- "ansible_params": scrub_dict(ansible_params),
- "paramgram": scrub_dict(paramgram),
- }
-
- if args:
- facts["custom_args"] = args
- if kwargs:
- facts.update(kwargs)
-
- return facts
-
- @property
- def uses_workspace(self):
- return self._uses_workspace
-
- @uses_workspace.setter
- def uses_workspace(self, val):
- self._uses_workspace = val
-
- @property
- def uses_adoms(self):
- return self._uses_adoms
-
- @uses_adoms.setter
- def uses_adoms(self, val):
- self._uses_adoms = val
-
- def add_adom_to_lock_list(self, adom):
- if adom not in self._locked_adom_list:
- self._locked_adom_list.append(adom)
-
- def remove_adom_from_lock_list(self, adom):
- if adom in self._locked_adom_list:
- self._locked_adom_list.remove(adom)
diff --git a/lib/ansible/module_utils/network/ftd/__init__.py b/lib/ansible/module_utils/network/ftd/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/ftd/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/ftd/common.py b/lib/ansible/module_utils/network/ftd/common.py
deleted file mode 100644
index de3f459d5b..0000000000
--- a/lib/ansible/module_utils/network/ftd/common.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-import re
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.common.collections import is_string
-from ansible.module_utils.six import iteritems
-
-INVALID_IDENTIFIER_SYMBOLS = r'[^a-zA-Z0-9_]'
-
-IDENTITY_PROPERTIES = ['id', 'version', 'ruleId']
-NON_COMPARABLE_PROPERTIES = IDENTITY_PROPERTIES + ['isSystemDefined', 'links', 'token', 'rulePosition']
-
-
-class HTTPMethod:
- GET = 'get'
- POST = 'post'
- PUT = 'put'
- DELETE = 'delete'
-
-
-class ResponseParams:
- SUCCESS = 'success'
- STATUS_CODE = 'status_code'
- RESPONSE = 'response'
-
-
-class FtdConfigurationError(Exception):
- def __init__(self, msg, obj=None):
- super(FtdConfigurationError, self).__init__(msg)
- self.msg = msg
- self.obj = obj
-
-
-class FtdServerError(Exception):
- def __init__(self, response, code):
- super(FtdServerError, self).__init__(response)
- self.response = response
- self.code = code
-
-
-class FtdUnexpectedResponse(Exception):
- """The exception to be raised in case of unexpected responses from 3d parties."""
- pass
-
-
-def construct_ansible_facts(response, params):
- facts = dict()
- if response:
- response_body = response['items'] if 'items' in response else response
- if params.get('register_as'):
- facts[params['register_as']] = response_body
- elif type(response_body) is dict and response_body.get('name') and response_body.get('type'):
- object_name = re.sub(INVALID_IDENTIFIER_SYMBOLS, '_', response_body['name'].lower())
- fact_name = '%s_%s' % (response_body['type'], object_name)
- facts[fact_name] = response_body
- return facts
-
-
-def copy_identity_properties(source_obj, dest_obj):
- for property_name in IDENTITY_PROPERTIES:
- if property_name in source_obj:
- dest_obj[property_name] = source_obj[property_name]
- return dest_obj
-
-
-def is_object_ref(d):
- """
- Checks if a dictionary is a reference object. The dictionary is considered to be a
- reference object when it contains non-empty 'id' and 'type' fields.
-
- :type d: dict
- :return: True if passed dictionary is a reference object, otherwise False
- """
- has_id = 'id' in d.keys() and d['id']
- has_type = 'type' in d.keys() and d['type']
- return has_id and has_type
-
-
-def equal_object_refs(d1, d2):
- """
- Checks whether two references point to the same object.
-
- :type d1: dict
- :type d2: dict
- :return: True if passed references point to the same object, otherwise False
- """
- have_equal_ids = d1['id'] == d2['id']
- have_equal_types = d1['type'] == d2['type']
- return have_equal_ids and have_equal_types
-
-
-def equal_lists(l1, l2):
- """
- Checks whether two lists are equal. The order of elements in the arrays is important.
-
- :type l1: list
- :type l2: list
- :return: True if passed lists, their elements and order of elements are equal. Otherwise, returns False.
- """
- if len(l1) != len(l2):
- return False
-
- for v1, v2 in zip(l1, l2):
- if not equal_values(v1, v2):
- return False
-
- return True
-
-
-def equal_dicts(d1, d2, compare_by_reference=True):
- """
- Checks whether two dictionaries are equal. If `compare_by_reference` is set to True, dictionaries referencing
- objects are compared using `equal_object_refs` method. Otherwise, every key and value is checked.
-
- :type d1: dict
- :type d2: dict
- :param compare_by_reference: if True, dictionaries referencing objects are compared using `equal_object_refs` method
- :return: True if passed dicts are equal. Otherwise, returns False.
- """
- if compare_by_reference and is_object_ref(d1) and is_object_ref(d2):
- return equal_object_refs(d1, d2)
-
- if len(d1) != len(d2):
- return False
-
- for key, v1 in d1.items():
- if key not in d2:
- return False
-
- v2 = d2[key]
- if not equal_values(v1, v2):
- return False
-
- return True
-
-
-def equal_values(v1, v2):
- """
- Checks whether types and content of two values are the same. In case of complex objects, the method might be
- called recursively.
-
- :param v1: first value
- :param v2: second value
- :return: True if types and content of passed values are equal. Otherwise, returns False.
- :rtype: bool
- """
-
- # string-like values might have same text but different types, so checking them separately
- if is_string(v1) and is_string(v2):
- return to_text(v1) == to_text(v2)
-
- if type(v1) != type(v2):
- return False
- value_type = type(v1)
-
- if value_type == list:
- return equal_lists(v1, v2)
- elif value_type == dict:
- return equal_dicts(v1, v2)
- else:
- return v1 == v2
-
-
-def equal_objects(d1, d2):
- """
- Checks whether two objects are equal. Ignores special object properties (e.g. 'id', 'version') and
- properties with None and empty values. In case properties contains a reference to the other object,
- only object identities (ids and types) are checked. Also, if an array field contains multiple references
- to the same object, duplicates are ignored when comparing objects.
-
- :type d1: dict
- :type d2: dict
- :return: True if passed objects and their properties are equal. Otherwise, returns False.
- """
-
- def prepare_data_for_comparison(d):
- d = dict((k, d[k]) for k in d.keys() if k not in NON_COMPARABLE_PROPERTIES and d[k])
- d = delete_ref_duplicates(d)
- return d
-
- d1 = prepare_data_for_comparison(d1)
- d2 = prepare_data_for_comparison(d2)
- return equal_dicts(d1, d2, compare_by_reference=False)
-
-
-def delete_ref_duplicates(d):
- """
- Removes reference duplicates from array fields: if an array contains multiple items and some of
- them refer to the same object, only unique references are preserved (duplicates are removed).
-
- :param d: dict with data
- :type d: dict
- :return: dict without reference duplicates
- """
-
- def delete_ref_duplicates_from_list(refs):
- if all(type(i) == dict and is_object_ref(i) for i in refs):
- unique_refs = set()
- unique_list = list()
- for i in refs:
- key = (i['id'], i['type'])
- if key not in unique_refs:
- unique_refs.add(key)
- unique_list.append(i)
-
- return list(unique_list)
-
- else:
- return refs
-
- if not d:
- return d
-
- modified_d = {}
- for k, v in iteritems(d):
- if type(v) == list:
- modified_d[k] = delete_ref_duplicates_from_list(v)
- elif type(v) == dict:
- modified_d[k] = delete_ref_duplicates(v)
- else:
- modified_d[k] = v
- return modified_d
diff --git a/lib/ansible/module_utils/network/ftd/configuration.py b/lib/ansible/module_utils/network/ftd/configuration.py
deleted file mode 100644
index 975bef379c..0000000000
--- a/lib/ansible/module_utils/network/ftd/configuration.py
+++ /dev/null
@@ -1,565 +0,0 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-import copy
-from functools import partial
-
-from ansible.module_utils.network.ftd.common import HTTPMethod, equal_objects, FtdConfigurationError, \
- FtdServerError, ResponseParams, copy_identity_properties, FtdUnexpectedResponse
-from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField, ValidationError
-from ansible.module_utils.six import iteritems
-
-DEFAULT_PAGE_SIZE = 10
-DEFAULT_OFFSET = 0
-
-UNPROCESSABLE_ENTITY_STATUS = 422
-INVALID_UUID_ERROR_MESSAGE = "Validation failed due to an invalid UUID"
-DUPLICATE_NAME_ERROR_MESSAGE = "Validation failed due to a duplicate name"
-
-MULTIPLE_DUPLICATES_FOUND_ERROR = (
- "Multiple objects matching specified filters are found. "
- "Please, define filters more precisely to match one object exactly."
-)
-DUPLICATE_ERROR = (
- "Cannot add a new object. "
- "An object with the same name but different parameters already exists."
-)
-ADD_OPERATION_NOT_SUPPORTED_ERROR = (
- "Cannot add a new object while executing an upsert request. "
- "Creation of objects with this type is not supported."
-)
-
-PATH_PARAMS_FOR_DEFAULT_OBJ = {'objId': 'default'}
-
-
-class OperationNamePrefix:
- ADD = 'add'
- EDIT = 'edit'
- GET = 'get'
- DELETE = 'delete'
- UPSERT = 'upsert'
-
-
-class QueryParams:
- FILTER = 'filter'
-
-
-class ParamName:
- QUERY_PARAMS = 'query_params'
- PATH_PARAMS = 'path_params'
- DATA = 'data'
- FILTERS = 'filters'
-
-
-class CheckModeException(Exception):
- pass
-
-
-class FtdInvalidOperationNameError(Exception):
- def __init__(self, operation_name):
- super(FtdInvalidOperationNameError, self).__init__(operation_name)
- self.operation_name = operation_name
-
-
-class OperationChecker(object):
-
- @classmethod
- def is_add_operation(cls, operation_name, operation_spec):
- """
- Check if operation defined with 'operation_name' is add object operation according to 'operation_spec'.
-
- :param operation_name: name of the operation being called by the user
- :type operation_name: str
- :param operation_spec: specification of the operation being called by the user
- :type operation_spec: dict
- :return: True if the called operation is add object operation, otherwise False
- :rtype: bool
- """
- # Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method
- return operation_name.startswith(OperationNamePrefix.ADD) and is_post_request(operation_spec)
-
- @classmethod
- def is_edit_operation(cls, operation_name, operation_spec):
- """
- Check if operation defined with 'operation_name' is edit object operation according to 'operation_spec'.
-
- :param operation_name: name of the operation being called by the user
- :type operation_name: str
- :param operation_spec: specification of the operation being called by the user
- :type operation_spec: dict
- :return: True if the called operation is edit object operation, otherwise False
- :rtype: bool
- """
- # Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method
- return operation_name.startswith(OperationNamePrefix.EDIT) and is_put_request(operation_spec)
-
- @classmethod
- def is_delete_operation(cls, operation_name, operation_spec):
- """
- Check if operation defined with 'operation_name' is delete object operation according to 'operation_spec'.
-
- :param operation_name: name of the operation being called by the user
- :type operation_name: str
- :param operation_spec: specification of the operation being called by the user
- :type operation_spec: dict
- :return: True if the called operation is delete object operation, otherwise False
- :rtype: bool
- """
- # Some endpoints have non-CRUD operations, so checking operation name is required in addition to the HTTP method
- return operation_name.startswith(OperationNamePrefix.DELETE) \
- and operation_spec[OperationField.METHOD] == HTTPMethod.DELETE
-
- @classmethod
- def is_get_list_operation(cls, operation_name, operation_spec):
- """
- Check if operation defined with 'operation_name' is get list of objects operation according to 'operation_spec'.
-
- :param operation_name: name of the operation being called by the user
- :type operation_name: str
- :param operation_spec: specification of the operation being called by the user
- :type operation_spec: dict
- :return: True if the called operation is get a list of objects operation, otherwise False
- :rtype: bool
- """
- return operation_spec[OperationField.METHOD] == HTTPMethod.GET \
- and operation_spec[OperationField.RETURN_MULTIPLE_ITEMS]
-
- @classmethod
- def is_get_operation(cls, operation_name, operation_spec):
- """
- Check if operation defined with 'operation_name' is get objects operation according to 'operation_spec'.
-
- :param operation_name: name of the operation being called by the user
- :type operation_name: str
- :param operation_spec: specification of the operation being called by the user
- :type operation_spec: dict
- :return: True if the called operation is get object operation, otherwise False
- :rtype: bool
- """
- return operation_spec[OperationField.METHOD] == HTTPMethod.GET \
- and not operation_spec[OperationField.RETURN_MULTIPLE_ITEMS]
-
- @classmethod
- def is_upsert_operation(cls, operation_name):
- """
- Check if operation defined with 'operation_name' is upsert objects operation according to 'operation_name'.
-
- :param operation_name: name of the operation being called by the user
- :type operation_name: str
- :return: True if the called operation is upsert object operation, otherwise False
- :rtype: bool
- """
- return operation_name.startswith(OperationNamePrefix.UPSERT)
-
- @classmethod
- def is_find_by_filter_operation(cls, operation_name, params, operation_spec):
- """
- Checks whether the called operation is 'find by filter'. This operation fetches all objects and finds
- the matching ones by the given filter. As filtering is done on the client side, this operation should be used
- only when selected filters are not implemented on the server side.
-
- :param operation_name: name of the operation being called by the user
- :type operation_name: str
- :param operation_spec: specification of the operation being called by the user
- :type operation_spec: dict
- :param params: params - params should contain 'filters'
- :return: True if the called operation is find by filter, otherwise False
- :rtype: bool
- """
- is_get_list = cls.is_get_list_operation(operation_name, operation_spec)
- return is_get_list and ParamName.FILTERS in params and params[ParamName.FILTERS]
-
- @classmethod
- def is_upsert_operation_supported(cls, operations):
- """
- Checks if all operations required for upsert object operation are defined in 'operations'.
-
- :param operations: specification of the operations supported by model
- :type operations: dict
- :return: True if all criteria required to provide requested called operation are satisfied, otherwise False
- :rtype: bool
- """
- has_edit_op = next((name for name, spec in iteritems(operations) if cls.is_edit_operation(name, spec)), None)
- has_get_list_op = next((name for name, spec in iteritems(operations)
- if cls.is_get_list_operation(name, spec)), None)
- return has_edit_op and has_get_list_op
-
-
-class BaseConfigurationResource(object):
-
- def __init__(self, conn, check_mode=False):
- self._conn = conn
- self.config_changed = False
- self._operation_spec_cache = {}
- self._models_operations_specs_cache = {}
- self._check_mode = check_mode
- self._operation_checker = OperationChecker
- self._system_info = None
-
- def execute_operation(self, op_name, params):
- """
- Allow user request execution of simple operations(natively supported by API provider) as well as complex
- operations(operations that are implemented as a set of simple operations).
-
- :param op_name: name of the operation being called by the user
- :type op_name: str
- :param params: definition of the params that operation should be executed with
- :type params: dict
- :return: Result of the operation being executed
- :rtype: dict
- """
- if self._operation_checker.is_upsert_operation(op_name):
- return self.upsert_object(op_name, params)
- else:
- return self.crud_operation(op_name, params)
-
- def crud_operation(self, op_name, params):
- """
- Allow user request execution of simple operations(natively supported by API provider) only.
-
- :param op_name: name of the operation being called by the user
- :type op_name: str
- :param params: definition of the params that operation should be executed with
- :type params: dict
- :return: Result of the operation being executed
- :rtype: dict
- """
- op_spec = self.get_operation_spec(op_name)
- if op_spec is None:
- raise FtdInvalidOperationNameError(op_name)
-
- if self._operation_checker.is_add_operation(op_name, op_spec):
- resp = self.add_object(op_name, params)
- elif self._operation_checker.is_edit_operation(op_name, op_spec):
- resp = self.edit_object(op_name, params)
- elif self._operation_checker.is_delete_operation(op_name, op_spec):
- resp = self.delete_object(op_name, params)
- elif self._operation_checker.is_find_by_filter_operation(op_name, params, op_spec):
- resp = list(self.get_objects_by_filter(op_name, params))
- else:
- resp = self.send_general_request(op_name, params)
- return resp
-
- def get_operation_spec(self, operation_name):
- if operation_name not in self._operation_spec_cache:
- self._operation_spec_cache[operation_name] = self._conn.get_operation_spec(operation_name)
- return self._operation_spec_cache[operation_name]
-
- def get_operation_specs_by_model_name(self, model_name):
- if model_name not in self._models_operations_specs_cache:
- model_op_specs = self._conn.get_operation_specs_by_model_name(model_name)
- self._models_operations_specs_cache[model_name] = model_op_specs
- for op_name, op_spec in iteritems(model_op_specs):
- self._operation_spec_cache.setdefault(op_name, op_spec)
- return self._models_operations_specs_cache[model_name]
-
- def get_objects_by_filter(self, operation_name, params):
-
- def match_filters(filter_params, obj):
- for k, v in iteritems(filter_params):
- if k not in obj or obj[k] != v:
- return False
- return True
-
- dummy, query_params, path_params = _get_user_params(params)
- # copy required params to avoid mutation of passed `params` dict
- url_params = {ParamName.QUERY_PARAMS: dict(query_params), ParamName.PATH_PARAMS: dict(path_params)}
-
- filters = params.get(ParamName.FILTERS) or {}
- if QueryParams.FILTER not in url_params[ParamName.QUERY_PARAMS] and 'name' in filters:
- # most endpoints only support filtering by name, so remaining `filters` are applied on returned objects
- url_params[ParamName.QUERY_PARAMS][QueryParams.FILTER] = self._stringify_name_filter(filters)
-
- item_generator = iterate_over_pageable_resource(
- partial(self.send_general_request, operation_name=operation_name), url_params
- )
- return (i for i in item_generator if match_filters(filters, i))
-
- def _stringify_name_filter(self, filters):
- build_version = self.get_build_version()
- if build_version >= '6.4.0':
- return "fts~%s" % filters['name']
- return "name:%s" % filters['name']
-
- def _fetch_system_info(self):
- if not self._system_info:
- params = {ParamName.PATH_PARAMS: PATH_PARAMS_FOR_DEFAULT_OBJ}
- self._system_info = self.send_general_request('getSystemInformation', params)
-
- return self._system_info
-
- def get_build_version(self):
- system_info = self._fetch_system_info()
- return system_info['databaseInfo']['buildVersion']
-
- def add_object(self, operation_name, params):
- def is_duplicate_name_error(err):
- return err.code == UNPROCESSABLE_ENTITY_STATUS and DUPLICATE_NAME_ERROR_MESSAGE in str(err)
-
- try:
- return self.send_general_request(operation_name, params)
- except FtdServerError as e:
- if is_duplicate_name_error(e):
- return self._check_equality_with_existing_object(operation_name, params, e)
- else:
- raise e
-
- def _check_equality_with_existing_object(self, operation_name, params, e):
- """
- Looks for an existing object that caused "object duplicate" error and
- checks whether it corresponds to the one specified in `params`.
-
- In case a single object is found and it is equal to one we are trying
- to create, the existing object is returned.
-
- When the existing object is not equal to the object being created or
- several objects are returned, an exception is raised.
- """
- model_name = self.get_operation_spec(operation_name)[OperationField.MODEL_NAME]
- existing_obj = self._find_object_matching_params(model_name, params)
-
- if existing_obj is not None:
- if equal_objects(existing_obj, params[ParamName.DATA]):
- return existing_obj
- else:
- raise FtdConfigurationError(DUPLICATE_ERROR, existing_obj)
-
- raise e
-
- def _find_object_matching_params(self, model_name, params):
- get_list_operation = self._find_get_list_operation(model_name)
- if not get_list_operation:
- return None
-
- data = params[ParamName.DATA]
- if not params.get(ParamName.FILTERS):
- params[ParamName.FILTERS] = {'name': data['name']}
-
- obj = None
- filtered_objs = self.get_objects_by_filter(get_list_operation, params)
-
- for i, obj in enumerate(filtered_objs):
- if i > 0:
- raise FtdConfigurationError(MULTIPLE_DUPLICATES_FOUND_ERROR)
- obj = obj
-
- return obj
-
- def _find_get_list_operation(self, model_name):
- operations = self.get_operation_specs_by_model_name(model_name) or {}
- return next((
- op for op, op_spec in operations.items()
- if self._operation_checker.is_get_list_operation(op, op_spec)), None)
-
- def _find_get_operation(self, model_name):
- operations = self.get_operation_specs_by_model_name(model_name) or {}
- return next((
- op for op, op_spec in operations.items()
- if self._operation_checker.is_get_operation(op, op_spec)), None)
-
- def delete_object(self, operation_name, params):
- def is_invalid_uuid_error(err):
- return err.code == UNPROCESSABLE_ENTITY_STATUS and INVALID_UUID_ERROR_MESSAGE in str(err)
-
- try:
- return self.send_general_request(operation_name, params)
- except FtdServerError as e:
- if is_invalid_uuid_error(e):
- return {'status': 'Referenced object does not exist'}
- else:
- raise e
-
- def edit_object(self, operation_name, params):
- data, dummy, path_params = _get_user_params(params)
-
- model_name = self.get_operation_spec(operation_name)[OperationField.MODEL_NAME]
- get_operation = self._find_get_operation(model_name)
-
- if get_operation:
- existing_object = self.send_general_request(get_operation, {ParamName.PATH_PARAMS: path_params})
- if not existing_object:
- raise FtdConfigurationError('Referenced object does not exist')
- elif equal_objects(existing_object, data):
- return existing_object
-
- return self.send_general_request(operation_name, params)
-
- def send_general_request(self, operation_name, params):
- def stop_if_check_mode():
- if self._check_mode:
- raise CheckModeException()
-
- self.validate_params(operation_name, params)
- stop_if_check_mode()
-
- data, query_params, path_params = _get_user_params(params)
- op_spec = self.get_operation_spec(operation_name)
- url, method = op_spec[OperationField.URL], op_spec[OperationField.METHOD]
-
- return self._send_request(url, method, data, path_params, query_params)
-
- def _send_request(self, url_path, http_method, body_params=None, path_params=None, query_params=None):
- def raise_for_failure(resp):
- if not resp[ResponseParams.SUCCESS]:
- raise FtdServerError(resp[ResponseParams.RESPONSE], resp[ResponseParams.STATUS_CODE])
-
- response = self._conn.send_request(url_path=url_path, http_method=http_method, body_params=body_params,
- path_params=path_params, query_params=query_params)
- raise_for_failure(response)
- if http_method != HTTPMethod.GET:
- self.config_changed = True
- return response[ResponseParams.RESPONSE]
-
- def validate_params(self, operation_name, params):
- report = {}
- op_spec = self.get_operation_spec(operation_name)
- data, query_params, path_params = _get_user_params(params)
-
- def validate(validation_method, field_name, user_params):
- key = 'Invalid %s provided' % field_name
- try:
- is_valid, validation_report = validation_method(operation_name, user_params)
- if not is_valid:
- report[key] = validation_report
- except Exception as e:
- report[key] = str(e)
- return report
-
- validate(self._conn.validate_query_params, ParamName.QUERY_PARAMS, query_params)
- validate(self._conn.validate_path_params, ParamName.PATH_PARAMS, path_params)
- if is_post_request(op_spec) or is_put_request(op_spec):
- validate(self._conn.validate_data, ParamName.DATA, data)
-
- if report:
- raise ValidationError(report)
-
- @staticmethod
- def _get_operation_name(checker, operations):
- return next((op_name for op_name, op_spec in iteritems(operations) if checker(op_name, op_spec)), None)
-
- def _add_upserted_object(self, model_operations, params):
- add_op_name = self._get_operation_name(self._operation_checker.is_add_operation, model_operations)
- if not add_op_name:
- raise FtdConfigurationError(ADD_OPERATION_NOT_SUPPORTED_ERROR)
- return self.add_object(add_op_name, params)
-
- def _edit_upserted_object(self, model_operations, existing_object, params):
- edit_op_name = self._get_operation_name(self._operation_checker.is_edit_operation, model_operations)
- _set_default(params, 'path_params', {})
- _set_default(params, 'data', {})
-
- params['path_params']['objId'] = existing_object['id']
- copy_identity_properties(existing_object, params['data'])
- return self.edit_object(edit_op_name, params)
-
- def upsert_object(self, op_name, params):
- """
- Updates an object if it already exists, or tries to create a new one if there is no
- such object. If multiple objects match filter criteria, or add operation is not supported,
- the exception is raised.
-
- :param op_name: upsert operation name
- :type op_name: str
- :param params: params that upsert operation should be executed with
- :type params: dict
- :return: upserted object representation
- :rtype: dict
- """
-
- def extract_and_validate_model():
- model = op_name[len(OperationNamePrefix.UPSERT):]
- if not self._conn.get_model_spec(model):
- raise FtdInvalidOperationNameError(op_name)
- return model
-
- model_name = extract_and_validate_model()
- model_operations = self.get_operation_specs_by_model_name(model_name)
-
- if not self._operation_checker.is_upsert_operation_supported(model_operations):
- raise FtdInvalidOperationNameError(op_name)
-
- existing_obj = self._find_object_matching_params(model_name, params)
- if existing_obj:
- equal_to_existing_obj = equal_objects(existing_obj, params[ParamName.DATA])
- return existing_obj if equal_to_existing_obj \
- else self._edit_upserted_object(model_operations, existing_obj, params)
- else:
- return self._add_upserted_object(model_operations, params)
-
-
-def _set_default(params, field_name, value):
- if field_name not in params or params[field_name] is None:
- params[field_name] = value
-
-
-def is_post_request(operation_spec):
- return operation_spec[OperationField.METHOD] == HTTPMethod.POST
-
-
-def is_put_request(operation_spec):
- return operation_spec[OperationField.METHOD] == HTTPMethod.PUT
-
-
-def _get_user_params(params):
- return params.get(ParamName.DATA) or {}, params.get(ParamName.QUERY_PARAMS) or {}, params.get(
- ParamName.PATH_PARAMS) or {}
-
-
-def iterate_over_pageable_resource(resource_func, params):
- """
- A generator function that iterates over a resource that supports pagination and lazily returns present items
- one by one.
-
- :param resource_func: function that receives `params` argument and returns a page of objects
- :type resource_func: callable
- :param params: initial dictionary of parameters that will be passed to the resource_func.
- Should contain `query_params` inside.
- :type params: dict
- :return: an iterator containing returned items
- :rtype: iterator of dict
- """
- # creating a copy not to mutate passed dict
- params = copy.deepcopy(params)
- params[ParamName.QUERY_PARAMS].setdefault('limit', DEFAULT_PAGE_SIZE)
- params[ParamName.QUERY_PARAMS].setdefault('offset', DEFAULT_OFFSET)
- limit = int(params[ParamName.QUERY_PARAMS]['limit'])
-
- def received_less_items_than_requested(items_in_response, items_expected):
- if items_in_response == items_expected:
- return False
- elif items_in_response < items_expected:
- return True
-
- raise FtdUnexpectedResponse(
- "Get List of Objects Response from the server contains more objects than requested. "
- "There are {0} item(s) in the response while {1} was(ere) requested".format(
- items_in_response, items_expected)
- )
-
- while True:
- result = resource_func(params=params)
-
- for item in result['items']:
- yield item
-
- if received_less_items_than_requested(len(result['items']), limit):
- break
-
- # creating a copy not to mutate existing dict
- params = copy.deepcopy(params)
- query_params = params[ParamName.QUERY_PARAMS]
- query_params['offset'] = int(query_params['offset']) + limit
diff --git a/lib/ansible/module_utils/network/ftd/device.py b/lib/ansible/module_utils/network/ftd/device.py
deleted file mode 100644
index 47b0eb3a43..0000000000
--- a/lib/ansible/module_utils/network/ftd/device.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-try:
- from kick.device2.ftd5500x.actions.ftd5500x import Ftd5500x
- from kick.device2.kp.actions import Kp
-
- HAS_KICK = True
-except ImportError:
- HAS_KICK = False
-
-
-def assert_kick_is_installed(module):
- if not HAS_KICK:
- module.fail_json(msg='Firepower-kickstart library is required to run this module. '
- 'Please, install the library with `pip install firepower-kickstart` '
- 'command and run the playbook again.')
-
-
-class FtdModel:
- FTD_ASA5506_X = 'Cisco ASA5506-X Threat Defense'
- FTD_ASA5508_X = 'Cisco ASA5508-X Threat Defense'
- FTD_ASA5516_X = 'Cisco ASA5516-X Threat Defense'
-
- FTD_2110 = 'Cisco Firepower 2110 Threat Defense'
- FTD_2120 = 'Cisco Firepower 2120 Threat Defense'
- FTD_2130 = 'Cisco Firepower 2130 Threat Defense'
- FTD_2140 = 'Cisco Firepower 2140 Threat Defense'
-
- @classmethod
- def supported_models(cls):
- return [getattr(cls, item) for item in dir(cls) if item.startswith('FTD_')]
-
-
-class FtdPlatformFactory(object):
-
- @staticmethod
- def create(model, module_params):
- for cls in AbstractFtdPlatform.__subclasses__():
- if cls.supports_ftd_model(model):
- return cls(module_params)
- raise ValueError("FTD model '%s' is not supported by this module." % model)
-
-
-class AbstractFtdPlatform(object):
- PLATFORM_MODELS = []
-
- def install_ftd_image(self, params):
- raise NotImplementedError('The method should be overridden in subclass')
-
- @classmethod
- def supports_ftd_model(cls, model):
- return model in cls.PLATFORM_MODELS
-
- @staticmethod
- def parse_rommon_file_location(rommon_file_location):
- rommon_url = urlparse(rommon_file_location)
- if rommon_url.scheme != 'tftp':
- raise ValueError('The ROMMON image must be downloaded from TFTP server, other protocols are not supported.')
- return rommon_url.netloc, rommon_url.path
-
-
-class Ftd2100Platform(AbstractFtdPlatform):
- PLATFORM_MODELS = [FtdModel.FTD_2110, FtdModel.FTD_2120, FtdModel.FTD_2130, FtdModel.FTD_2140]
-
- def __init__(self, params):
- self._ftd = Kp(hostname=params["device_hostname"],
- login_username=params["device_username"],
- login_password=params["device_password"],
- sudo_password=params.get("device_sudo_password") or params["device_password"])
-
- def install_ftd_image(self, params):
- line = self._ftd.ssh_console(ip=params["console_ip"],
- port=params["console_port"],
- username=params["console_username"],
- password=params["console_password"])
-
- try:
- rommon_server, rommon_path = self.parse_rommon_file_location(params["rommon_file_location"])
- line.baseline_fp2k_ftd(tftp_server=rommon_server,
- rommon_file=rommon_path,
- uut_hostname=params["device_hostname"],
- uut_username=params["device_username"],
- uut_password=params.get("device_new_password") or params["device_password"],
- uut_ip=params["device_ip"],
- uut_netmask=params["device_netmask"],
- uut_gateway=params["device_gateway"],
- dns_servers=params["dns_server"],
- search_domains=params["search_domains"],
- fxos_url=params["image_file_location"],
- ftd_version=params["image_version"])
- finally:
- line.disconnect()
-
-
-class FtdAsa5500xPlatform(AbstractFtdPlatform):
- PLATFORM_MODELS = [FtdModel.FTD_ASA5506_X, FtdModel.FTD_ASA5508_X, FtdModel.FTD_ASA5516_X]
-
- def __init__(self, params):
- self._ftd = Ftd5500x(hostname=params["device_hostname"],
- login_password=params["device_password"],
- sudo_password=params.get("device_sudo_password") or params["device_password"])
-
- def install_ftd_image(self, params):
- line = self._ftd.ssh_console(ip=params["console_ip"],
- port=params["console_port"],
- username=params["console_username"],
- password=params["console_password"])
- try:
- rommon_server, rommon_path = self.parse_rommon_file_location(params["rommon_file_location"])
- line.rommon_to_new_image(rommon_tftp_server=rommon_server,
- rommon_image=rommon_path,
- pkg_image=params["image_file_location"],
- uut_ip=params["device_ip"],
- uut_netmask=params["device_netmask"],
- uut_gateway=params["device_gateway"],
- dns_server=params["dns_server"],
- search_domains=params["search_domains"],
- hostname=params["device_hostname"])
- finally:
- line.disconnect()
diff --git a/lib/ansible/module_utils/network/ftd/fdm_swagger_client.py b/lib/ansible/module_utils/network/ftd/fdm_swagger_client.py
deleted file mode 100644
index 9d0cd54461..0000000000
--- a/lib/ansible/module_utils/network/ftd/fdm_swagger_client.py
+++ /dev/null
@@ -1,638 +0,0 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from ansible.module_utils.network.ftd.common import HTTPMethod
-from ansible.module_utils.six import integer_types, string_types, iteritems
-
-FILE_MODEL_NAME = '_File'
-SUCCESS_RESPONSE_CODE = '200'
-DELETE_PREFIX = 'delete'
-
-
-class OperationField:
- URL = 'url'
- METHOD = 'method'
- PARAMETERS = 'parameters'
- MODEL_NAME = 'modelName'
- DESCRIPTION = 'description'
- RETURN_MULTIPLE_ITEMS = 'returnMultipleItems'
- TAGS = "tags"
-
-
-class SpecProp:
- DEFINITIONS = 'definitions'
- OPERATIONS = 'operations'
- MODELS = 'models'
- MODEL_OPERATIONS = 'model_operations'
-
-
-class PropName:
- ENUM = 'enum'
- TYPE = 'type'
- REQUIRED = 'required'
- INVALID_TYPE = 'invalid_type'
- REF = '$ref'
- ALL_OF = 'allOf'
- BASE_PATH = 'basePath'
- PATHS = 'paths'
- OPERATION_ID = 'operationId'
- SCHEMA = 'schema'
- ITEMS = 'items'
- PROPERTIES = 'properties'
- RESPONSES = 'responses'
- NAME = 'name'
- DESCRIPTION = 'description'
-
-
-class PropType:
- STRING = 'string'
- BOOLEAN = 'boolean'
- INTEGER = 'integer'
- NUMBER = 'number'
- OBJECT = 'object'
- ARRAY = 'array'
- FILE = 'file'
-
-
-class OperationParams:
- PATH = 'path'
- QUERY = 'query'
-
-
-class QueryParams:
- FILTER = 'filter'
-
-
-class PathParams:
- OBJ_ID = 'objId'
-
-
-def _get_model_name_from_url(schema_ref):
- path = schema_ref.split('/')
- return path[len(path) - 1]
-
-
-class IllegalArgumentException(ValueError):
- """
- Exception raised when the function parameters:
- - not all passed
- - empty string
- - wrong type
- """
- pass
-
-
-class ValidationError(ValueError):
- pass
-
-
-class FdmSwaggerParser:
- _definitions = None
- _base_path = None
-
- def parse_spec(self, spec, docs=None):
- """
- This method simplifies a swagger format, resolves a model name for each operation, and adds documentation for
- each operation and model if it is provided.
-
- :param spec: An API specification in the swagger format, see
- <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md>
- :type spec: dict
- :param spec: A documentation map containing descriptions for models, operations and operation parameters.
- :type docs: dict
- :rtype: dict
- :return:
- Ex.
- The models field contains model definition from swagger see
- <#https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#definitions>
- {
- 'models':{
- 'model_name':{...},
- ...
- },
- 'operations':{
- 'operation_name':{
- 'method': 'get', #post, put, delete
- 'url': '/api/fdm/v2/object/networks', #url already contains a value from `basePath`
- 'modelName': 'NetworkObject', # it is a link to the model from 'models'
- # None - for a delete operation or we don't have information
- # '_File' - if an endpoint works with files
- 'returnMultipleItems': False, # shows if the operation returns a single item or an item list
- 'parameters': {
- 'path':{
- 'param_name':{
- 'type': 'string'#integer, boolean, number
- 'required' True #False
- }
- ...
- },
- 'query':{
- 'param_name':{
- 'type': 'string'#integer, boolean, number
- 'required' True #False
- }
- ...
- }
- }
- },
- ...
- },
- 'model_operations':{
- 'model_name':{ # a list of operations available for the current model
- 'operation_name':{
- ... # the same as in the operations section
- },
- ...
- },
- ...
- }
- }
- """
- self._definitions = spec[SpecProp.DEFINITIONS]
- self._base_path = spec[PropName.BASE_PATH]
- operations = self._get_operations(spec)
-
- if docs:
- operations = self._enrich_operations_with_docs(operations, docs)
- self._definitions = self._enrich_definitions_with_docs(self._definitions, docs)
-
- return {
- SpecProp.MODELS: self._definitions,
- SpecProp.OPERATIONS: operations,
- SpecProp.MODEL_OPERATIONS: self._get_model_operations(operations)
- }
-
- @property
- def base_path(self):
- return self._base_path
-
- def _get_model_operations(self, operations):
- model_operations = {}
- for operations_name, params in iteritems(operations):
- model_name = params[OperationField.MODEL_NAME]
- model_operations.setdefault(model_name, {})[operations_name] = params
- return model_operations
-
- def _get_operations(self, spec):
- paths_dict = spec[PropName.PATHS]
- operations_dict = {}
- for url, operation_params in iteritems(paths_dict):
- for method, params in iteritems(operation_params):
- operation = {
- OperationField.METHOD: method,
- OperationField.URL: self._base_path + url,
- OperationField.MODEL_NAME: self._get_model_name(method, params),
- OperationField.RETURN_MULTIPLE_ITEMS: self._return_multiple_items(params),
- OperationField.TAGS: params.get(OperationField.TAGS, [])
- }
- if OperationField.PARAMETERS in params:
- operation[OperationField.PARAMETERS] = self._get_rest_params(params[OperationField.PARAMETERS])
-
- operation_id = params[PropName.OPERATION_ID]
- operations_dict[operation_id] = operation
- return operations_dict
-
- def _enrich_operations_with_docs(self, operations, docs):
- def get_operation_docs(op):
- op_url = op[OperationField.URL][len(self._base_path):]
- return docs[PropName.PATHS].get(op_url, {}).get(op[OperationField.METHOD], {})
-
- for operation in operations.values():
- operation_docs = get_operation_docs(operation)
- operation[OperationField.DESCRIPTION] = operation_docs.get(PropName.DESCRIPTION, '')
-
- if OperationField.PARAMETERS in operation:
- param_descriptions = dict((
- (p[PropName.NAME], p[PropName.DESCRIPTION])
- for p in operation_docs.get(OperationField.PARAMETERS, {})
- ))
-
- for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.PATH].items():
- params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '')
-
- for param_name, params_spec in operation[OperationField.PARAMETERS][OperationParams.QUERY].items():
- params_spec[OperationField.DESCRIPTION] = param_descriptions.get(param_name, '')
-
- return operations
-
- def _enrich_definitions_with_docs(self, definitions, docs):
- for model_name, model_def in definitions.items():
- model_docs = docs[SpecProp.DEFINITIONS].get(model_name, {})
- model_def[PropName.DESCRIPTION] = model_docs.get(PropName.DESCRIPTION, '')
- for prop_name, prop_spec in model_def.get(PropName.PROPERTIES, {}).items():
- prop_spec[PropName.DESCRIPTION] = model_docs.get(PropName.PROPERTIES, {}).get(prop_name, '')
- prop_spec[PropName.REQUIRED] = prop_name in model_def.get(PropName.REQUIRED, [])
- return definitions
-
- def _get_model_name(self, method, params):
- if method == HTTPMethod.GET:
- return self._get_model_name_from_responses(params)
- elif method == HTTPMethod.POST or method == HTTPMethod.PUT:
- return self._get_model_name_for_post_put_requests(params)
- elif method == HTTPMethod.DELETE:
- return self._get_model_name_from_delete_operation(params)
- else:
- return None
-
- @staticmethod
- def _return_multiple_items(op_params):
- """
- Defines if the operation returns one item or a list of items.
-
- :param op_params: operation specification
- :return: True if the operation returns a list of items, otherwise False
- """
- try:
- schema = op_params[PropName.RESPONSES][SUCCESS_RESPONSE_CODE][PropName.SCHEMA]
- return PropName.ITEMS in schema[PropName.PROPERTIES]
- except KeyError:
- return False
-
- def _get_model_name_from_delete_operation(self, params):
- operation_id = params[PropName.OPERATION_ID]
- if operation_id.startswith(DELETE_PREFIX):
- model_name = operation_id[len(DELETE_PREFIX):]
- if model_name in self._definitions:
- return model_name
- return None
-
- def _get_model_name_for_post_put_requests(self, params):
- model_name = None
- if OperationField.PARAMETERS in params:
- body_param_dict = self._get_body_param_from_parameters(params[OperationField.PARAMETERS])
- if body_param_dict:
- schema_ref = body_param_dict[PropName.SCHEMA][PropName.REF]
- model_name = self._get_model_name_byschema_ref(schema_ref)
- if model_name is None:
- model_name = self._get_model_name_from_responses(params)
- return model_name
-
- @staticmethod
- def _get_body_param_from_parameters(params):
- return next((param for param in params if param['in'] == 'body'), None)
-
- def _get_model_name_from_responses(self, params):
- responses = params[PropName.RESPONSES]
- if SUCCESS_RESPONSE_CODE in responses:
- response = responses[SUCCESS_RESPONSE_CODE][PropName.SCHEMA]
- if PropName.REF in response:
- return self._get_model_name_byschema_ref(response[PropName.REF])
- elif PropName.PROPERTIES in response:
- ref = response[PropName.PROPERTIES][PropName.ITEMS][PropName.ITEMS][PropName.REF]
- return self._get_model_name_byschema_ref(ref)
- elif (PropName.TYPE in response) and response[PropName.TYPE] == PropType.FILE:
- return FILE_MODEL_NAME
- else:
- return None
-
- def _get_rest_params(self, params):
- path = {}
- query = {}
- operation_param = {
- OperationParams.PATH: path,
- OperationParams.QUERY: query
- }
- for param in params:
- in_param = param['in']
- if in_param == OperationParams.QUERY:
- query[param[PropName.NAME]] = self._simplify_param_def(param)
- elif in_param == OperationParams.PATH:
- path[param[PropName.NAME]] = self._simplify_param_def(param)
- return operation_param
-
- @staticmethod
- def _simplify_param_def(param):
- return {
- PropName.TYPE: param[PropName.TYPE],
- PropName.REQUIRED: param[PropName.REQUIRED]
- }
-
- def _get_model_name_byschema_ref(self, schema_ref):
- model_name = _get_model_name_from_url(schema_ref)
- model_def = self._definitions[model_name]
- if PropName.ALL_OF in model_def:
- return self._get_model_name_byschema_ref(model_def[PropName.ALL_OF][0][PropName.REF])
- else:
- return model_name
-
-
-class FdmSwaggerValidator:
- def __init__(self, spec):
- """
- :param spec: dict
- data from FdmSwaggerParser().parse_spec()
- """
- self._operations = spec[SpecProp.OPERATIONS]
- self._models = spec[SpecProp.MODELS]
-
- def validate_data(self, operation_name, data=None):
- """
- Validate data for the post|put requests
- :param operation_name: string
- The value must be non empty string.
- The operation name is used to get a model specification
- :param data: dict
- The value must be in the format that the model(from operation) expects
- :rtype: (bool, string|dict)
- :return:
- (True, None) - if data valid
- Invalid:
- (False, {
- 'required': [ #list of the fields that are required but were not present in the data
- 'field_name',
- 'patent.field_name',# when the nested field is omitted
- 'patent.list[2].field_name' # if data is array and one of the field is omitted
- ],
- 'invalid_type':[ #list of the fields with invalid data
- {
- 'path': 'objId', #field name or path to the field. Ex. objects[3].id, parent.name
- 'expected_type': 'string',# expected type. Ex. 'object', 'array', 'string', 'integer',
- # 'boolean', 'number'
- 'actually_value': 1 # the value that user passed
- }
- ]
- })
- :raises IllegalArgumentException
- 'The operation_name parameter must be a non-empty string' if operation_name is not valid
- 'The data parameter must be a dict' if data neither dict or None
- '{operation_name} operation does not support' if the spec does not contain the operation
- """
- if data is None:
- data = {}
-
- self._check_validate_data_params(data, operation_name)
-
- operation = self._operations[operation_name]
- model = self._models[operation[OperationField.MODEL_NAME]]
- status = self._init_report()
-
- self._validate_object(status, model, data, '')
-
- if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
- return False, self._delete_empty_field_from_report(status)
- return True, None
-
- def _check_validate_data_params(self, data, operation_name):
- if not operation_name or not isinstance(operation_name, string_types):
- raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
- if not isinstance(data, dict):
- raise IllegalArgumentException("The data parameter must be a dict")
- if operation_name not in self._operations:
- raise IllegalArgumentException("{0} operation does not support".format(operation_name))
-
- def validate_query_params(self, operation_name, params):
- """
- Validate params for the get requests. Use this method for validating the query part of the url.
- :param operation_name: string
- The value must be non empty string.
- The operation name is used to get a params specification
- :param params: dict
- should be in the format that the specification(from operation) expects
- Ex.
- {
- 'objId': "string_value",
- 'p_integer': 1,
- 'p_boolean': True,
- 'p_number': 2.3
- }
- :rtype:(Boolean, msg)
- :return:
- (True, None) - if params valid
- Invalid:
- (False, {
- 'required': [ #list of the fields that are required but are not present in the params
- 'field_name'
- ],
- 'invalid_type':[ #list of the fields with invalid data and expected type of the params
- {
- 'path': 'objId', #field name
- 'expected_type': 'string',#expected type. Ex. 'string', 'integer', 'boolean', 'number'
- 'actually_value': 1 # the value that user passed
- }
- ]
- })
- :raises IllegalArgumentException
- 'The operation_name parameter must be a non-empty string' if operation_name is not valid
- 'The params parameter must be a dict' if params neither dict or None
- '{operation_name} operation does not support' if the spec does not contain the operation
- """
- return self._validate_url_params(operation_name, params, resource=OperationParams.QUERY)
-
- def validate_path_params(self, operation_name, params):
- """
- Validate params for the get requests. Use this method for validating the path part of the url.
- :param operation_name: string
- The value must be non empty string.
- The operation name is used to get a params specification
- :param params: dict
- should be in the format that the specification(from operation) expects
-
- Ex.
- {
- 'objId': "string_value",
- 'p_integer': 1,
- 'p_boolean': True,
- 'p_number': 2.3
- }
- :rtype:(Boolean, msg)
- :return:
- (True, None) - if params valid
- Invalid:
- (False, {
- 'required': [ #list of the fields that are required but are not present in the params
- 'field_name'
- ],
- 'invalid_type':[ #list of the fields with invalid data and expected type of the params
- {
- 'path': 'objId', #field name
- 'expected_type': 'string',#expected type. Ex. 'string', 'integer', 'boolean', 'number'
- 'actually_value': 1 # the value that user passed
- }
- ]
- })
- :raises IllegalArgumentException
- 'The operation_name parameter must be a non-empty string' if operation_name is not valid
- 'The params parameter must be a dict' if params neither dict or None
- '{operation_name} operation does not support' if the spec does not contain the operation
- """
- return self._validate_url_params(operation_name, params, resource=OperationParams.PATH)
-
- def _validate_url_params(self, operation, params, resource):
- if params is None:
- params = {}
-
- self._check_validate_url_params(operation, params)
-
- operation = self._operations[operation]
- if OperationField.PARAMETERS in operation and resource in operation[OperationField.PARAMETERS]:
- spec = operation[OperationField.PARAMETERS][resource]
- status = self._init_report()
- self._check_url_params(status, spec, params)
-
- if len(status[PropName.REQUIRED]) > 0 or len(status[PropName.INVALID_TYPE]) > 0:
- return False, self._delete_empty_field_from_report(status)
- return True, None
- else:
- return True, None
-
- def _check_validate_url_params(self, operation, params):
- if not operation or not isinstance(operation, string_types):
- raise IllegalArgumentException("The operation_name parameter must be a non-empty string")
- if not isinstance(params, dict):
- raise IllegalArgumentException("The params parameter must be a dict")
- if operation not in self._operations:
- raise IllegalArgumentException("{0} operation does not support".format(operation))
-
- def _check_url_params(self, status, spec, params):
- for prop_name in spec.keys():
- prop = spec[prop_name]
- if prop[PropName.REQUIRED] and prop_name not in params:
- status[PropName.REQUIRED].append(prop_name)
- continue
- if prop_name in params:
- expected_type = prop[PropName.TYPE]
- value = params[prop_name]
- if prop_name in params and not self._is_correct_simple_types(expected_type, value, allow_null=False):
- self._add_invalid_type_report(status, '', prop_name, expected_type, value)
-
- def _validate_object(self, status, model, data, path):
- if self._is_enum(model):
- self._check_enum(status, model, data, path)
- elif self._is_object(model):
- self._check_object(status, model, data, path)
-
- def _is_enum(self, model):
- return self._is_string_type(model) and PropName.ENUM in model
-
- def _check_enum(self, status, model, value, path):
- if value is not None and value not in model[PropName.ENUM]:
- self._add_invalid_type_report(status, path, '', PropName.ENUM, value)
-
- def _add_invalid_type_report(self, status, path, prop_name, expected_type, actually_value):
- status[PropName.INVALID_TYPE].append({
- 'path': self._create_path_to_field(path, prop_name),
- 'expected_type': expected_type,
- 'actually_value': actually_value
- })
-
- def _check_object(self, status, model, data, path):
- if data is None:
- return
-
- if not isinstance(data, dict):
- self._add_invalid_type_report(status, path, '', PropType.OBJECT, data)
- return None
-
- if PropName.REQUIRED in model:
- self._check_required_fields(status, model[PropName.REQUIRED], data, path)
-
- model_properties = model[PropName.PROPERTIES]
- for prop in model_properties.keys():
- if prop in data:
- model_prop_val = model_properties[prop]
- expected_type = model_prop_val[PropName.TYPE]
- actually_value = data[prop]
- self._check_types(status, actually_value, expected_type, model_prop_val, path, prop)
-
- def _check_types(self, status, actually_value, expected_type, model, path, prop_name):
- if expected_type == PropType.OBJECT:
- ref_model = self._get_model_by_ref(model)
-
- self._validate_object(status, ref_model, actually_value,
- path=self._create_path_to_field(path, prop_name))
- elif expected_type == PropType.ARRAY:
- self._check_array(status, model, actually_value,
- path=self._create_path_to_field(path, prop_name))
- elif not self._is_correct_simple_types(expected_type, actually_value):
- self._add_invalid_type_report(status, path, prop_name, expected_type, actually_value)
-
- def _get_model_by_ref(self, model_prop_val):
- model = _get_model_name_from_url(model_prop_val[PropName.REF])
- return self._models[model]
-
- def _check_required_fields(self, status, required_fields, data, path):
- missed_required_fields = [self._create_path_to_field(path, field) for field in
- required_fields if field not in data.keys() or data[field] is None]
- if len(missed_required_fields) > 0:
- status[PropName.REQUIRED] += missed_required_fields
-
- def _check_array(self, status, model, data, path):
- if data is None:
- return
- elif not isinstance(data, list):
- self._add_invalid_type_report(status, path, '', PropType.ARRAY, data)
- else:
- item_model = model[PropName.ITEMS]
- for i, item_data in enumerate(data):
- self._check_types(status, item_data, item_model[PropName.TYPE], item_model, "{0}[{1}]".format(path, i),
- '')
-
- @staticmethod
- def _is_correct_simple_types(expected_type, value, allow_null=True):
- def is_numeric_string(s):
- try:
- float(s)
- return True
- except ValueError:
- return False
-
- if value is None and allow_null:
- return True
- elif expected_type == PropType.STRING:
- return isinstance(value, string_types)
- elif expected_type == PropType.BOOLEAN:
- return isinstance(value, bool)
- elif expected_type == PropType.INTEGER:
- is_integer = isinstance(value, integer_types) and not isinstance(value, bool)
- is_digit_string = isinstance(value, string_types) and value.isdigit()
- return is_integer or is_digit_string
- elif expected_type == PropType.NUMBER:
- is_number = isinstance(value, (integer_types, float)) and not isinstance(value, bool)
- is_numeric_string = isinstance(value, string_types) and is_numeric_string(value)
- return is_number or is_numeric_string
- return False
-
- @staticmethod
- def _is_string_type(model):
- return PropName.TYPE in model and model[PropName.TYPE] == PropType.STRING
-
- @staticmethod
- def _init_report():
- return {
- PropName.REQUIRED: [],
- PropName.INVALID_TYPE: []
- }
-
- @staticmethod
- def _delete_empty_field_from_report(status):
- if not status[PropName.REQUIRED]:
- del status[PropName.REQUIRED]
- if not status[PropName.INVALID_TYPE]:
- del status[PropName.INVALID_TYPE]
- return status
-
- @staticmethod
- def _create_path_to_field(path='', field=''):
- separator = ''
- if path and field:
- separator = '.'
- return "{0}{1}{2}".format(path, separator, field)
-
- @staticmethod
- def _is_object(model):
- return PropName.TYPE in model and model[PropName.TYPE] == PropType.OBJECT
diff --git a/lib/ansible/module_utils/network/ftd/operation.py b/lib/ansible/module_utils/network/ftd/operation.py
deleted file mode 100644
index 6006fbae56..0000000000
--- a/lib/ansible/module_utils/network/ftd/operation.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from ansible.module_utils.network.ftd.configuration import ParamName, PATH_PARAMS_FOR_DEFAULT_OBJ
-
-
-class FtdOperations:
- """
- Utility class for common operation names
- """
- GET_SYSTEM_INFO = 'getSystemInformation'
- GET_MANAGEMENT_IP_LIST = 'getManagementIPList'
- GET_DNS_SETTING_LIST = 'getDeviceDNSSettingsList'
- GET_DNS_SERVER_GROUP = 'getDNSServerGroup'
-
-
-def get_system_info(resource):
- """
- Executes `getSystemInformation` operation and returns information about the system.
-
- :param resource: a BaseConfigurationResource object to connect to the device
- :return: a dictionary with system information about the device and its software
- """
- path_params = {ParamName.PATH_PARAMS: PATH_PARAMS_FOR_DEFAULT_OBJ}
- system_info = resource.execute_operation(FtdOperations.GET_SYSTEM_INFO, path_params)
- return system_info
diff --git a/lib/ansible/module_utils/network/icx/__init__.py b/lib/ansible/module_utils/network/icx/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/icx/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/icx/icx.py b/lib/ansible/module_utils/network/icx/icx.py
deleted file mode 100644
index 9270f676da..0000000000
--- a/lib/ansible/module_utils/network/icx/icx.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2019, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import json
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list
-from ansible.module_utils.connection import Connection, ConnectionError
-
-_DEVICE_CONFIGS = {}
-
-
-def get_connection(module):
- return Connection(module._socket_path)
-
-
-def load_config(module, commands):
- connection = get_connection(module)
-
- try:
- resp = connection.edit_config(candidate=commands)
- return resp.get('response')
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
- try:
- return connection.run_commands(commands=commands, check_rc=check_rc)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def exec_scp(module, command):
- connection = Connection(module._socket_path)
- return connection.scp(**command)
-
-
-def get_config(module, flags=None, compare=None):
- flag_str = ' '.join(to_list(flags))
- try:
- return _DEVICE_CONFIGS[flag_str]
- except KeyError:
- connection = get_connection(module)
- try:
- out = connection.get_config(flags=flags, compare=compare)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- _DEVICE_CONFIGS[flag_str] = cfg
- return cfg
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_defaults_flag(module):
- connection = get_connection(module)
- try:
- out = connection.get_defaults_flag()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- return to_text(out, errors='surrogate_then_replace').strip()
diff --git a/lib/ansible/module_utils/network/ingate/__init__.py b/lib/ansible/module_utils/network/ingate/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/ingate/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/ingate/common.py b/lib/ansible/module_utils/network/ingate/common.py
deleted file mode 100644
index ff632520b0..0000000000
--- a/lib/ansible/module_utils/network/ingate/common.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, Ingate Systems AB
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-try:
- from ingate import ingatesdk
- HAS_INGATESDK = True
-except ImportError:
- HAS_INGATESDK = False
-
-
-def ingate_argument_spec(**kwargs):
- client_options = dict(
- version=dict(choices=['v1'], default='v1'),
- scheme=dict(choices=['http', 'https'], required=True),
- address=dict(type='str', required=True),
- username=dict(type='str', required=True),
- password=dict(type='str', required=True, no_log=True),
- port=dict(type='int'),
- timeout=dict(type='int'),
- validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
- )
- argument_spec = dict(
- client=dict(type='dict', required=True,
- options=client_options),
- )
- argument_spec.update(kwargs)
- return argument_spec
-
-
-def ingate_create_client(**kwargs):
- api_client = ingate_create_client_noauth(**kwargs)
-
- # Authenticate and get hold of a security token.
- api_client.authenticate()
-
- # Return the client.
- return api_client
-
-
-def ingate_create_client_noauth(**kwargs):
- client_params = kwargs['client']
-
- # Create API client.
- api_client = ingatesdk.Client(client_params['version'],
- client_params['scheme'],
- client_params['address'],
- client_params['username'],
- client_params['password'],
- port=client_params['port'],
- timeout=client_params['timeout'])
-
- # Check if we should skip SSL Certificate verification.
- verify_ssl = client_params.get('validate_certs')
- if not verify_ssl:
- api_client.skip_verify_certificate()
-
- # Return the client.
- return api_client
-
-
-def is_ingatesdk_installed(module):
- if not HAS_INGATESDK:
- module.fail_json(msg="The Ingate Python SDK module is required for this module.")
diff --git a/lib/ansible/module_utils/network/ironware/__init__.py b/lib/ansible/module_utils/network/ironware/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/ironware/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/ironware/ironware.py b/lib/ansible/module_utils/network/ironware/ironware.py
deleted file mode 100644
index de24eb967d..0000000000
--- a/lib/ansible/module_utils/network/ironware/ironware.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#
-# Copyright (c) 2017, Paul Baker <paul@paulbaker.id.au>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, EntityCollection
-from ansible.module_utils.connection import Connection, exec_command
-
-_DEVICE_CONFIG = None
-_CONNECTION = None
-
-ironware_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
- 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
- 'timeout': dict(type='int'),
-}
-
-ironware_argument_spec = {
- 'provider': dict(type='dict', options=ironware_provider_spec)
-}
-
-command_spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
-}
-
-
-def get_provider_argspec():
- return ironware_provider_spec
-
-
-def check_args(module):
- pass
-
-
-def get_connection(module):
- global _CONNECTION
- if _CONNECTION:
- return _CONNECTION
- _CONNECTION = Connection(module._socket_path)
-
- return _CONNECTION
-
-
-def to_commands(module, commands):
- if not isinstance(commands, list):
- raise AssertionError('argument must be of type <list>')
-
- transform = EntityCollection(module, command_spec)
- commands = transform(commands)
-
- for index, item in enumerate(commands):
- if module.check_mode and not item['command'].startswith('show'):
- module.warn('only show commands are supported when using check '
- 'mode, not executing `%s`' % item['command'])
-
- return commands
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
-
- commands = to_commands(module, to_list(commands))
-
- responses = list()
-
- for cmd in commands:
- out = connection.get(**cmd)
- responses.append(to_text(out, errors='surrogate_then_replace'))
-
- return responses
-
-
-def get_config(module, source='running', flags=None):
- global _DEVICE_CONFIG
- if source == 'running' and flags is None and _DEVICE_CONFIG is not None:
- return _DEVICE_CONFIG
- else:
- conn = get_connection(module)
- out = conn.get_config(source=source, flags=flags)
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- if source == 'running' and flags is None:
- _DEVICE_CONFIG = cfg
- return cfg
-
-
-def load_config(module, config):
- conn = get_connection(module)
- conn.edit_config(config)
diff --git a/lib/ansible/module_utils/network/netscaler/__init__.py b/lib/ansible/module_utils/network/netscaler/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/netscaler/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/netscaler/netscaler.py b/lib/ansible/module_utils/network/netscaler/netscaler.py
deleted file mode 100644
index ccf0dbff8f..0000000000
--- a/lib/ansible/module_utils/network/netscaler/netscaler.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017 Citrix Systems
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-import json
-import re
-import sys
-
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.six import binary_type, text_type
-from ansible.module_utils._text import to_native
-
-
-class ConfigProxy(object):
-
- def __init__(self, actual, client, attribute_values_dict, readwrite_attrs, transforms=None, readonly_attrs=None, immutable_attrs=None, json_encodes=None):
- transforms = {} if transforms is None else transforms
- readonly_attrs = [] if readonly_attrs is None else readonly_attrs
- immutable_attrs = [] if immutable_attrs is None else immutable_attrs
- json_encodes = [] if json_encodes is None else json_encodes
-
- # Actual config object from nitro sdk
- self.actual = actual
-
- # nitro client
- self.client = client
-
- # ansible attribute_values_dict
- self.attribute_values_dict = attribute_values_dict
-
- self.readwrite_attrs = readwrite_attrs
- self.readonly_attrs = readonly_attrs
- self.immutable_attrs = immutable_attrs
- self.json_encodes = json_encodes
- self.transforms = transforms
-
- self.attribute_values_processed = {}
- for attribute, value in self.attribute_values_dict.items():
- if value is None:
- continue
- if attribute in transforms:
- for transform in self.transforms[attribute]:
- if transform == 'bool_yes_no':
- if value is True:
- value = 'YES'
- elif value is False:
- value = 'NO'
- elif transform == 'bool_on_off':
- if value is True:
- value = 'ON'
- elif value is False:
- value = 'OFF'
- elif callable(transform):
- value = transform(value)
- else:
- raise Exception('Invalid transform %s' % transform)
- self.attribute_values_processed[attribute] = value
-
- self._copy_attributes_to_actual()
-
- def _copy_attributes_to_actual(self):
- for attribute in self.readwrite_attrs:
- if attribute in self.attribute_values_processed:
- attribute_value = self.attribute_values_processed[attribute]
-
- if attribute_value is None:
- continue
-
- # Fallthrough
- if attribute in self.json_encodes:
- attribute_value = json.JSONEncoder().encode(attribute_value).strip('"')
- setattr(self.actual, attribute, attribute_value)
-
- def __getattr__(self, name):
- if name in self.attribute_values_dict:
- return self.attribute_values_dict[name]
- else:
- raise AttributeError('No attribute %s found' % name)
-
- def add(self):
- self.actual.__class__.add(self.client, self.actual)
-
- def update(self):
- return self.actual.__class__.update(self.client, self.actual)
-
- def delete(self):
- self.actual.__class__.delete(self.client, self.actual)
-
- def get(self, *args, **kwargs):
- result = self.actual.__class__.get(self.client, *args, **kwargs)
-
- return result
-
- def has_equal_attributes(self, other):
- if self.diff_object(other) == {}:
- return True
- else:
- return False
-
- def diff_object(self, other):
- diff_dict = {}
- for attribute in self.attribute_values_processed:
- # Skip readonly attributes
- if attribute not in self.readwrite_attrs:
- continue
-
- # Skip attributes not present in module arguments
- if self.attribute_values_processed[attribute] is None:
- continue
-
- # Check existence
- if hasattr(other, attribute):
- attribute_value = getattr(other, attribute)
- else:
- diff_dict[attribute] = 'missing from other'
- continue
-
- # Compare values
- param_type = self.attribute_values_processed[attribute].__class__
- if attribute_value is None or param_type(attribute_value) != self.attribute_values_processed[attribute]:
- str_tuple = (
- type(self.attribute_values_processed[attribute]),
- self.attribute_values_processed[attribute],
- type(attribute_value),
- attribute_value,
- )
- diff_dict[attribute] = 'difference. ours: (%s) %s other: (%s) %s' % str_tuple
- return diff_dict
-
- def get_actual_rw_attributes(self, filter='name'):
- if self.actual.__class__.count_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) == 0:
- return {}
- server_list = self.actual.__class__.get_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter]))
- actual_instance = server_list[0]
- ret_val = {}
- for attribute in self.readwrite_attrs:
- if not hasattr(actual_instance, attribute):
- continue
- ret_val[attribute] = getattr(actual_instance, attribute)
- return ret_val
-
- def get_actual_ro_attributes(self, filter='name'):
- if self.actual.__class__.count_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter])) == 0:
- return {}
- server_list = self.actual.__class__.get_filtered(self.client, '%s:%s' % (filter, self.attribute_values_dict[filter]))
- actual_instance = server_list[0]
- ret_val = {}
- for attribute in self.readonly_attrs:
- if not hasattr(actual_instance, attribute):
- continue
- ret_val[attribute] = getattr(actual_instance, attribute)
- return ret_val
-
- def get_missing_rw_attributes(self):
- return list(set(self.readwrite_attrs) - set(self.get_actual_rw_attributes().keys()))
-
- def get_missing_ro_attributes(self):
- return list(set(self.readonly_attrs) - set(self.get_actual_ro_attributes().keys()))
-
-
-def get_immutables_intersection(config_proxy, keys):
- immutables_set = set(config_proxy.immutable_attrs)
- keys_set = set(keys)
- # Return list of sets' intersection
- return list(immutables_set & keys_set)
-
-
-def ensure_feature_is_enabled(client, feature_str):
- enabled_features = client.get_enabled_features()
-
- if enabled_features is None:
- enabled_features = []
-
- if feature_str not in enabled_features:
- client.enable_features(feature_str)
- client.save_config()
-
-
-def get_nitro_client(module):
- from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
-
- client = nitro_service(module.params['nsip'], module.params['nitro_protocol'])
- client.set_credential(module.params['nitro_user'], module.params['nitro_pass'])
- client.timeout = float(module.params['nitro_timeout'])
- client.certvalidation = module.params['validate_certs']
- return client
-
-
-netscaler_common_arguments = dict(
- nsip=dict(
- required=True,
- fallback=(env_fallback, ['NETSCALER_NSIP']),
- ),
- nitro_user=dict(
- required=True,
- fallback=(env_fallback, ['NETSCALER_NITRO_USER']),
- no_log=True
- ),
- nitro_pass=dict(
- required=True,
- fallback=(env_fallback, ['NETSCALER_NITRO_PASS']),
- no_log=True
- ),
- nitro_protocol=dict(
- choices=['http', 'https'],
- fallback=(env_fallback, ['NETSCALER_NITRO_PROTOCOL']),
- default='http'
- ),
- validate_certs=dict(
- default=True,
- type='bool'
- ),
- nitro_timeout=dict(default=310, type='float'),
- state=dict(
- choices=[
- 'present',
- 'absent',
- ],
- default='present',
- ),
- save_config=dict(
- type='bool',
- default=True,
- ),
-)
-
-
-loglines = []
-
-
-def complete_missing_attributes(actual, attrs_list, fill_value=None):
- for attribute in attrs_list:
- if not hasattr(actual, attribute):
- setattr(actual, attribute, fill_value)
-
-
-def log(msg):
- loglines.append(msg)
-
-
-def get_ns_version(client):
- from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsversion import nsversion
- result = nsversion.get(client)
- m = re.match(r'^.*NS(\d+)\.(\d+).*$', result[0].version)
- if m is None:
- return None
- else:
- return int(m.group(1)), int(m.group(2))
-
-
-def get_ns_hardware(client):
- from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nshardware import nshardware
- result = nshardware.get(client)
- return result
-
-
-def monkey_patch_nitro_api():
-
- from nssrc.com.citrix.netscaler.nitro.resource.base.Json import Json
-
- def new_resource_to_string_convert(self, resrc):
- # Line below is the actual patch
- dict_valid_values = dict((k.replace('_', '', 1), v) for k, v in resrc.__dict__.items() if v)
- return json.dumps(dict_valid_values)
- Json.resource_to_string_convert = new_resource_to_string_convert
-
- from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
-
- @classmethod
- def object_to_string_new(cls, obj):
- output = []
- flds = obj.__dict__
- for k, v in ((k.replace('_', '', 1), v) for k, v in flds.items() if v):
- if isinstance(v, bool):
- output.append('"%s":%s' % (k, v))
- elif isinstance(v, (binary_type, text_type)):
- v = to_native(v, errors='surrogate_or_strict')
- output.append('"%s":"%s"' % (k, v))
- elif isinstance(v, int):
- output.append('"%s":"%s"' % (k, v))
- return ','.join(output)
-
- @classmethod
- def object_to_string_withoutquotes_new(cls, obj):
- output = []
- flds = obj.__dict__
- for k, v in ((k.replace('_', '', 1), v) for k, v in flds.items() if v):
- if isinstance(v, (int, bool)):
- output.append('%s:%s' % (k, v))
- elif isinstance(v, (binary_type, text_type)):
- v = to_native(v, errors='surrogate_or_strict')
- output.append('%s:%s' % (k, cls.encode(v)))
- return ','.join(output)
-
- nitro_util.object_to_string = object_to_string_new
- nitro_util.object_to_string_withoutquotes = object_to_string_withoutquotes_new
diff --git a/lib/ansible/module_utils/network/netvisor/__init__.py b/lib/ansible/module_utils/network/netvisor/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/netvisor/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/netvisor/netvisor.py b/lib/ansible/module_utils/network/netvisor/netvisor.py
deleted file mode 100644
index 2d83a6a54e..0000000000
--- a/lib/ansible/module_utils/network/netvisor/netvisor.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright: (c) 2018, Pluribus Networks
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-#
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import json
-from ansible.module_utils._text import to_text
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import Connection, ConnectionError
-from ansible.module_utils.connection import exec_command
-
-
-def get_connection(module):
- if hasattr(module, '_nvos_connection'):
- return module._nvos_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get('network_api')
- if network_api == 'cliconf':
- module._nvos_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg='Invalid connection type %s' % network_api)
-
- return module._nvos_connection
-
-
-def get_capabilities(module):
- if hasattr(module, '_nvos_capabilities'):
- return module._nvos_capabilities
- try:
- capabilities = Connection(module._socket_path).get_capabilities()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- module._nvos_capabilities = json.loads(capabilities)
- return module._nvos_capabilities
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- commands = to_commands(module, to_list(commands))
- for cmd in commands:
- cmd = module.jsonify(cmd)
- rc, out, err = exec_command(module, cmd)
- if check_rc and rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
- responses = (to_text(out, errors='surrogate_or_strict'))
-
- return rc, out, err
diff --git a/lib/ansible/module_utils/network/netvisor/pn_nvos.py b/lib/ansible/module_utils/network/netvisor/pn_nvos.py
deleted file mode 100644
index 95da2fab4a..0000000000
--- a/lib/ansible/module_utils/network/netvisor/pn_nvos.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright: (c) 2018, Pluribus Networks
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-#
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-from ansible.module_utils.network.netvisor.netvisor import run_commands
-
-
-def pn_cli(module, switch=None, username=None, password=None, switch_local=None):
- """
- Method to generate the cli portion to launch the Netvisor cli.
- :param module: The Ansible module to fetch username and password.
- :return: The cli string for further processing.
- """
-
- cli = ''
-
- if username and password:
- cli += '--user "%s":"%s" ' % (username, password)
- if switch:
- cli += ' switch ' + switch
- if switch_local:
- cli += ' switch-local '
-
- return cli
-
-
-def booleanArgs(arg, trueString, falseString):
- if arg is True:
- return " %s " % trueString
- elif arg is False:
- return " %s " % falseString
- else:
- return ""
-
-
-def run_cli(module, cli, state_map):
- """
- This method executes the cli command on the target node(s) and returns the
- output. The module then exits based on the output.
- :param cli: the complete cli string to be executed on the target node(s).
- :param state_map: Provides state of the command.
- :param module: The Ansible module to fetch command
- """
- state = module.params['state']
- command = state_map[state]
-
- result, out, err = run_commands(module, cli)
-
- results = dict(
- command=cli,
- msg="%s operation completed" % cli,
- changed=True
- )
- # Response in JSON format
- if result != 0:
- module.exit_json(
- command=cli,
- msg="%s operation failed" % cli,
- changed=False
- )
-
- module.exit_json(**results)
diff --git a/lib/ansible/module_utils/network/nos/__init__.py b/lib/ansible/module_utils/network/nos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/nos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/nos/nos.py b/lib/ansible/module_utils/network/nos/nos.py
deleted file mode 100644
index e031a92a20..0000000000
--- a/lib/ansible/module_utils/network/nos/nos.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#
-# (c) 2018 Extreme Networks Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-import json
-from ansible.module_utils._text import to_text
-from ansible.module_utils.network.common.utils import to_list
-from ansible.module_utils.connection import Connection, ConnectionError
-
-
-def get_connection(module):
- """Get switch connection
-
- Creates reusable SSH connection to the switch described in a given module.
-
- Args:
- module: A valid AnsibleModule instance.
-
- Returns:
- An instance of `ansible.module_utils.connection.Connection` with a
- connection to the switch described in the provided module.
-
- Raises:
- AnsibleConnectionFailure: An error occurred connecting to the device
- """
- if hasattr(module, 'nos_connection'):
- return module.nos_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get('network_api')
- if network_api == 'cliconf':
- module.nos_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg='Invalid connection type %s' % network_api)
-
- return module.nos_connection
-
-
-def get_capabilities(module):
- """Get switch capabilities
-
- Collects and returns a python object with the switch capabilities.
-
- Args:
- module: A valid AnsibleModule instance.
-
- Returns:
- A dictionary containing the switch capabilities.
- """
- if hasattr(module, 'nos_capabilities'):
- return module.nos_capabilities
-
- try:
- capabilities = Connection(module._socket_path).get_capabilities()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- module.nos_capabilities = json.loads(capabilities)
- return module.nos_capabilities
-
-
-def run_commands(module, commands):
- """Run command list against connection.
-
- Get new or previously used connection and send commands to it one at a time,
- collecting response.
-
- Args:
- module: A valid AnsibleModule instance.
- commands: Iterable of command strings.
-
- Returns:
- A list of output strings.
- """
- responses = list()
- connection = get_connection(module)
-
- for cmd in to_list(commands):
- if isinstance(cmd, dict):
- command = cmd['command']
- prompt = cmd['prompt']
- answer = cmd['answer']
- else:
- command = cmd
- prompt = None
- answer = None
-
- try:
- out = connection.get(command, prompt, answer)
- out = to_text(out, errors='surrogate_or_strict')
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
- except UnicodeError:
- module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
-
- responses.append(out)
-
- return responses
-
-
-def get_config(module):
- """Get switch configuration
-
- Gets the described device's current configuration. If a configuration has
- already been retrieved it will return the previously obtained configuration.
-
- Args:
- module: A valid AnsibleModule instance.
-
- Returns:
- A string containing the configuration.
- """
- if not hasattr(module, 'device_configs'):
- module.device_configs = {}
- elif module.device_configs != {}:
- return module.device_configs
-
- connection = get_connection(module)
- try:
- out = connection.get_config()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- module.device_configs = cfg
- return cfg
-
-
-def load_config(module, commands):
- """Apply a list of commands to a device.
-
- Given a list of commands apply them to the device to modify the
- configuration in bulk.
-
- Args:
- module: A valid AnsibleModule instance.
- commands: Iterable of command strings.
-
- Returns:
- None
- """
- connection = get_connection(module)
-
- try:
- resp = connection.edit_config(commands)
- return resp.get('response')
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
diff --git a/lib/ansible/module_utils/network/nso/__init__.py b/lib/ansible/module_utils/network/nso/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/nso/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/nso/nso.py b/lib/ansible/module_utils/network/nso/nso.py
deleted file mode 100644
index 217ac5dd8b..0000000000
--- a/lib/ansible/module_utils/network/nso/nso.py
+++ /dev/null
@@ -1,822 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Cisco and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.urls import open_url
-from ansible.module_utils._text import to_text
-
-import json
-import re
-import socket
-
-try:
- unicode
- HAVE_UNICODE = True
-except NameError:
- unicode = str
- HAVE_UNICODE = False
-
-
-nso_argument_spec = dict(
- url=dict(type='str', required=True),
- username=dict(type='str', required=True, fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- password=dict(type='str', required=True, no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD'])),
- timeout=dict(type='int', default=300),
- validate_certs=dict(type='bool', default=False)
-)
-
-
-class State(object):
- SET = 'set'
- PRESENT = 'present'
- ABSENT = 'absent'
- CHECK_SYNC = 'check-sync'
- DEEP_CHECK_SYNC = 'deep-check-sync'
- IN_SYNC = 'in-sync'
- DEEP_IN_SYNC = 'deep-in-sync'
-
- SYNC_STATES = ('check-sync', 'deep-check-sync', 'in-sync', 'deep-in-sync')
-
-
-class ModuleFailException(Exception):
- def __init__(self, message):
- super(ModuleFailException, self).__init__(message)
- self.message = message
-
-
-class NsoException(Exception):
- def __init__(self, message, error):
- super(NsoException, self).__init__(message)
- self.message = message
- self.error = error
-
-
-class JsonRpc(object):
- def __init__(self, url, timeout, validate_certs):
- self._url = url
- self._timeout = timeout
- self._validate_certs = validate_certs
- self._id = 0
- self._trans = {}
- self._headers = {'Content-Type': 'application/json'}
- self._conn = None
- self._system_settings = {}
-
- def login(self, user, passwd):
- payload = {
- 'method': 'login',
- 'params': {'user': user, 'passwd': passwd}
- }
- resp, resp_json = self._call(payload)
- self._headers['Cookie'] = resp.headers['set-cookie']
-
- def logout(self):
- payload = {'method': 'logout', 'params': {}}
- self._call(payload)
-
- def get_system_setting(self, setting):
- if setting not in self._system_settings:
- payload = {'method': 'get_system_setting', 'params': {'operation': setting}}
- resp, resp_json = self._call(payload)
- self._system_settings[setting] = resp_json['result']
- return self._system_settings[setting]
-
- def new_trans(self, **kwargs):
- payload = {'method': 'new_trans', 'params': kwargs}
- resp, resp_json = self._call(payload)
- return resp_json['result']['th']
-
- def get_trans(self, mode):
- if mode not in self._trans:
- th = self.new_trans(mode=mode)
- self._trans[mode] = th
- return self._trans[mode]
-
- def delete_trans(self, th):
- payload = {'method': 'delete_trans', 'params': {'th': th}}
- resp, resp_json = self._call(payload)
- self._maybe_delete_trans(th)
-
- def validate_trans(self, th):
- payload = {'method': 'validate_trans', 'params': {'th': th}}
- resp, resp_json = self._write_call(payload)
- return resp_json['result']
-
- def get_trans_changes(self, th):
- payload = {'method': 'get_trans_changes', 'params': {'th': th}}
- resp, resp_json = self._write_call(payload)
- return resp_json['result']['changes']
-
- def validate_commit(self, th):
- payload = {'method': 'validate_commit', 'params': {'th': th}}
- resp, resp_json = self._write_call(payload)
- return resp_json['result'].get('warnings', [])
-
- def commit(self, th):
- payload = {'method': 'commit', 'params': {'th': th}}
- resp, resp_json = self._write_call(payload)
- if len(resp_json['result']) == 0:
- self._maybe_delete_trans(th)
- return resp_json['result']
-
- def get_schema(self, **kwargs):
- payload = {'method': 'get_schema', 'params': kwargs}
- resp, resp_json = self._maybe_write_call(payload)
- return resp_json['result']
-
- def get_module_prefix_map(self, path=None):
- if path is None:
- payload = {'method': 'get_module_prefix_map', 'params': {}}
- resp, resp_json = self._call(payload)
- else:
- payload = {'method': 'get_module_prefix_map', 'params': {'path': path}}
- resp, resp_json = self._maybe_write_call(payload)
- return resp_json['result']
-
- def get_value(self, path):
- payload = {
- 'method': 'get_value',
- 'params': {'path': path}
- }
- resp, resp_json = self._read_call(payload)
- return resp_json['result']
-
- def exists(self, path):
- payload = {'method': 'exists', 'params': {'path': path}}
- try:
- resp, resp_json = self._read_call(payload)
- return resp_json['result']['exists']
- except NsoException as ex:
- # calling exists on a sub-list when the parent list does
- # not exists will cause data.not_found errors on recent
- # NSO
- if 'type' in ex.error and ex.error['type'] == 'data.not_found':
- return False
- raise
-
- def create(self, th, path):
- payload = {'method': 'create', 'params': {'th': th, 'path': path}}
- self._write_call(payload)
-
- def delete(self, th, path):
- payload = {'method': 'delete', 'params': {'th': th, 'path': path}}
- self._write_call(payload)
-
- def set_value(self, th, path, value):
- payload = {
- 'method': 'set_value',
- 'params': {'th': th, 'path': path, 'value': value}
- }
- resp, resp_json = self._write_call(payload)
- return resp_json['result']
-
- def show_config(self, path, operational=False):
- payload = {
- 'method': 'show_config',
- 'params': {
- 'path': path,
- 'result_as': 'json',
- 'with_oper': operational}
- }
- resp, resp_json = self._read_call(payload)
- return resp_json['result']
-
- def query(self, xpath, fields):
- payload = {
- 'method': 'query',
- 'params': {
- 'xpath_expr': xpath,
- 'selection': fields
- }
- }
- resp, resp_json = self._read_call(payload)
- return resp_json['result']['results']
-
- def run_action(self, th, path, params=None):
- if params is None:
- params = {}
-
- if is_version(self, [(4, 5), (4, 4, 3)]):
- result_format = 'json'
- else:
- result_format = 'normal'
-
- payload = {
- 'method': 'run_action',
- 'params': {
- 'format': result_format,
- 'path': path,
- 'params': params
- }
- }
- if th is None:
- resp, resp_json = self._read_call(payload)
- else:
- payload['params']['th'] = th
- resp, resp_json = self._call(payload)
-
- if result_format == 'normal':
- # this only works for one-level results, list entries,
- # containers etc will have / in their name.
- result = {}
- for info in resp_json['result']:
- result[info['name']] = info['value']
- else:
- result = resp_json['result']
-
- return result
-
- def _call(self, payload):
- self._id += 1
- if 'id' not in payload:
- payload['id'] = self._id
-
- if 'jsonrpc' not in payload:
- payload['jsonrpc'] = '2.0'
-
- data = json.dumps(payload)
- try:
- resp = open_url(
- self._url, timeout=self._timeout,
- method='POST', data=data, headers=self._headers,
- validate_certs=self._validate_certs)
- if resp.code != 200:
- raise NsoException(
- 'NSO returned HTTP code {0}, expected 200'.format(resp.status), {})
- except socket.timeout:
- raise NsoException('request timed out against NSO at {0}'.format(self._url), {})
-
- resp_body = resp.read()
- resp_json = json.loads(resp_body)
-
- if 'error' in resp_json:
- self._handle_call_error(payload, resp_json)
- return resp, resp_json
-
- def _handle_call_error(self, payload, resp_json):
- method = payload['method']
-
- error = resp_json['error']
- error_type = error['type'][len('rpc.method.'):]
- if error_type in ('unexpected_params',
- 'unknown_params_value',
- 'invalid_params',
- 'invalid_params_type',
- 'data_not_found'):
- key = error['data']['param']
- error_type_s = error_type.replace('_', ' ')
- if key == 'path':
- msg = 'NSO {0} {1}. path = {2}'.format(
- method, error_type_s, payload['params']['path'])
- else:
- path = payload['params'].get('path', 'unknown')
- msg = 'NSO {0} {1}. path = {2}. {3} = {4}'.format(
- method, error_type_s, path, key, payload['params'][key])
- else:
- msg = 'NSO {0} returned JSON-RPC error: {1}'.format(method, error)
-
- raise NsoException(msg, error)
-
- def _read_call(self, payload):
- if 'th' not in payload['params']:
- payload['params']['th'] = self.get_trans(mode='read')
- return self._call(payload)
-
- def _write_call(self, payload):
- if 'th' not in payload['params']:
- payload['params']['th'] = self.get_trans(mode='read_write')
- return self._call(payload)
-
- def _maybe_write_call(self, payload):
- if 'read_write' in self._trans:
- return self._write_call(payload)
- else:
- return self._read_call(payload)
-
- def _maybe_delete_trans(self, th):
- for mode in ('read', 'read_write'):
- if th == self._trans.get(mode, None):
- del self._trans[mode]
-
-
-class ValueBuilder(object):
- PATH_RE = re.compile('{[^}]*}')
- PATH_RE_50 = re.compile('{[^}]*}$')
-
- class Value(object):
- __slots__ = ['path', 'tag_path', 'state', 'value', 'deps']
-
- def __init__(self, path, state, value, deps):
- self.path = path
- self.tag_path = ValueBuilder.PATH_RE.sub('', path)
- self.state = state
- self.value = value
- self.deps = deps
-
- # nodes can depend on themselves
- if self.tag_path in self.deps:
- self.deps.remove(self.tag_path)
-
- def __lt__(self, rhs):
- l_len = len(self.path.split('/'))
- r_len = len(rhs.path.split('/'))
- if l_len == r_len:
- return self.path.__lt__(rhs.path)
- return l_len < r_len
-
- def __str__(self):
- return 'Value<path={0}, state={1}, value={2}>'.format(
- self.path, self.state, self.value)
-
- class ValueIterator(object):
- def __init__(self, client, values, delayed_values):
- self._client = client
- self._values = values
- self._delayed_values = delayed_values
- self._pos = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return self.next()
-
- def next(self):
- if self._pos >= len(self._values):
- if len(self._delayed_values) == 0:
- raise StopIteration()
-
- builder = ValueBuilder(self._client, delay=False)
- for (parent, maybe_qname, value) in self._delayed_values:
- builder.build(parent, maybe_qname, value)
- del self._delayed_values[:]
- self._values.extend(builder.values)
-
- return self.next()
-
- value = self._values[self._pos]
- self._pos += 1
- return value
-
- def __init__(self, client, mode='config', delay=None):
- self._client = client
- self._mode = mode
- self._schema_cache = {}
- self._module_prefix_map_cache = {}
- self._values = []
- self._values_dirty = False
- self._delay = delay is None and mode == 'config' and is_version(self._client, [(5, 0)])
- self._delayed_values = []
-
- def build(self, parent, maybe_qname, value, schema=None):
- qname, name = self.get_prefix_name(parent, maybe_qname)
- if name is None:
- path = parent
- else:
- path = '{0}/{1}'.format(parent, qname)
-
- if schema is None:
- schema = self._get_schema(path)
-
- if self._delay and schema.get('is_mount_point', False):
- # delay conversion of mounted values, required to get
- # shema information on 5.0 and later.
- self._delayed_values.append((parent, maybe_qname, value))
- elif self._is_leaf_list(schema) and is_version(self._client, [(4, 5)]):
- self._build_leaf_list(path, schema, value)
- elif self._is_leaf(schema):
- deps = schema.get('deps', [])
- if self._is_empty_leaf(schema):
- exists = self._client.exists(path)
- if exists and value != [None]:
- self._add_value(path, State.ABSENT, None, deps)
- elif not exists and value == [None]:
- self._add_value(path, State.PRESENT, None, deps)
- else:
- if maybe_qname is None:
- value_type = self.get_type(path)
- else:
- value_type = self._get_child_type(parent, qname)
-
- if 'identityref' in value_type:
- if isinstance(value, list):
- value = [ll_v for ll_v, t_ll_v
- in [self.get_prefix_name(parent, v) for v in value]]
- else:
- value, t_value = self.get_prefix_name(parent, value)
- self._add_value(path, State.SET, value, deps)
- elif isinstance(value, dict):
- self._build_dict(path, schema, value)
- elif isinstance(value, list):
- self._build_list(path, schema, value)
- else:
- raise ModuleFailException(
- 'unsupported schema {0} at {1}'.format(
- schema['kind'], path))
-
- @property
- def values(self):
- if self._values_dirty:
- self._values = ValueBuilder.sort_values(self._values)
- self._values_dirty = False
-
- return ValueBuilder.ValueIterator(self._client, self._values, self._delayed_values)
-
- @staticmethod
- def sort_values(values):
- class N(object):
- def __init__(self, v):
- self.tmp_mark = False
- self.mark = False
- self.v = v
-
- sorted_values = []
- nodes = [N(v) for v in sorted(values)]
-
- def get_node(tag_path):
- return next((m for m in nodes
- if m.v.tag_path == tag_path), None)
-
- def is_cycle(n, dep, visited):
- visited.add(n.v.tag_path)
- if dep in visited:
- return True
-
- dep_n = get_node(dep)
- if dep_n is not None:
- for sub_dep in dep_n.v.deps:
- if is_cycle(dep_n, sub_dep, visited):
- return True
-
- return False
-
- # check for dependency cycles, remove if detected. sort will
- # not be 100% but allows for a best-effort to work around
- # issue in NSO.
- for n in nodes:
- for dep in n.v.deps:
- if is_cycle(n, dep, set()):
- n.v.deps.remove(dep)
-
- def visit(n):
- if n.tmp_mark:
- return False
- if not n.mark:
- n.tmp_mark = True
- for m in nodes:
- if m.v.tag_path in n.v.deps:
- if not visit(m):
- return False
-
- n.tmp_mark = False
- n.mark = True
-
- sorted_values.insert(0, n.v)
-
- return True
-
- n = next((n for n in nodes if not n.mark), None)
- while n is not None:
- visit(n)
- n = next((n for n in nodes if not n.mark), None)
-
- return sorted_values[::-1]
-
- def _build_dict(self, path, schema, value):
- keys = schema.get('key', [])
- for dict_key, dict_value in value.items():
- qname, name = self.get_prefix_name(path, dict_key)
- if dict_key in ('__state', ) or name in keys:
- continue
-
- child_schema = self._find_child(path, schema, qname)
- self.build(path, dict_key, dict_value, child_schema)
-
- def _build_leaf_list(self, path, schema, value):
- deps = schema.get('deps', [])
- entry_type = self.get_type(path, schema)
-
- if self._mode == 'verify':
- for entry in value:
- if 'identityref' in entry_type:
- entry, t_entry = self.get_prefix_name(path, entry)
- entry_path = '{0}{{{1}}}'.format(path, entry)
- if not self._client.exists(entry_path):
- self._add_value(entry_path, State.ABSENT, None, deps)
- else:
- # remove leaf list if treated as a list and then re-create the
- # expected list entries.
- self._add_value(path, State.ABSENT, None, deps)
-
- for entry in value:
- if 'identityref' in entry_type:
- entry, t_entry = self.get_prefix_name(path, entry)
- entry_path = '{0}{{{1}}}'.format(path, entry)
- self._add_value(entry_path, State.PRESENT, None, deps)
-
- def _build_list(self, path, schema, value):
- deps = schema.get('deps', [])
- for entry in value:
- entry_key = self._build_key(path, entry, schema['key'])
- entry_path = '{0}{{{1}}}'.format(path, entry_key)
- entry_state = entry.get('__state', 'present')
- entry_exists = self._client.exists(entry_path)
-
- if entry_state == 'absent':
- if entry_exists:
- self._add_value(entry_path, State.ABSENT, None, deps)
- else:
- if not entry_exists:
- self._add_value(entry_path, State.PRESENT, None, deps)
- if entry_state in State.SYNC_STATES:
- self._add_value(entry_path, entry_state, None, deps)
-
- self.build(entry_path, None, entry)
-
- def _build_key(self, path, entry, schema_keys):
- key_parts = []
- for key in schema_keys:
- value = entry.get(key, None)
- if value is None:
- raise ModuleFailException(
- 'required leaf {0} in {1} not set in data'.format(
- key, path))
-
- value_type = self._get_child_type(path, key)
- if 'identityref' in value_type:
- value, t_value = self.get_prefix_name(path, value)
- key_parts.append(self._quote_key(value))
- return ' '.join(key_parts)
-
- def _quote_key(self, key):
- if isinstance(key, bool):
- return key and 'true' or 'false'
-
- q_key = []
- for c in str(key):
- if c in ('{', '}', "'", '\\'):
- q_key.append('\\')
- q_key.append(c)
- q_key = ''.join(q_key)
- if ' ' in q_key:
- return '"{0}"'.format(q_key)
- return q_key
-
- def _find_child(self, path, schema, qname):
- if 'children' not in schema:
- schema = self._get_schema(path)
-
- # look for the qualified name if : is in the name
- child_schema = self._get_child(schema, qname)
- if child_schema is not None:
- return child_schema
-
- # no child was found, look for a choice with a child matching
- for child_schema in schema['children']:
- if child_schema['kind'] != 'choice':
- continue
- choice_child_schema = self._get_choice_child(child_schema, qname)
- if choice_child_schema is not None:
- return choice_child_schema
-
- raise ModuleFailException(
- 'no child in {0} with name {1}. children {2}'.format(
- path, qname, ','.join((c.get('qname', c.get('name', None)) for c in schema['children']))))
-
- def _add_value(self, path, state, value, deps):
- self._values.append(ValueBuilder.Value(path, state, value, deps))
- self._values_dirty = True
-
- def get_prefix_name(self, path, qname):
- if not isinstance(qname, (str, unicode)):
- return qname, None
- if ':' not in qname:
- return qname, qname
-
- module_prefix_map = self._get_module_prefix_map(path)
- module, name = qname.split(':', 1)
- if module not in module_prefix_map:
- raise ModuleFailException(
- 'no module mapping for module {0}. loaded modules {1}'.format(
- module, ','.join(sorted(module_prefix_map.keys()))))
-
- return '{0}:{1}'.format(module_prefix_map[module], name), name
-
- def _get_schema(self, path):
- return self._ensure_schema_cached(path)['data']
-
- def _get_child_type(self, parent_path, key):
- all_schema = self._ensure_schema_cached(parent_path)
- parent_schema = all_schema['data']
- meta = all_schema['meta']
- schema = self._find_child(parent_path, parent_schema, key)
- return self.get_type(parent_path, schema, meta)
-
- def get_type(self, path, schema=None, meta=None):
- if schema is None or meta is None:
- all_schema = self._ensure_schema_cached(path)
- schema = all_schema['data']
- meta = all_schema['meta']
-
- if self._is_leaf(schema):
- def get_type(meta, curr_type):
- if curr_type.get('primitive', False):
- return [curr_type['name']]
- if 'namespace' in curr_type:
- curr_type_key = '{0}:{1}'.format(
- curr_type['namespace'], curr_type['name'])
- type_info = meta['types'][curr_type_key][-1]
- return get_type(meta, type_info)
- if 'leaf_type' in curr_type:
- return get_type(meta, curr_type['leaf_type'][-1])
- if 'union' in curr_type:
- union_types = []
- for union_type in curr_type['union']:
- union_types.extend(get_type(meta, union_type[-1]))
- return union_types
- return [curr_type.get('name', 'unknown')]
-
- return get_type(meta, schema['type'])
- return None
-
- def _ensure_schema_cached(self, path):
- if not self._delay and is_version(self._client, [(5, 0)]):
- # newer versions of NSO support multiple different schemas
- # for different devices, thus the device is required to
- # look up the schema. Remove the key entry to get schema
- # logic working ok.
- path = ValueBuilder.PATH_RE_50.sub('', path)
- else:
- path = ValueBuilder.PATH_RE.sub('', path)
-
- if path not in self._schema_cache:
- schema = self._client.get_schema(path=path, levels=1)
- self._schema_cache[path] = schema
- return self._schema_cache[path]
-
- def _get_module_prefix_map(self, path):
- # newer versions of NSO support multiple mappings from module
- # to prefix depending on which device is used.
- if path != '' and is_version(self._client, [(5, 0)]):
- if path not in self._module_prefix_map_cache:
- self._module_prefix_map_cache[path] = self._client.get_module_prefix_map(path)
- return self._module_prefix_map_cache[path]
-
- if '' not in self._module_prefix_map_cache:
- self._module_prefix_map_cache[''] = self._client.get_module_prefix_map()
- return self._module_prefix_map_cache['']
-
- def _get_child(self, schema, qname):
- # no child specified, return parent
- if qname is None:
- return schema
-
- name_key = ':' in qname and 'qname' or 'name'
- return next((c for c in schema['children']
- if c.get(name_key, None) == qname), None)
-
- def _get_choice_child(self, schema, qname):
- name_key = ':' in qname and 'qname' or 'name'
- for child_case in schema['cases']:
- # look for direct child
- choice_child_schema = next(
- (c for c in child_case['children']
- if c.get(name_key, None) == qname), None)
- if choice_child_schema is not None:
- return choice_child_schema
-
- # look for nested choice
- for child_schema in child_case['children']:
- if child_schema['kind'] != 'choice':
- continue
- choice_child_schema = self._get_choice_child(child_schema, qname)
- if choice_child_schema is not None:
- return choice_child_schema
- return None
-
- def _is_leaf_list(self, schema):
- return schema.get('kind', None) == 'leaf-list'
-
- def _is_leaf(self, schema):
- # still checking for leaf-list here to be compatible with pre
- # 4.5 versions of NSO.
- return schema.get('kind', None) in ('key', 'leaf', 'leaf-list')
-
- def _is_empty_leaf(self, schema):
- return (schema.get('kind', None) == 'leaf' and
- schema['type'].get('primitive', False) and
- schema['type'].get('name', '') == 'empty')
-
-
-def connect(params):
- client = JsonRpc(params['url'],
- params['timeout'],
- params['validate_certs'])
- client.login(params['username'], params['password'])
- return client
-
-
-def verify_version(client, required_versions):
- version_str = client.get_system_setting('version')
- if not verify_version_str(version_str, required_versions):
- supported_versions = ', '.join(
- ['.'.join([str(p) for p in required_version])
- for required_version in required_versions])
- raise ModuleFailException(
- 'unsupported NSO version {0}. {1} or later supported'.format(
- version_str, supported_versions))
-
-
-def is_version(client, required_versions):
- version_str = client.get_system_setting('version')
- return verify_version_str(version_str, required_versions)
-
-
-def verify_version_str(version_str, required_versions):
- version_str = re.sub('_.*', '', version_str)
-
- version = [int(p) for p in version_str.split('.')]
- if len(version) < 2:
- raise ModuleFailException(
- 'unsupported NSO version format {0}'.format(version_str))
-
- def check_version(required_version, version):
- for pos in range(len(required_version)):
- if pos >= len(version):
- return False
- if version[pos] > required_version[pos]:
- return True
- if version[pos] < required_version[pos]:
- return False
- return True
-
- for required_version in required_versions:
- if check_version(required_version, version):
- return True
- return False
-
-
-def normalize_value(expected_value, value, key):
- if value is None:
- return None
- if (isinstance(expected_value, bool) and
- isinstance(value, (str, unicode))):
- return value == 'true'
- if isinstance(expected_value, int):
- try:
- return int(value)
- except TypeError:
- raise ModuleFailException(
- 'returned value {0} for {1} is not a valid integer'.format(
- key, value))
- if isinstance(expected_value, float):
- try:
- return float(value)
- except TypeError:
- raise ModuleFailException(
- 'returned value {0} for {1} is not a valid float'.format(
- key, value))
- if isinstance(expected_value, (list, tuple)):
- if not isinstance(value, (list, tuple)):
- raise ModuleFailException(
- 'returned value {0} for {1} is not a list'.format(value, key))
- if len(expected_value) != len(value):
- raise ModuleFailException(
- 'list length mismatch for {0}'.format(key))
-
- normalized_value = []
- for i in range(len(expected_value)):
- normalized_value.append(
- normalize_value(expected_value[i], value[i], '{0}[{1}]'.format(key, i)))
- return normalized_value
-
- if isinstance(expected_value, dict):
- if not isinstance(value, dict):
- raise ModuleFailException(
- 'returned value {0} for {1} is not a dict'.format(value, key))
- if len(expected_value) != len(value):
- raise ModuleFailException(
- 'dict length mismatch for {0}'.format(key))
-
- normalized_value = {}
- for k in expected_value.keys():
- n_k = normalize_value(k, k, '{0}[{1}]'.format(key, k))
- if n_k not in value:
- raise ModuleFailException('missing {0} in value'.format(n_k))
- normalized_value[n_k] = normalize_value(expected_value[k], value[k], '{0}[{1}]'.format(key, k))
- return normalized_value
-
- if HAVE_UNICODE:
- if isinstance(expected_value, unicode) and isinstance(value, str):
- return value.decode('utf-8')
- if isinstance(expected_value, str) and isinstance(value, unicode):
- return value.encode('utf-8')
- else:
- if hasattr(expected_value, 'encode') and hasattr(value, 'decode'):
- return value.decode('utf-8')
- if hasattr(expected_value, 'decode') and hasattr(value, 'encode'):
- return value.encode('utf-8')
-
- return value
diff --git a/lib/ansible/module_utils/network/onyx/__init__.py b/lib/ansible/module_utils/network/onyx/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/onyx/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/onyx/onyx.py b/lib/ansible/module_utils/network/onyx/onyx.py
deleted file mode 100644
index 90adc66581..0000000000
--- a/lib/ansible/module_utils/network/onyx/onyx.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# (c) 2017, Ansible by Red Hat, inc
-#
-# This file is part of Ansible by Red Hat
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-import json
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.connection import Connection, ConnectionError
-from ansible.module_utils.network.common.utils import to_list, EntityCollection
-
-_DEVICE_CONFIGS = {}
-_CONNECTION = None
-
-_COMMAND_SPEC = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
-}
-
-
-def get_connection(module):
- global _CONNECTION
- if _CONNECTION:
- return _CONNECTION
- _CONNECTION = Connection(module._socket_path)
- return _CONNECTION
-
-
-def to_commands(module, commands):
- if not isinstance(commands, list):
- raise AssertionError('argument must be of type <list>')
-
- transform = EntityCollection(module, _COMMAND_SPEC)
- commands = transform(commands)
- return commands
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
-
- commands = to_commands(module, to_list(commands))
-
- responses = list()
-
- for cmd in commands:
- out = connection.get(**cmd)
- responses.append(to_text(out, errors='surrogate_then_replace'))
-
- return responses
-
-
-def get_config(module, source='running'):
- conn = get_connection(module)
- out = conn.get_config(source)
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- return cfg
-
-
-def load_config(module, config):
- try:
- conn = get_connection(module)
- conn.edit_config(config)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def _parse_json_output(out):
- out_list = out.split('\n')
- first_index = 0
- opening_char = None
- lines_count = len(out_list)
- while first_index < lines_count:
- first_line = out_list[first_index].strip()
- if not first_line or first_line[0] not in ("[", "{"):
- first_index += 1
- continue
- opening_char = first_line[0]
- break
- if not opening_char:
- return "null"
- closing_char = ']' if opening_char == '[' else '}'
- last_index = lines_count - 1
- found = False
- while last_index > first_index:
- last_line = out_list[last_index].strip()
- if not last_line or last_line[0] != closing_char:
- last_index -= 1
- continue
- found = True
- break
- if not found:
- return opening_char + closing_char
- return "".join(out_list[first_index:last_index + 1])
-
-
-def show_cmd(module, cmd, json_fmt=True, fail_on_error=True):
- if json_fmt:
- cmd += " | json-print"
- conn = get_connection(module)
- command_obj = to_commands(module, to_list(cmd))[0]
- try:
- out = conn.get(**command_obj)
- except ConnectionError:
- if fail_on_error:
- raise
- return None
- if json_fmt:
- out = _parse_json_output(out)
- try:
- cfg = json.loads(out)
- except ValueError:
- module.fail_json(
- msg="got invalid json",
- stderr=to_text(out, errors='surrogate_then_replace'))
- else:
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- return cfg
-
-
-def get_interfaces_config(module, interface_type, flags=None, json_fmt=True):
- cmd = "show interfaces %s" % interface_type
- if flags:
- cmd += " %s" % flags
- return show_cmd(module, cmd, json_fmt)
-
-
-def get_bgp_summary(module):
- cmd = "show running-config protocol bgp"
- return show_cmd(module, cmd, json_fmt=False, fail_on_error=False)
-
-
-def get_capabilities(module):
- """Returns platform info of the remove device
- """
- if hasattr(module, '_capabilities'):
- return module._capabilities
-
- connection = get_connection(module)
- try:
- capabilities = connection.get_capabilities()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
-
- module._capabilities = json.loads(capabilities)
- return module._capabilities
-
-
-class BaseOnyxModule(object):
- ONYX_API_VERSION = "3.6.6000"
-
- def __init__(self):
- self._module = None
- self._commands = list()
- self._current_config = None
- self._required_config = None
- self._os_version = None
-
- def init_module(self):
- pass
-
- def load_current_config(self):
- pass
-
- def get_required_config(self):
- pass
-
- def _get_os_version(self):
- capabilities = get_capabilities(self._module)
- device_info = capabilities['device_info']
- return device_info['network_os_version']
-
- # pylint: disable=unused-argument
- def check_declarative_intent_params(self, result):
- return None
-
- def _validate_key(self, param, key):
- validator = getattr(self, 'validate_%s' % key)
- if callable(validator):
- validator(param.get(key))
-
- def validate_param_values(self, obj, param=None):
- if param is None:
- param = self._module.params
- for key in obj:
- # validate the param value (if validator func exists)
- try:
- self._validate_key(param, key)
- except AttributeError:
- pass
-
- @classmethod
- def get_config_attr(cls, item, arg):
- return item.get(arg)
-
- @classmethod
- def get_mtu(cls, item):
- mtu = cls.get_config_attr(item, "MTU")
- mtu_parts = mtu.split()
- try:
- return int(mtu_parts[0])
- except ValueError:
- return None
-
- def _validate_range(self, attr_name, min_val, max_val, value):
- if value is None:
- return True
- if not min_val <= int(value) <= max_val:
- msg = '%s must be between %s and %s' % (
- attr_name, min_val, max_val)
- self._module.fail_json(msg=msg)
-
- def validate_mtu(self, value):
- self._validate_range('mtu', 1500, 9612, value)
-
- def generate_commands(self):
- pass
-
- def run(self):
- self.init_module()
-
- result = {'changed': False}
-
- self.get_required_config()
- self.load_current_config()
-
- self.generate_commands()
- result['commands'] = self._commands
-
- if self._commands:
- if not self._module.check_mode:
- load_config(self._module, self._commands)
- result['changed'] = True
-
- failed_conditions = self.check_declarative_intent_params(result)
-
- if failed_conditions:
- msg = 'One or more conditional statements have not been satisfied'
- self._module.fail_json(msg=msg,
- failed_conditions=failed_conditions)
-
- self._module.exit_json(**result)
-
- @classmethod
- def main(cls):
- app = cls()
- app.run()
diff --git a/lib/ansible/module_utils/network/ordnance/__init__.py b/lib/ansible/module_utils/network/ordnance/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/ordnance/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/ordnance/ordnance.py b/lib/ansible/module_utils/network/ordnance/ordnance.py
deleted file mode 100644
index 070a86d3e1..0000000000
--- a/lib/ansible/module_utils/network/ordnance/ordnance.py
+++ /dev/null
@@ -1,19 +0,0 @@
-_DEVICE_CONFIGS = {}
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- cmd = 'show running-config '
- cmd += ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- rc, out, err = module.exec_command(cmd)
- if rc != 0:
- module.fail_json(msg='unable to retrieve current config', stderr=err)
- cfg = str(out).strip()
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
diff --git a/lib/ansible/module_utils/network/panos/__init__.py b/lib/ansible/module_utils/network/panos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/panos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/panos/panos.py b/lib/ansible/module_utils/network/panos/panos.py
deleted file mode 100644
index f50257dcf2..0000000000
--- a/lib/ansible/module_utils/network/panos/panos.py
+++ /dev/null
@@ -1,418 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2018 Palo Alto Networks techbizdev, <techbizdev@paloaltonetworks.com>
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-_MIN_VERSION_ERROR = '{0} version ({1}) < minimum version ({2})'
-HAS_PANDEVICE = True
-try:
- import pandevice
- from pandevice.base import PanDevice
- from pandevice.firewall import Firewall
- from pandevice.panorama import DeviceGroup, Template, TemplateStack
- from pandevice.policies import PreRulebase, PostRulebase, Rulebase
- from pandevice.device import Vsys
- from pandevice.errors import PanDeviceError
-except ImportError:
- HAS_PANDEVICE = False
-
-
-def _vstr(val):
- return '{0}.{1}.{2}'.format(*val)
-
-
-class ConnectionHelper(object):
- def __init__(self, min_pandevice_version, min_panos_version,
- panorama_error, firewall_error):
- """Performs connection initialization and determines params."""
- # Params for AnsibleModule.
- self.argument_spec = {}
- self.required_one_of = []
-
- # Params for pandevice tree construction.
- self.vsys = None
- self.device_group = None
- self.vsys_dg = None
- self.rulebase = None
- self.template = None
- self.template_stack = None
- self.vsys_importable = None
- self.min_pandevice_version = min_pandevice_version
- self.min_panos_version = min_panos_version
- self.panorama_error = panorama_error
- self.firewall_error = firewall_error
-
- # The PAN-OS device.
- self.device = None
-
- def get_pandevice_parent(self, module):
- """Builds the pandevice object tree, returning the parent object.
-
- If pandevice is not installed, then module.fail_json() will be
- invoked.
-
- Arguments:
- * module(AnsibleModule): the ansible module.
-
- Returns:
- * The parent pandevice object based on the spec given to
- get_connection().
- """
- # Sanity check.
- if not HAS_PANDEVICE:
- module.fail_json(msg='Missing required library "pandevice".')
-
- # Verify pandevice minimum version.
- if self.min_pandevice_version is not None:
- pdv = tuple(int(x) for x in pandevice.__version__.split('.'))
- if pdv < self.min_pandevice_version:
- module.fail_json(msg=_MIN_VERSION_ERROR.format(
- 'pandevice', pandevice.__version__,
- _vstr(self.min_pandevice_version)))
-
- pan_device_auth, serial_number = None, None
- if module.params['provider'] and module.params['provider']['ip_address']:
- pan_device_auth = (
- module.params['provider']['ip_address'],
- module.params['provider']['username'],
- module.params['provider']['password'],
- module.params['provider']['api_key'],
- module.params['provider']['port'],
- )
- serial_number = module.params['provider']['serial_number']
- elif module.params.get('ip_address', None) is not None:
- pan_device_auth = (
- module.params['ip_address'],
- module.params['username'],
- module.params['password'],
- module.params['api_key'],
- module.params['port'],
- )
- msg = 'Classic provider params are deprecated; use "provider" instead'
- module.deprecate(msg, '2.12')
- else:
- module.fail_json(msg='Provider params are required.')
-
- # Create the connection object.
- try:
- self.device = PanDevice.create_from_device(*pan_device_auth)
- except PanDeviceError as e:
- module.fail_json(msg='Failed connection: {0}'.format(e))
-
- # Verify PAN-OS minimum version.
- if self.min_panos_version is not None:
- if self.device._version_info < self.min_panos_version:
- module.fail_json(msg=_MIN_VERSION_ERROR.format(
- 'PAN-OS', _vstr(self.device._version_info),
- _vstr(self.min_panos_version)))
-
- # Optional: Firewall via Panorama connectivity specified.
- if hasattr(self.device, 'refresh_devices') and serial_number:
- fw = Firewall(serial=serial_number)
- self.device.add(fw)
- self.device = fw
-
- parent = self.device
- not_found = '{0} "{1}" is not present.'
- pano_mia_param = 'Param "{0}" is required for Panorama but not specified.'
- ts_error = 'Specify either the template or the template stack{0}.'
- if hasattr(self.device, 'refresh_devices'):
- # Panorama connection.
- # Error if Panorama is not supported.
- if self.panorama_error is not None:
- module.fail_json(msg=self.panorama_error)
-
- # Spec: template stack.
- tmpl_required = False
- added_template = False
- if self.template_stack is not None:
- name = module.params[self.template_stack]
- if name is not None:
- stacks = TemplateStack.refreshall(parent, name_only=True)
- for ts in stacks:
- if ts.name == name:
- parent = ts
- added_template = True
- break
- else:
- module.fail_json(msg=not_found.format(
- 'Template stack', name,
- ))
- elif self.template is not None:
- tmpl_required = True
- else:
- module.fail_json(msg=pano_mia_param.format(self.template_stack))
-
- # Spec: template.
- if self.template is not None:
- name = module.params[self.template]
- if name is not None:
- if added_template:
- module.fail_json(msg=ts_error.format(', not both'))
- templates = Template.refreshall(parent, name_only=True)
- for t in templates:
- if t.name == name:
- parent = t
- break
- else:
- module.fail_json(msg=not_found.format(
- 'Template', name,
- ))
- elif tmpl_required:
- module.fail_json(msg=ts_error.format(''))
- else:
- module.fail_json(msg=pano_mia_param.format(self.template))
-
- # Spec: vsys importable.
- vsys_name = self.vsys_importable or self.vsys
- if vsys_name is not None:
- name = module.params[vsys_name]
- if name not in (None, 'shared'):
- vo = Vsys(name)
- parent.add(vo)
- parent = vo
-
- # Spec: vsys_dg or device_group.
- dg_name = self.vsys_dg or self.device_group
- if dg_name is not None:
- name = module.params[dg_name]
- if name not in (None, 'shared'):
- groups = DeviceGroup.refreshall(parent, name_only=True)
- for dg in groups:
- if dg.name == name:
- parent = dg
- break
- else:
- module.fail_json(msg=not_found.format(
- 'Device group', name,
- ))
-
- # Spec: rulebase.
- if self.rulebase is not None:
- if module.params[self.rulebase] in (None, 'pre-rulebase'):
- rb = PreRulebase()
- parent.add(rb)
- parent = rb
- elif module.params[self.rulebase] == 'rulebase':
- rb = Rulebase()
- parent.add(rb)
- parent = rb
- elif module.params[self.rulebase] == 'post-rulebase':
- rb = PostRulebase()
- parent.add(rb)
- parent = rb
- else:
- module.fail_json(msg=not_found.format(
- 'Rulebase', module.params[self.rulebase]))
- else:
- # Firewall connection.
- # Error if firewalls are not supported.
- if self.firewall_error is not None:
- module.fail_json(msg=self.firewall_error)
-
- # Spec: vsys or vsys_dg or vsys_importable.
- vsys_name = self.vsys_dg or self.vsys or self.vsys_importable
- if vsys_name is not None:
- parent.vsys = module.params[vsys_name]
-
- # Spec: rulebase.
- if self.rulebase is not None:
- rb = Rulebase()
- parent.add(rb)
- parent = rb
-
- # Done.
- return parent
-
-
-def get_connection(vsys=None, device_group=None,
- vsys_dg=None, vsys_importable=None,
- rulebase=None, template=None, template_stack=None,
- with_classic_provider_spec=False, with_state=True,
- argument_spec=None, required_one_of=None,
- min_pandevice_version=None, min_panos_version=None,
- panorama_error=None, firewall_error=None):
- """Returns a helper object that handles pandevice object tree init.
-
- The `vsys`, `device_group`, `vsys_dg`, `vsys_importable`, `rulebase`,
- `template`, and `template_stack` params can be any of the following types:
-
- * None - do not include this in the spec
- * True - use the default param name
- * string - use this string for the param name
-
- The `min_pandevice_version` and `min_panos_version` args expect a 3 element
- tuple of ints. For example, `(0, 6, 0)` or `(8, 1, 0)`.
-
- If you are including template support (by defining either `template` and/or
- `template_stack`), and the thing the module is enabling the management of is
- an "importable", you should define either `vsys_importable` (whose default
- value is None) or `vsys` (whose default value is 'vsys1').
-
- Arguments:
- vsys: The vsys (default: 'vsys1').
- device_group: Panorama only - The device group (default: 'shared').
- vsys_dg: The param name if vsys and device_group are a shared param.
- vsys_importable: Either this or `vsys` should be specified. For:
- - Interfaces
- - VLANs
- - Virtual Wires
- - Virtual Routers
- rulebase: This is a policy of some sort.
- template: Panorama - The template name.
- template_stack: Panorama - The template stack name.
- with_classic_provider_spec(bool): Include the ip_address, username,
- password, api_key, and port params in the base spec, and make the
- "provider" param optional.
- with_state(bool): Include the standard 'state' param.
- argument_spec(dict): The argument spec to mixin with the
- generated spec based on the given parameters.
- required_one_of(list): List of lists to extend into required_one_of.
- min_pandevice_version(tuple): Minimum pandevice version allowed.
- min_panos_version(tuple): Minimum PAN-OS version allowed.
- panorama_error(str): The error message if the device is Panorama.
- firewall_error(str): The error message if the device is a firewall.
-
- Returns:
- ConnectionHelper
- """
- helper = ConnectionHelper(
- min_pandevice_version, min_panos_version,
- panorama_error, firewall_error)
- req = []
- spec = {
- 'provider': {
- 'required': True,
- 'type': 'dict',
- 'required_one_of': [['password', 'api_key'], ],
- 'options': {
- 'ip_address': {'required': True},
- 'username': {'default': 'admin'},
- 'password': {'no_log': True},
- 'api_key': {'no_log': True},
- 'port': {'default': 443, 'type': 'int'},
- 'serial_number': {'no_log': True},
- },
- },
- }
-
- if with_classic_provider_spec:
- spec['provider']['required'] = False
- spec['provider']['options']['ip_address']['required'] = False
- del(spec['provider']['required_one_of'])
- spec.update({
- 'ip_address': {'required': False},
- 'username': {'default': 'admin'},
- 'password': {'no_log': True},
- 'api_key': {'no_log': True},
- 'port': {'default': 443, 'type': 'int'},
- })
- req.extend([
- ['provider', 'ip_address'],
- ['provider', 'password', 'api_key'],
- ])
-
- if with_state:
- spec['state'] = {
- 'default': 'present',
- 'choices': ['present', 'absent'],
- }
-
- if vsys_dg is not None:
- if isinstance(vsys_dg, bool):
- param = 'vsys_dg'
- else:
- param = vsys_dg
- spec[param] = {}
- helper.vsys_dg = param
- else:
- if vsys is not None:
- if isinstance(vsys, bool):
- param = 'vsys'
- else:
- param = vsys
- spec[param] = {'default': 'vsys1'}
- helper.vsys = param
- if device_group is not None:
- if isinstance(device_group, bool):
- param = 'device_group'
- else:
- param = device_group
- spec[param] = {'default': 'shared'}
- helper.device_group = param
- if vsys_importable is not None:
- if vsys is not None:
- raise KeyError('Define "vsys" or "vsys_importable", not both.')
- if isinstance(vsys_importable, bool):
- param = 'vsys'
- else:
- param = vsys_importable
- spec[param] = {}
- helper.vsys_importable = param
-
- if rulebase is not None:
- if isinstance(rulebase, bool):
- param = 'rulebase'
- else:
- param = rulebase
- spec[param] = {
- 'default': None,
- 'choices': ['pre-rulebase', 'rulebase', 'post-rulebase'],
- }
- helper.rulebase = param
-
- if template is not None:
- if isinstance(template, bool):
- param = 'template'
- else:
- param = template
- spec[param] = {}
- helper.template = param
-
- if template_stack is not None:
- if isinstance(template_stack, bool):
- param = 'template_stack'
- else:
- param = template_stack
- spec[param] = {}
- helper.template_stack = param
-
- if argument_spec is not None:
- for k in argument_spec.keys():
- if k in spec:
- raise KeyError('{0}: key used by connection helper.'.format(k))
- spec[k] = argument_spec[k]
-
- if required_one_of is not None:
- req.extend(required_one_of)
-
- # Done.
- helper.argument_spec = spec
- helper.required_one_of = req
- return helper
diff --git a/lib/ansible/module_utils/network/routeros/__init__.py b/lib/ansible/module_utils/network/routeros/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/routeros/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/routeros/routeros.py b/lib/ansible/module_utils/network/routeros/routeros.py
deleted file mode 100644
index 9442ec54aa..0000000000
--- a/lib/ansible/module_utils/network/routeros/routeros.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2016 Red Hat Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-import json
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import Connection, ConnectionError
-
-_DEVICE_CONFIGS = {}
-
-routeros_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'timeout': dict(type='int')
-}
-routeros_argument_spec = {}
-
-
-def get_provider_argspec():
- return routeros_provider_spec
-
-
-def get_connection(module):
- if hasattr(module, '_routeros_connection'):
- return module._routeros_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get('network_api')
- if network_api == 'cliconf':
- module._routeros_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg='Invalid connection type %s' % network_api)
-
- return module._routeros_connection
-
-
-def get_capabilities(module):
- if hasattr(module, '_routeros_capabilities'):
- return module._routeros_capabilities
-
- capabilities = Connection(module._socket_path).get_capabilities()
- module._routeros_capabilities = json.loads(capabilities)
- return module._routeros_capabilities
-
-
-def get_defaults_flag(module):
- connection = get_connection(module)
-
- try:
- out = connection.get('/system default-configuration print')
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
-
- out = to_text(out, errors='surrogate_then_replace')
-
- commands = set()
- for line in out.splitlines():
- if line.strip():
- commands.add(line.strip().split()[0])
-
- if 'all' in commands:
- return ['all']
- else:
- return ['full']
-
-
-def get_config(module, flags=None):
- flag_str = ' '.join(to_list(flags))
-
- try:
- return _DEVICE_CONFIGS[flag_str]
- except KeyError:
- connection = get_connection(module)
-
- try:
- out = connection.get_config(flags=flags)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
-
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- _DEVICE_CONFIGS[flag_str] = cfg
- return cfg
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- responses = list()
- connection = get_connection(module)
-
- for cmd in to_list(commands):
- if isinstance(cmd, dict):
- command = cmd['command']
- prompt = cmd['prompt']
- answer = cmd['answer']
- else:
- command = cmd
- prompt = None
- answer = None
-
- try:
- out = connection.get(command, prompt, answer)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
-
- try:
- out = to_text(out, errors='surrogate_or_strict')
- except UnicodeError:
- module.fail_json(
- msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
-
- responses.append(out)
-
- return responses
-
-
-def load_config(module, commands):
- connection = get_connection(module)
-
- out = connection.edit_config(commands)
diff --git a/lib/ansible/module_utils/network/slxos/__init__.py b/lib/ansible/module_utils/network/slxos/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/slxos/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/slxos/slxos.py b/lib/ansible/module_utils/network/slxos/slxos.py
deleted file mode 100644
index cfd369f124..0000000000
--- a/lib/ansible/module_utils/network/slxos/slxos.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#
-# (c) 2018 Extreme Networks Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-import json
-from ansible.module_utils._text import to_text
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import Connection
-
-
-def get_connection(module):
- """Get switch connection
-
- Creates reusable SSH connection to the switch described in a given module.
-
- Args:
- module: A valid AnsibleModule instance.
-
- Returns:
- An instance of `ansible.module_utils.connection.Connection` with a
- connection to the switch described in the provided module.
-
- Raises:
- AnsibleConnectionFailure: An error occurred connecting to the device
- """
- if hasattr(module, 'slxos_connection'):
- return module.slxos_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get('network_api')
- if network_api == 'cliconf':
- module.slxos_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg='Invalid connection type %s' % network_api)
-
- return module.slxos_connection
-
-
-def get_capabilities(module):
- """Get switch capabilities
-
- Collects and returns a python object with the switch capabilities.
-
- Args:
- module: A valid AnsibleModule instance.
-
- Returns:
- A dictionary containing the switch capabilities.
- """
- if hasattr(module, 'slxos_capabilities'):
- return module.slxos_capabilities
-
- capabilities = Connection(module._socket_path).get_capabilities()
- module.slxos_capabilities = json.loads(capabilities)
- return module.slxos_capabilities
-
-
-def run_commands(module, commands):
- """Run command list against connection.
-
- Get new or previously used connection and send commands to it one at a time,
- collecting response.
-
- Args:
- module: A valid AnsibleModule instance.
- commands: Iterable of command strings.
-
- Returns:
- A list of output strings.
- """
- responses = list()
- connection = get_connection(module)
-
- for cmd in to_list(commands):
- if isinstance(cmd, dict):
- command = cmd['command']
- prompt = cmd['prompt']
- answer = cmd['answer']
- else:
- command = cmd
- prompt = None
- answer = None
-
- out = connection.get(command, prompt, answer)
-
- try:
- out = to_text(out, errors='surrogate_or_strict')
- except UnicodeError:
- module.fail_json(msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
-
- responses.append(out)
-
- return responses
-
-
-def get_config(module):
- """Get switch configuration
-
- Gets the described device's current configuration. If a configuration has
- already been retrieved it will return the previously obtained configuration.
-
- Args:
- module: A valid AnsibleModule instance.
-
- Returns:
- A string containing the configuration.
- """
- if not hasattr(module, 'device_configs'):
- module.device_configs = {}
- elif module.device_configs != {}:
- return module.device_configs
-
- connection = get_connection(module)
- out = connection.get_config()
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- module.device_configs = cfg
- return cfg
-
-
-def load_config(module, commands):
- """Apply a list of commands to a device.
-
- Given a list of commands apply them to the device to modify the
- configuration in bulk.
-
- Args:
- module: A valid AnsibleModule instance.
- commands: Iterable of command strings.
-
- Returns:
- None
- """
- connection = get_connection(module)
- connection.edit_config(commands)
diff --git a/lib/ansible/module_utils/network/sros/__init__.py b/lib/ansible/module_utils/network/sros/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/sros/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/sros/sros.py b/lib/ansible/module_utils/network/sros/sros.py
deleted file mode 100644
index 8a848cfe1d..0000000000
--- a/lib/ansible/module_utils/network/sros/sros.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2016 Peter Sprygada, <psprygada@ansible.com>
-#
-# Redistribution and use in source and binary forms, with or without
-# modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice,
-# this list of conditions and the following disclaimer in the
-# documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-import re
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import exec_command
-
-_DEVICE_CONFIGS = {}
-
-sros_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'timeout': dict(type='int'),
-}
-sros_argument_spec = {
- 'provider': dict(type='dict', options=sros_provider_spec),
-}
-sros_top_spec = {
- 'host': dict(removed_in_version=2.9),
- 'port': dict(removed_in_version=2.9, type='int'),
- 'username': dict(removed_in_version=2.9),
- 'password': dict(removed_in_version=2.9, no_log=True),
- 'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
- 'timeout': dict(removed_in_version=2.9, type='int'),
-}
-sros_argument_spec.update(sros_top_spec)
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- cmd = 'admin display-config '
- cmd += ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- rc, out, err = exec_command(module, cmd)
- if rc != 0:
- module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
- cfg = to_text(out, errors='surrogate_or_strict').strip()
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- responses = list()
- commands = to_commands(module, to_list(commands))
- for cmd in commands:
- cmd = module.jsonify(cmd)
- rc, out, err = exec_command(module, cmd)
- if check_rc and rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
- responses.append(to_text(out, errors='surrogate_or_strict'))
- return responses
-
-
-def load_config(module, commands):
- for command in to_list(commands):
- rc, out, err = exec_command(module, command)
- if rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
- exec_command(module, 'exit all')
diff --git a/lib/ansible/module_utils/network/voss/__init__.py b/lib/ansible/module_utils/network/voss/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/network/voss/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/network/voss/voss.py b/lib/ansible/module_utils/network/voss/voss.py
deleted file mode 100644
index e70fe25438..0000000000
--- a/lib/ansible/module_utils/network/voss/voss.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2018 Extreme Networks Inc.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-import json
-import re
-
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import Connection, ConnectionError
-from ansible.module_utils.network.common.config import NetworkConfig, ConfigLine
-
-_DEVICE_CONFIGS = {}
-
-DEFAULT_COMMENT_TOKENS = ['#', '!', '/*', '*/', 'echo']
-
-DEFAULT_IGNORE_LINES_RE = set([
- re.compile(r"Preparing to Display Configuration\.\.\.")
-])
-
-
-def get_connection(module):
- if hasattr(module, '_voss_connection'):
- return module._voss_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get('network_api')
- if network_api == 'cliconf':
- module._voss_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg='Invalid connection type %s' % network_api)
-
- return module._voss_connection
-
-
-def get_capabilities(module):
- if hasattr(module, '_voss_capabilities'):
- return module._voss_capabilities
- try:
- capabilities = Connection(module._socket_path).get_capabilities()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- module._voss_capabilities = json.loads(capabilities)
- return module._voss_capabilities
-
-
-def get_defaults_flag(module):
- connection = get_connection(module)
- try:
- out = connection.get_defaults_flag()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- return to_text(out, errors='surrogate_then_replace').strip()
-
-
-def get_config(module, source='running', flags=None):
- flag_str = ' '.join(to_list(flags))
-
- try:
- return _DEVICE_CONFIGS[flag_str]
- except KeyError:
- connection = get_connection(module)
- try:
- out = connection.get_config(source=source, flags=flags)
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
- cfg = to_text(out, errors='surrogate_then_replace').strip()
- _DEVICE_CONFIGS[flag_str] = cfg
- return cfg
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- connection = get_connection(module)
- try:
- out = connection.run_commands(commands=commands, check_rc=check_rc)
- return out
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def load_config(module, commands):
- connection = get_connection(module)
-
- try:
- resp = connection.edit_config(commands)
- return resp.get('response')
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc))
-
-
-def get_sublevel_config(running_config, module):
- contents = list()
- current_config_contents = list()
- sublevel_config = VossNetworkConfig(indent=0)
- obj = running_config.get_object(module.params['parents'])
- if obj:
- contents = obj._children
- for c in contents:
- if isinstance(c, ConfigLine):
- current_config_contents.append(c.raw)
- sublevel_config.add(current_config_contents, module.params['parents'])
- return sublevel_config
-
-
-def ignore_line(text, tokens=None):
- for item in (tokens or DEFAULT_COMMENT_TOKENS):
- if text.startswith(item):
- return True
- for regex in DEFAULT_IGNORE_LINES_RE:
- if regex.match(text):
- return True
-
-
-def voss_parse(lines, indent=None, comment_tokens=None):
- toplevel = re.compile(r'(^interface.*$)|(^router \w+$)|(^router vrf \w+$)')
- exitline = re.compile(r'^exit$')
- entry_reg = re.compile(r'([{};])')
-
- ancestors = list()
- config = list()
- dup_parent_index = None
-
- for line in to_native(lines, errors='surrogate_or_strict').split('\n'):
- text = entry_reg.sub('', line).strip()
-
- cfg = ConfigLine(text)
-
- if not text or ignore_line(text, comment_tokens):
- continue
-
- # Handle top level commands
- if toplevel.match(text):
- # Looking to see if we have existing parent
- for index, item in enumerate(config):
- if item.text == text:
- # This means we have an existing parent with same label
- dup_parent_index = index
- break
- ancestors = [cfg]
- config.append(cfg)
-
- # Handle 'exit' line
- elif exitline.match(text):
- ancestors = list()
-
- if dup_parent_index is not None:
- # We're working with a duplicate parent
- # Don't need to store exit, just go to next line in config
- dup_parent_index = None
- else:
- cfg._parents = ancestors[:1]
- config.append(cfg)
-
- # Handle sub-level commands. Only have single sub-level
- elif ancestors:
- cfg._parents = ancestors[:1]
- if dup_parent_index is not None:
- # Update existing entry, since this already exists in config
- config[int(dup_parent_index)].add_child(cfg)
- new_index = dup_parent_index + 1
- config.insert(new_index, cfg)
- else:
- ancestors[0].add_child(cfg)
- config.append(cfg)
-
- else:
- # Global command, no further special handling needed
- config.append(cfg)
- return config
-
-
-class VossNetworkConfig(NetworkConfig):
-
- def load(self, s):
- self._config_text = s
- self._items = voss_parse(s, self._indent)
-
- def _diff_line(self, other):
- updates = list()
- for item in self.items:
- if str(item) == "exit":
- if updates and updates[-1]._parents:
- updates.append(item)
- elif item not in other:
- updates.append(item)
- return updates
diff --git a/lib/ansible/module_utils/oneandone.py b/lib/ansible/module_utils/oneandone.py
deleted file mode 100644
index 75cfbae695..0000000000
--- a/lib/ansible/module_utils/oneandone.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-import time
-
-
-class OneAndOneResources:
- firewall_policy = 'firewall_policy'
- load_balancer = 'load_balancer'
- monitoring_policy = 'monitoring_policy'
- private_network = 'private_network'
- public_ip = 'public_ip'
- role = 'role'
- server = 'server'
- user = 'user'
- vpn = 'vpn'
-
-
-def get_resource(oneandone_conn, resource_type, resource_id):
- switcher = {
- 'firewall_policy': oneandone_conn.get_firewall,
- 'load_balancer': oneandone_conn.get_load_balancer,
- 'monitoring_policy': oneandone_conn.get_monitoring_policy,
- 'private_network': oneandone_conn.get_private_network,
- 'public_ip': oneandone_conn.get_public_ip,
- 'role': oneandone_conn.get_role,
- 'server': oneandone_conn.get_server,
- 'user': oneandone_conn.get_user,
- 'vpn': oneandone_conn.get_vpn,
- }
-
- return switcher.get(resource_type, None)(resource_id)
-
-
-def get_datacenter(oneandone_conn, datacenter, full_object=False):
- """
- Validates the datacenter exists by ID or country code.
- Returns the datacenter ID.
- """
- for _datacenter in oneandone_conn.list_datacenters():
- if datacenter in (_datacenter['id'], _datacenter['country_code']):
- if full_object:
- return _datacenter
- return _datacenter['id']
-
-
-def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False):
- """
- Validates the fixed instance size exists by ID or name.
- Return the instance size ID.
- """
- for _fixed_instance_size in oneandone_conn.fixed_server_flavors():
- if fixed_instance_size in (_fixed_instance_size['id'],
- _fixed_instance_size['name']):
- if full_object:
- return _fixed_instance_size
- return _fixed_instance_size['id']
-
-
-def get_appliance(oneandone_conn, appliance, full_object=False):
- """
- Validates the appliance exists by ID or name.
- Return the appliance ID.
- """
- for _appliance in oneandone_conn.list_appliances(q='IMAGE'):
- if appliance in (_appliance['id'], _appliance['name']):
- if full_object:
- return _appliance
- return _appliance['id']
-
-
-def get_private_network(oneandone_conn, private_network, full_object=False):
- """
- Validates the private network exists by ID or name.
- Return the private network ID.
- """
- for _private_network in oneandone_conn.list_private_networks():
- if private_network in (_private_network['name'],
- _private_network['id']):
- if full_object:
- return _private_network
- return _private_network['id']
-
-
-def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False):
- """
- Validates the monitoring policy exists by ID or name.
- Return the monitoring policy ID.
- """
- for _monitoring_policy in oneandone_conn.list_monitoring_policies():
- if monitoring_policy in (_monitoring_policy['name'],
- _monitoring_policy['id']):
- if full_object:
- return _monitoring_policy
- return _monitoring_policy['id']
-
-
-def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False):
- """
- Validates the firewall policy exists by ID or name.
- Return the firewall policy ID.
- """
- for _firewall_policy in oneandone_conn.list_firewall_policies():
- if firewall_policy in (_firewall_policy['name'],
- _firewall_policy['id']):
- if full_object:
- return _firewall_policy
- return _firewall_policy['id']
-
-
-def get_load_balancer(oneandone_conn, load_balancer, full_object=False):
- """
- Validates the load balancer exists by ID or name.
- Return the load balancer ID.
- """
- for _load_balancer in oneandone_conn.list_load_balancers():
- if load_balancer in (_load_balancer['name'],
- _load_balancer['id']):
- if full_object:
- return _load_balancer
- return _load_balancer['id']
-
-
-def get_server(oneandone_conn, instance, full_object=False):
- """
- Validates that the server exists whether by ID or name.
- Returns the server if one was found.
- """
- for server in oneandone_conn.list_servers(per_page=1000):
- if instance in (server['id'], server['name']):
- if full_object:
- return server
- return server['id']
-
-
-def get_user(oneandone_conn, user, full_object=False):
- """
- Validates that the user exists by ID or a name.
- Returns the user if one was found.
- """
- for _user in oneandone_conn.list_users(per_page=1000):
- if user in (_user['id'], _user['name']):
- if full_object:
- return _user
- return _user['id']
-
-
-def get_role(oneandone_conn, role, full_object=False):
- """
- Given a name, validates that the role exists
- whether it is a proper ID or a name.
- Returns the role if one was found, else None.
- """
- for _role in oneandone_conn.list_roles(per_page=1000):
- if role in (_role['id'], _role['name']):
- if full_object:
- return _role
- return _role['id']
-
-
-def get_vpn(oneandone_conn, vpn, full_object=False):
- """
- Validates that the vpn exists by ID or a name.
- Returns the vpn if one was found.
- """
- for _vpn in oneandone_conn.list_vpns(per_page=1000):
- if vpn in (_vpn['id'], _vpn['name']):
- if full_object:
- return _vpn
- return _vpn['id']
-
-
-def get_public_ip(oneandone_conn, public_ip, full_object=False):
- """
- Validates that the public ip exists by ID or a name.
- Returns the public ip if one was found.
- """
- for _public_ip in oneandone_conn.list_public_ips(per_page=1000):
- if public_ip in (_public_ip['id'], _public_ip['ip']):
- if full_object:
- return _public_ip
- return _public_ip['id']
-
-
-def wait_for_resource_creation_completion(oneandone_conn,
- resource_type,
- resource_id,
- wait_timeout,
- wait_interval):
- """
- Waits for the resource create operation to complete based on the timeout period.
- """
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(wait_interval)
-
- # Refresh the resource info
- resource = get_resource(oneandone_conn, resource_type, resource_id)
-
- if resource_type == OneAndOneResources.server:
- resource_state = resource['status']['state']
- else:
- resource_state = resource['state']
-
- if ((resource_type == OneAndOneResources.server and resource_state.lower() == 'powered_on') or
- (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')):
- return
- elif resource_state.lower() == 'failed':
- raise Exception('%s creation failed for %s' % (resource_type, resource_id))
- elif resource_state.lower() in ('active',
- 'enabled',
- 'deploying',
- 'configuring'):
- continue
- else:
- raise Exception(
- 'Unknown %s state %s' % (resource_type, resource_state))
-
- raise Exception(
- 'Timed out waiting for %s completion for %s' % (resource_type, resource_id))
-
-
-def wait_for_resource_deletion_completion(oneandone_conn,
- resource_type,
- resource_id,
- wait_timeout,
- wait_interval):
- """
- Waits for the resource delete operation to complete based on the timeout period.
- """
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(wait_interval)
-
- # Refresh the operation info
- logs = oneandone_conn.list_logs(q='DELETE',
- period='LAST_HOUR',
- sort='-start_date')
-
- if resource_type == OneAndOneResources.server:
- _type = 'VM'
- elif resource_type == OneAndOneResources.private_network:
- _type = 'PRIVATENETWORK'
- else:
- raise Exception(
- 'Unsupported wait_for delete operation for %s resource' % resource_type)
-
- for log in logs:
- if (log['resource']['id'] == resource_id and
- log['action'] == 'DELETE' and
- log['type'] == _type and
- log['status']['state'] == 'OK'):
- return
- raise Exception(
- 'Timed out waiting for %s deletion for %s' % (resource_type, resource_id))
diff --git a/lib/ansible/module_utils/oneview.py b/lib/ansible/module_utils/oneview.py
deleted file mode 100644
index 0d3116c8e6..0000000000
--- a/lib/ansible/module_utils/oneview.py
+++ /dev/null
@@ -1,502 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import (absolute_import, division, print_function)
-
-import abc
-import collections
-import json
-import os
-import traceback
-
-HPE_ONEVIEW_IMP_ERR = None
-try:
- from hpOneView.oneview_client import OneViewClient
- HAS_HPE_ONEVIEW = True
-except ImportError:
- HPE_ONEVIEW_IMP_ERR = traceback.format_exc()
- HAS_HPE_ONEVIEW = False
-
-from ansible.module_utils import six
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils._text import to_native
-from ansible.module_utils.common._collections_compat import Mapping
-
-
-def transform_list_to_dict(list_):
- """
- Transforms a list into a dictionary, putting values as keys.
-
- :arg list list_: List of values
- :return: dict: dictionary built
- """
-
- ret = {}
-
- if not list_:
- return ret
-
- for value in list_:
- if isinstance(value, Mapping):
- ret.update(value)
- else:
- ret[to_native(value, errors='surrogate_or_strict')] = True
-
- return ret
-
-
-def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
- """
- Merge two lists by the key. It basically:
-
- 1. Adds the items that are present on updated_list and are absent on original_list.
-
- 2. Removes items that are absent on updated_list and are present on original_list.
-
- 3. For all items that are in both lists, overwrites the values from the original item by the updated item.
-
- :arg list original_list: original list.
- :arg list updated_list: list with changes.
- :arg str key: unique identifier.
- :arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge,
- if its values are null.
- :return: list: Lists merged.
- """
- ignore_when_null = [] if ignore_when_null is None else ignore_when_null
-
- if not original_list:
- return updated_list
-
- items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list])
-
- merged_items = collections.OrderedDict()
-
- for item in updated_list:
- item_key = item[key]
- if item_key in items_map:
- for ignored_key in ignore_when_null:
- if ignored_key in item and item[ignored_key] is None:
- item.pop(ignored_key)
- merged_items[item_key] = items_map[item_key]
- merged_items[item_key].update(item)
- else:
- merged_items[item_key] = item
-
- return list(merged_items.values())
-
-
-def _str_sorted(obj):
- if isinstance(obj, Mapping):
- return json.dumps(obj, sort_keys=True)
- else:
- return str(obj)
-
-
-def _standardize_value(value):
- """
- Convert value to string to enhance the comparison.
-
- :arg value: Any object type.
-
- :return: str: Converted value.
- """
- if isinstance(value, float) and value.is_integer():
- # Workaround to avoid erroneous comparison between int and float
- # Removes zero from integer floats
- value = int(value)
-
- return str(value)
-
-
-class OneViewModuleException(Exception):
- """
- OneView base Exception.
-
- Attributes:
- msg (str): Exception message.
- oneview_response (dict): OneView rest response.
- """
-
- def __init__(self, data):
- self.msg = None
- self.oneview_response = None
-
- if isinstance(data, six.string_types):
- self.msg = data
- else:
- self.oneview_response = data
-
- if data and isinstance(data, dict):
- self.msg = data.get('message')
-
- if self.oneview_response:
- Exception.__init__(self, self.msg, self.oneview_response)
- else:
- Exception.__init__(self, self.msg)
-
-
-class OneViewModuleTaskError(OneViewModuleException):
- """
- OneView Task Error Exception.
-
- Attributes:
- msg (str): Exception message.
- error_code (str): A code which uniquely identifies the specific error.
- """
-
- def __init__(self, msg, error_code=None):
- super(OneViewModuleTaskError, self).__init__(msg)
- self.error_code = error_code
-
-
-class OneViewModuleValueError(OneViewModuleException):
- """
- OneView Value Error.
- The exception is raised when the data contains an inappropriate value.
-
- Attributes:
- msg (str): Exception message.
- """
- pass
-
-
-class OneViewModuleResourceNotFound(OneViewModuleException):
- """
- OneView Resource Not Found Exception.
- The exception is raised when an associated resource was not found.
-
- Attributes:
- msg (str): Exception message.
- """
- pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class OneViewModuleBase(object):
- MSG_CREATED = 'Resource created successfully.'
- MSG_UPDATED = 'Resource updated successfully.'
- MSG_DELETED = 'Resource deleted successfully.'
- MSG_ALREADY_PRESENT = 'Resource is already present.'
- MSG_ALREADY_ABSENT = 'Resource is already absent.'
- MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
-
- ONEVIEW_COMMON_ARGS = dict(
- config=dict(type='path'),
- hostname=dict(type='str'),
- username=dict(type='str'),
- password=dict(type='str', no_log=True),
- api_version=dict(type='int'),
- image_streamer_hostname=dict(type='str')
- )
-
- ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
-
- resource_client = None
-
- def __init__(self, additional_arg_spec=None, validate_etag_support=False):
- """
- OneViewModuleBase constructor.
-
- :arg dict additional_arg_spec: Additional argument spec definition.
- :arg bool validate_etag_support: Enables support to eTag validation.
- """
- argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
-
- self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
-
- self._check_hpe_oneview_sdk()
- self._create_oneview_client()
-
- self.state = self.module.params.get('state')
- self.data = self.module.params.get('data')
-
- # Preload params for get_all - used by facts
- self.facts_params = self.module.params.get('params') or {}
-
- # Preload options as dict - used by facts
- self.options = transform_list_to_dict(self.module.params.get('options'))
-
- self.validate_etag_support = validate_etag_support
-
- def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
-
- merged_arg_spec = dict()
- merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
-
- if validate_etag_support:
- merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
-
- if additional_arg_spec:
- merged_arg_spec.update(additional_arg_spec)
-
- return merged_arg_spec
-
- def _check_hpe_oneview_sdk(self):
- if not HAS_HPE_ONEVIEW:
- self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR)
-
- def _create_oneview_client(self):
- if self.module.params.get('hostname'):
- config = dict(ip=self.module.params['hostname'],
- credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
- api_version=self.module.params['api_version'],
- image_streamer_ip=self.module.params['image_streamer_hostname'])
- self.oneview_client = OneViewClient(config)
- elif not self.module.params['config']:
- self.oneview_client = OneViewClient.from_environment_variables()
- else:
- self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
-
- @abc.abstractmethod
- def execute_module(self):
- """
- Abstract method, must be implemented by the inheritor.
-
- This method is called from the run method. It should contains the module logic
-
- :return: dict: It must return a dictionary with the attributes for the module result,
- such as ansible_facts, msg and changed.
- """
- pass
-
- def run(self):
- """
- Common implementation of the OneView run modules.
-
- It calls the inheritor 'execute_module' function and sends the return to the Ansible.
-
- It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
-
- """
- try:
- if self.validate_etag_support:
- if not self.module.params.get('validate_etag'):
- self.oneview_client.connection.disable_etag_validation()
-
- result = self.execute_module()
-
- if "changed" not in result:
- result['changed'] = False
-
- self.module.exit_json(**result)
-
- except OneViewModuleException as exception:
- error_msg = '; '.join(to_native(e) for e in exception.args)
- self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
-
- def resource_absent(self, resource, method='delete'):
- """
- Generic implementation of the absent state for the OneView resources.
-
- It checks if the resource needs to be removed.
-
- :arg dict resource: Resource to delete.
- :arg str method: Function of the OneView client that will be called for resource deletion.
- Usually delete or remove.
- :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
- """
- if resource:
- getattr(self.resource_client, method)(resource)
-
- return {"changed": True, "msg": self.MSG_DELETED}
- else:
- return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
-
- def get_by_name(self, name):
- """
- Generic get by name implementation.
-
- :arg str name: Resource name to search for.
-
- :return: The resource found or None.
- """
- result = self.resource_client.get_by('name', name)
- return result[0] if result else None
-
- def resource_present(self, resource, fact_name, create_method='create'):
- """
- Generic implementation of the present state for the OneView resources.
-
- It checks if the resource needs to be created or updated.
-
- :arg dict resource: Resource to create or update.
- :arg str fact_name: Name of the fact returned to the Ansible.
- :arg str create_method: Function of the OneView client that will be called for resource creation.
- Usually create or add.
- :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
- """
-
- changed = False
- if "newName" in self.data:
- self.data["name"] = self.data.pop("newName")
-
- if not resource:
- resource = getattr(self.resource_client, create_method)(self.data)
- msg = self.MSG_CREATED
- changed = True
-
- else:
- merged_data = resource.copy()
- merged_data.update(self.data)
-
- if self.compare(resource, merged_data):
- msg = self.MSG_ALREADY_PRESENT
- else:
- resource = self.resource_client.update(merged_data)
- changed = True
- msg = self.MSG_UPDATED
-
- return dict(
- msg=msg,
- changed=changed,
- ansible_facts={fact_name: resource}
- )
-
- def resource_scopes_set(self, state, fact_name, scope_uris):
- """
- Generic implementation of the scopes update PATCH for the OneView resources.
- It checks if the resource needs to be updated with the current scopes.
- This method is meant to be run after ensuring the present state.
- :arg dict state: Dict containing the data from the last state results in the resource.
- It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
- :arg str fact_name: Name of the fact returned to the Ansible.
- :arg list scope_uris: List with all the scope URIs to be added to the resource.
- :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
- """
- if scope_uris is None:
- scope_uris = []
- resource = state['ansible_facts'][fact_name]
- operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
-
- if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
- state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
- state['changed'] = True
- state['msg'] = self.MSG_UPDATED
-
- return state
-
- def compare(self, first_resource, second_resource):
- """
- Recursively compares dictionary contents equivalence, ignoring types and elements order.
- Particularities of the comparison:
- - Inexistent key = None
- - These values are considered equal: None, empty, False
- - Lists are compared value by value after a sort, if they have same size.
- - Each element is converted to str before the comparison.
- :arg dict first_resource: first dictionary
- :arg dict second_resource: second dictionary
- :return: bool: True when equal, False when different.
- """
- resource1 = first_resource
- resource2 = second_resource
-
- debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
-
- # The first resource is True / Not Null and the second resource is False / Null
- if resource1 and not resource2:
- self.module.log("resource1 and not resource2. " + debug_resources)
- return False
-
- # Checks all keys in first dict against the second dict
- for key in resource1:
- if key not in resource2:
- if resource1[key] is not None:
- # Inexistent key is equivalent to exist with value None
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
- # If both values are null, empty or False it will be considered equal.
- elif not resource1[key] and not resource2[key]:
- continue
- elif isinstance(resource1[key], Mapping):
- # recursive call
- if not self.compare(resource1[key], resource2[key]):
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
- elif isinstance(resource1[key], list):
- # change comparison function to compare_list
- if not self.compare_list(resource1[key], resource2[key]):
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
- elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
-
- # Checks all keys in the second dict, looking for missing elements
- for key in resource2.keys():
- if key not in resource1:
- if resource2[key] is not None:
- # Inexistent key is equivalent to exist with value None
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
-
- return True
-
- def compare_list(self, first_resource, second_resource):
- """
- Recursively compares lists contents equivalence, ignoring types and element orders.
- Lists with same size are compared value by value after a sort,
- each element is converted to str before the comparison.
- :arg list first_resource: first list
- :arg list second_resource: second list
- :return: True when equal; False when different.
- """
-
- resource1 = first_resource
- resource2 = second_resource
-
- debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
-
- # The second list is null / empty / False
- if not resource2:
- self.module.log("resource 2 is null. " + debug_resources)
- return False
-
- if len(resource1) != len(resource2):
- self.module.log("resources have different length. " + debug_resources)
- return False
-
- resource1 = sorted(resource1, key=_str_sorted)
- resource2 = sorted(resource2, key=_str_sorted)
-
- for i, val in enumerate(resource1):
- if isinstance(val, Mapping):
- # change comparison function to compare dictionaries
- if not self.compare(val, resource2[i]):
- self.module.log("resources are different. " + debug_resources)
- return False
- elif isinstance(val, list):
- # recursive call
- if not self.compare_list(val, resource2[i]):
- self.module.log("lists are different. " + debug_resources)
- return False
- elif _standardize_value(val) != _standardize_value(resource2[i]):
- self.module.log("values are different. " + debug_resources)
- return False
-
- # no differences found
- return True
diff --git a/lib/ansible/module_utils/online.py b/lib/ansible/module_utils/online.py
deleted file mode 100644
index 464e454288..0000000000
--- a/lib/ansible/module_utils/online.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-import sys
-
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.urls import fetch_url
-
-
-def online_argument_spec():
- return dict(
- api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']),
- no_log=True, aliases=['oauth_token']),
- api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']),
- api_timeout=dict(type='int', default=30, aliases=['timeout']),
- validate_certs=dict(default=True, type='bool'),
- )
-
-
-class OnlineException(Exception):
-
- def __init__(self, message):
- self.message = message
-
-
-class Response(object):
-
- def __init__(self, resp, info):
- self.body = None
- if resp:
- self.body = resp.read()
- self.info = info
-
- @property
- def json(self):
- if not self.body:
- if "body" in self.info:
- return json.loads(self.info["body"])
- return None
- try:
- return json.loads(self.body)
- except ValueError:
- return None
-
- @property
- def status_code(self):
- return self.info["status"]
-
- @property
- def ok(self):
- return self.status_code in (200, 201, 202, 204)
-
-
-class Online(object):
-
- def __init__(self, module):
- self.module = module
- self.headers = {
- 'Authorization': "Bearer %s" % self.module.params.get('api_token'),
- 'User-Agent': self.get_user_agent_string(module),
- 'Content-type': 'application/json',
- }
- self.name = None
-
- def get_resources(self):
- results = self.get('/%s' % self.name)
- if not results.ok:
- raise OnlineException('Error fetching {0} ({1}) [{2}: {3}]'.format(
- self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
- results.status_code, results.json['message']
- ))
-
- return results.json
-
- def _url_builder(self, path):
- if path[0] == '/':
- path = path[1:]
- return '%s/%s' % (self.module.params.get('api_url'), path)
-
- def send(self, method, path, data=None, headers=None):
- url = self._url_builder(path)
- data = self.module.jsonify(data)
-
- if headers is not None:
- self.headers.update(headers)
-
- resp, info = fetch_url(
- self.module, url, data=data, headers=self.headers, method=method,
- timeout=self.module.params.get('api_timeout')
- )
-
-        # Exceptions in fetch_url may result in a status -1, this ensures a proper error to the user in all cases
- if info['status'] == -1:
- self.module.fail_json(msg=info['msg'])
-
- return Response(resp, info)
-
- @staticmethod
- def get_user_agent_string(module):
- return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
-
- def get(self, path, data=None, headers=None):
- return self.send('GET', path, data, headers)
-
- def put(self, path, data=None, headers=None):
- return self.send('PUT', path, data, headers)
-
- def post(self, path, data=None, headers=None):
- return self.send('POST', path, data, headers)
-
- def delete(self, path, data=None, headers=None):
- return self.send('DELETE', path, data, headers)
-
- def patch(self, path, data=None, headers=None):
- return self.send("PATCH", path, data, headers)
-
- def update(self, path, data=None, headers=None):
- return self.send("UPDATE", path, data, headers)
diff --git a/lib/ansible/module_utils/opennebula.py b/lib/ansible/module_utils/opennebula.py
deleted file mode 100644
index a520e32187..0000000000
--- a/lib/ansible/module_utils/opennebula.py
+++ /dev/null
@@ -1,306 +0,0 @@
-#
-# Copyright 2018 www.privaz.io Valletech AB
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-
-import time
-import ssl
-from os import environ
-from ansible.module_utils.six import string_types
-from ansible.module_utils.basic import AnsibleModule
-
-
-HAS_PYONE = True
-
-try:
- from pyone import OneException
- from pyone.server import OneServer
-except ImportError:
- OneException = Exception
- HAS_PYONE = False
-
-
-class OpenNebulaModule:
- """
- Base class for all OpenNebula Ansible Modules.
- This is basically a wrapper of the common arguments, the pyone client and
- some utility methods.
- """
-
- common_args = dict(
- api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")),
- api_username=dict(type='str', default=environ.get("ONE_USERNAME")),
- api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")),
- validate_certs=dict(default=True, type='bool'),
- wait_timeout=dict(type='int', default=300),
- )
-
- def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None):
-
- module_args = OpenNebulaModule.common_args
- module_args.update(argument_spec)
-
- self.module = AnsibleModule(argument_spec=module_args,
- supports_check_mode=supports_check_mode,
- mutually_exclusive=mutually_exclusive)
- self.result = dict(changed=False,
- original_message='',
- message='')
- self.one = self.create_one_client()
-
- self.resolved_parameters = self.resolve_parameters()
-
- def create_one_client(self):
- """
- Creates an XMLPRC client to OpenNebula.
-
- Returns: the new xmlrpc client.
-
- """
-
- # context required for not validating SSL, old python versions won't validate anyway.
- if hasattr(ssl, '_create_unverified_context'):
- no_ssl_validation_context = ssl._create_unverified_context()
- else:
- no_ssl_validation_context = None
-
- # Check if the module can run
- if not HAS_PYONE:
- self.fail("pyone is required for this module")
-
- if self.module.params.get("api_url"):
- url = self.module.params.get("api_url")
- else:
- self.fail("Either api_url or the environment variable ONE_URL must be provided")
-
- if self.module.params.get("api_username"):
- username = self.module.params.get("api_username")
- else:
-            self.fail("Either api_username or the environment variable ONE_USERNAME must be provided")
-
- if self.module.params.get("api_password"):
- password = self.module.params.get("api_password")
- else:
-            self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided")
-
- session = "%s:%s" % (username, password)
-
- if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ:
- return OneServer(url, session=session, context=no_ssl_validation_context)
- else:
- return OneServer(url, session)
-
- def close_one_client(self):
- """
- Close the pyone session.
- """
- self.one.server_close()
-
- def fail(self, msg):
- """
- Utility failure method, will ensure pyone is properly closed before failing.
- Args:
- msg: human readable failure reason.
- """
- if hasattr(self, 'one'):
- self.close_one_client()
- self.module.fail_json(msg=msg)
-
- def exit(self):
- """
- Utility exit method, will ensure pyone is properly closed before exiting.
-
- """
- if hasattr(self, 'one'):
- self.close_one_client()
- self.module.exit_json(**self.result)
-
- def resolve_parameters(self):
- """
- This method resolves parameters provided by a secondary ID to the primary ID.
- For example if cluster_name is present, cluster_id will be introduced by performing
- the required resolution
-
- Returns: a copy of the parameters that includes the resolved parameters.
-
- """
-
- resolved_params = dict(self.module.params)
-
- if 'cluster_name' in self.module.params:
- clusters = self.one.clusterpool.info()
- for cluster in clusters.CLUSTER:
- if cluster.NAME == self.module.params.get('cluster_name'):
- resolved_params['cluster_id'] = cluster.ID
-
- return resolved_params
-
- def is_parameter(self, name):
- """
- Utility method to check if a parameter was provided or is resolved
- Args:
- name: the parameter to check
- """
- if name in self.resolved_parameters:
- return self.get_parameter(name) is not None
- else:
- return False
-
- def get_parameter(self, name):
- """
- Utility method for accessing parameters that includes resolved ID
- parameters from provided Name parameters.
- """
- return self.resolved_parameters.get(name)
-
- def get_host_by_name(self, name):
- '''
- Returns a host given its name.
- Args:
- name: the name of the host
-
- Returns: the host object or None if the host is absent.
-
- '''
- hosts = self.one.hostpool.info()
- for h in hosts.HOST:
- if h.NAME == name:
- return h
- return None
-
- def get_cluster_by_name(self, name):
- """
- Returns a cluster given its name.
- Args:
- name: the name of the cluster
-
-        Returns: the cluster object or None if the cluster is absent.
- """
-
- clusters = self.one.clusterpool.info()
- for c in clusters.CLUSTER:
- if c.NAME == name:
- return c
- return None
-
- def get_template_by_name(self, name):
- '''
- Returns a template given its name.
- Args:
- name: the name of the template
-
-        Returns: the template object or None if the template is absent.
-
- '''
- templates = self.one.templatepool.info()
- for t in templates.TEMPLATE:
- if t.NAME == name:
- return t
- return None
-
- def cast_template(self, template):
- """
- OpenNebula handles all template elements as strings
- At some point there is a cast being performed on types provided by the user
- This function mimics that transformation so that required template updates are detected properly
- additionally an array will be converted to a comma separated list,
- which works for labels and hopefully for something more.
-
- Args:
- template: the template to transform
-
- Returns: the transformed template with data casts applied.
- """
-
- # TODO: check formally available data types in templates
- # TODO: some arrays might be converted to space separated
-
- for key in template:
- value = template[key]
- if isinstance(value, dict):
- self.cast_template(template[key])
- elif isinstance(value, list):
- template[key] = ', '.join(value)
- elif not isinstance(value, string_types):
- template[key] = str(value)
-
- def requires_template_update(self, current, desired):
- """
- This function will help decide if a template update is required or not
- If a desired key is missing from the current dictionary an update is required
- If the intersection of both dictionaries is not deep equal, an update is required
- Args:
- current: current template as a dictionary
- desired: desired template as a dictionary
-
- Returns: True if a template update is required
- """
-
- if not desired:
- return False
-
- self.cast_template(desired)
- intersection = dict()
- for dkey in desired.keys():
- if dkey in current.keys():
- intersection[dkey] = current[dkey]
- else:
- return True
- return not (desired == intersection)
-
- def wait_for_state(self, element_name, state, state_name, target_states,
- invalid_states=None, transition_states=None,
- wait_timeout=None):
- """
- Args:
- element_name: the name of the object we are waiting for: HOST, VM, etc.
- state: lambda that returns the current state, will be queried until target state is reached
- state_name: lambda that returns the readable form of a given state
- target_states: states expected to be reached
-            invalid_states: if any of these states is reached, fail
- transition_states: when used, these are the valid states during the transition.
- wait_timeout: timeout period in seconds. Defaults to the provided parameter.
- """
-
- if not wait_timeout:
- wait_timeout = self.module.params.get("wait_timeout")
-
- start_time = time.time()
-
- while (time.time() - start_time) < wait_timeout:
- current_state = state()
-
- if current_state in invalid_states:
- self.fail('invalid %s state %s' % (element_name, state_name(current_state)))
-
- if transition_states:
- if current_state not in transition_states:
- self.fail('invalid %s transition state %s' % (element_name, state_name(current_state)))
-
- if current_state in target_states:
- return True
-
- time.sleep(self.one.server_retry_interval())
-
- self.fail(msg="Wait timeout has expired!")
-
- def run_module(self):
- """
- trigger the start of the execution of the module.
- Returns:
-
- """
- try:
- self.run(self.one, self.module, self.result)
- except OneException as e:
- self.fail(msg="OpenNebula Exception: %s" % e)
-
- def run(self, one, module, result):
- """
- to be implemented by subclass with the actual module actions.
- Args:
- one: the OpenNebula XMLRPC client
- module: the Ansible Module object
- result: the Ansible result
- """
- raise NotImplementedError("Method requires implementation")
diff --git a/lib/ansible/module_utils/oracle/__init__.py b/lib/ansible/module_utils/oracle/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/oracle/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/oracle/oci_utils.py b/lib/ansible/module_utils/oracle/oci_utils.py
deleted file mode 100644
index 368337a496..0000000000
--- a/lib/ansible/module_utils/oracle/oci_utils.py
+++ /dev/null
@@ -1,1961 +0,0 @@
-# Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import
-
-import logging
-import logging.config
-import os
-import tempfile
-from datetime import datetime
-from operator import eq
-
-import time
-
-try:
- import yaml
-
- import oci
- from oci.constants import HEADER_NEXT_PAGE
-
- from oci.exceptions import (
- InvalidConfig,
- InvalidPrivateKey,
- MissingPrivateKeyPassphrase,
- ConfigFileNotFound,
- ServiceError,
- MaximumWaitTimeExceeded,
- )
- from oci.identity.identity_client import IdentityClient
- from oci.object_storage.models import CreateBucketDetails
- from oci.object_storage.models import UpdateBucketDetails
- from oci.retry import RetryStrategyBuilder
- from oci.util import to_dict, Sentinel
-
- HAS_OCI_PY_SDK = True
-except ImportError:
- HAS_OCI_PY_SDK = False
-
-
-from ansible.module_utils._text import to_bytes
-from ansible.module_utils.six import iteritems
-
-__version__ = "1.6.0-dev"
-
-MAX_WAIT_TIMEOUT_IN_SECONDS = 1200
-
-# If a resource is in one of these states it would be considered inactive
-DEAD_STATES = [
- "TERMINATING",
- "TERMINATED",
- "FAULTY",
- "FAILED",
- "DELETING",
- "DELETED",
- "UNKNOWN_ENUM_VALUE",
- "DETACHING",
- "DETACHED",
-]
-
-# If a resource is in one of these states it would be considered available
-DEFAULT_READY_STATES = [
- "AVAILABLE",
- "ACTIVE",
- "RUNNING",
- "PROVISIONED",
- "ATTACHED",
- "ASSIGNED",
- "SUCCEEDED",
- "PENDING_PROVIDER",
-]
-
-# If a resource is in one of these states, it would be considered deleted
-DEFAULT_TERMINATED_STATES = ["TERMINATED", "DETACHED", "DELETED"]
-
-
-def get_common_arg_spec(supports_create=False, supports_wait=False):
- """
- Return the common set of module arguments for all OCI cloud modules.
- :param supports_create: Variable to decide whether to add options related to idempotency of create operation.
- :param supports_wait: Variable to decide whether to add options related to waiting for completion.
- :return: A dict with applicable module options.
- """
- # Note: This method is used by most OCI ansible resource modules during initialization. When making changes to this
- # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules
- # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in
- # this method would break that error handling logic.
- common_args = dict(
- config_file_location=dict(type="str"),
- config_profile_name=dict(type="str", default="DEFAULT"),
- api_user=dict(type="str"),
- api_user_fingerprint=dict(type="str", no_log=True),
- api_user_key_file=dict(type="str"),
- api_user_key_pass_phrase=dict(type="str", no_log=True),
- auth_type=dict(
- type="str",
- required=False,
- choices=["api_key", "instance_principal"],
- default="api_key",
- ),
- tenancy=dict(type="str"),
- region=dict(type="str"),
- )
-
- if supports_create:
- common_args.update(
- key_by=dict(type="list"),
- force_create=dict(type="bool", default=False),
- )
-
- if supports_wait:
- common_args.update(
- wait=dict(type="bool", default=True),
- wait_timeout=dict(
- type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS
- ),
- wait_until=dict(type="str"),
- )
-
- return common_args
-
-
-def get_facts_module_arg_spec(filter_by_name=False):
- # Note: This method is used by most OCI ansible fact modules during initialization. When making changes to this
- # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules
- # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in
- # this method would break that error handling logic.
- facts_module_arg_spec = get_common_arg_spec()
- if filter_by_name:
- facts_module_arg_spec.update(name=dict(type="str"))
- else:
- facts_module_arg_spec.update(display_name=dict(type="str"))
- return facts_module_arg_spec
-
-
-def get_oci_config(module, service_client_class=None):
- """Return the OCI configuration to use for all OCI API calls. The effective OCI configuration is derived by merging
- any overrides specified for configuration attributes through Ansible module options or environment variables. The
- order of precedence for deriving the effective configuration dict is:
- 1. If a config file is provided, use that to setup the initial config dict.
- 2. If a config profile is specified, use that config profile to setup the config dict.
- 3. For each authentication attribute, check if an override is provided either through
- a. Ansible Module option
- b. Environment variable
- and override the value in the config dict in that order."""
- config = {}
-
- config_file = module.params.get("config_file_location")
- _debug("Config file through module options - {0} ".format(config_file))
- if not config_file:
- if "OCI_CONFIG_FILE" in os.environ:
- config_file = os.environ["OCI_CONFIG_FILE"]
- _debug(
- "Config file through OCI_CONFIG_FILE environment variable - {0}".format(
- config_file
- )
- )
- else:
- config_file = "~/.oci/config"
- _debug("Config file (fallback) - {0} ".format(config_file))
-
- config_profile = module.params.get("config_profile_name")
- if not config_profile:
- if "OCI_CONFIG_PROFILE" in os.environ:
- config_profile = os.environ["OCI_CONFIG_PROFILE"]
- else:
- config_profile = "DEFAULT"
- try:
- config = oci.config.from_file(
- file_location=config_file, profile_name=config_profile
- )
- except (
- ConfigFileNotFound,
- InvalidConfig,
- InvalidPrivateKey,
- MissingPrivateKeyPassphrase,
- ) as ex:
- if not _is_instance_principal_auth(module):
- # When auth_type is not instance_principal, config file is required
- module.fail_json(msg=str(ex))
- else:
- _debug(
- "Ignore {0} as the auth_type is set to instance_principal".format(
- str(ex)
- )
- )
- # if instance_principal auth is used, an empty 'config' map is used below.
-
- config["additional_user_agent"] = "Oracle-Ansible/{0}".format(__version__)
- # Merge any overrides through other IAM options
- _merge_auth_option(
- config,
- module,
- module_option_name="api_user",
- env_var_name="OCI_USER_ID",
- config_attr_name="user",
- )
- _merge_auth_option(
- config,
- module,
- module_option_name="api_user_fingerprint",
- env_var_name="OCI_USER_FINGERPRINT",
- config_attr_name="fingerprint",
- )
- _merge_auth_option(
- config,
- module,
- module_option_name="api_user_key_file",
- env_var_name="OCI_USER_KEY_FILE",
- config_attr_name="key_file",
- )
- _merge_auth_option(
- config,
- module,
- module_option_name="api_user_key_pass_phrase",
- env_var_name="OCI_USER_KEY_PASS_PHRASE",
- config_attr_name="pass_phrase",
- )
- _merge_auth_option(
- config,
- module,
- module_option_name="tenancy",
- env_var_name="OCI_TENANCY",
- config_attr_name="tenancy",
- )
- _merge_auth_option(
- config,
- module,
- module_option_name="region",
- env_var_name="OCI_REGION",
- config_attr_name="region",
- )
-
- # Redirect calls to home region for IAM service.
- do_not_redirect = module.params.get(
- "do_not_redirect_to_home_region", False
- ) or os.environ.get("OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION")
- if service_client_class == IdentityClient and not do_not_redirect:
- _debug("Region passed for module invocation - {0} ".format(config["region"]))
- identity_client = IdentityClient(config)
- region_subscriptions = identity_client.list_region_subscriptions(
- config["tenancy"]
- ).data
- # Replace the region in the config with the home region.
- [config["region"]] = [
- rs.region_name for rs in region_subscriptions if rs.is_home_region is True
- ]
- _debug(
- "Setting region in the config to home region - {0} ".format(
- config["region"]
- )
- )
-
- return config
-
-
-def create_service_client(module, service_client_class):
- """
- Creates a service client using the common module options provided by the user.
- :param module: An AnsibleModule that represents user provided options for a Task
- :param service_client_class: A class that represents a client to an OCI Service
- :return: A fully configured client
- """
- config = get_oci_config(module, service_client_class)
- kwargs = {}
-
- if _is_instance_principal_auth(module):
- try:
- signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
- except Exception as ex:
- message = (
- "Failed retrieving certificates from localhost. Instance principal based authentication is only"
- "possible from within OCI compute instances. Exception: {0}".format(
- str(ex)
- )
- )
- module.fail_json(msg=message)
-
- kwargs["signer"] = signer
-
- # XXX: Validate configuration -- this may be redundant, as all Client constructors perform a validation
- try:
- oci.config.validate_config(config, **kwargs)
- except oci.exceptions.InvalidConfig as ic:
- module.fail_json(
- msg="Invalid OCI configuration. Exception: {0}".format(str(ic))
- )
-
- # Create service client class with the signer
- client = service_client_class(config, **kwargs)
-
- return client
-
-
-def _is_instance_principal_auth(module):
- # check if auth type is overridden via module params
- instance_principal_auth = (
- "auth_type" in module.params
- and module.params["auth_type"] == "instance_principal"
- )
- if not instance_principal_auth:
- instance_principal_auth = (
- "OCI_ANSIBLE_AUTH_TYPE" in os.environ
- and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal"
- )
- return instance_principal_auth
-
-
-def _merge_auth_option(
- config, module, module_option_name, env_var_name, config_attr_name
-):
- """Merge the values for an authentication attribute from ansible module options and
- environment variables with the values specified in a configuration file"""
- _debug("Merging {0}".format(module_option_name))
-
- auth_attribute = module.params.get(module_option_name)
- _debug(
- "\t Ansible module option {0} = {1}".format(module_option_name, auth_attribute)
- )
- if not auth_attribute:
- if env_var_name in os.environ:
- auth_attribute = os.environ[env_var_name]
- _debug(
- "\t Environment variable {0} = {1}".format(env_var_name, auth_attribute)
- )
-
- # An authentication attribute has been provided through an env-variable or an ansible
- # option and must override the corresponding attribute's value specified in the
- # config file [profile].
- if auth_attribute:
- _debug(
- "Updating config attribute {0} -> {1} ".format(
- config_attr_name, auth_attribute
- )
- )
- config.update({config_attr_name: auth_attribute})
-
-
-def bucket_details_factory(bucket_details_type, module):
- bucket_details = None
- if bucket_details_type == "create":
- bucket_details = CreateBucketDetails()
- elif bucket_details_type == "update":
- bucket_details = UpdateBucketDetails()
-
- bucket_details.compartment_id = module.params["compartment_id"]
- bucket_details.name = module.params["name"]
- bucket_details.public_access_type = module.params["public_access_type"]
- bucket_details.metadata = module.params["metadata"]
-
- return bucket_details
-
-
-def filter_resources(all_resources, filter_params):
- if not filter_params:
- return all_resources
- filtered_resources = []
- filtered_resources.extend(
- [
- resource
- for resource in all_resources
- for key, value in filter_params.items()
- if getattr(resource, key) == value
- ]
- )
- return filtered_resources
-
-
-def list_all_resources(target_fn, **kwargs):
- """
- Return all resources after paging through all results returned by target_fn. If a `display_name` or `name` is
- provided as a kwarg, then only resources matching the specified name are returned.
- :param target_fn: The target OCI SDK paged function to call
- :param kwargs: All arguments that the OCI SDK paged function expects
- :return: List of all objects returned by target_fn
- :raises ServiceError: When the Service returned an Error response
- :raises MaximumWaitTimeExceededError: When maximum wait time is exceeded while invoking target_fn
- """
- filter_params = None
- try:
- response = call_with_backoff(target_fn, **kwargs)
- except ValueError as ex:
- if "unknown kwargs" in str(ex):
- if "display_name" in kwargs:
- if kwargs["display_name"]:
- filter_params = {"display_name": kwargs["display_name"]}
- del kwargs["display_name"]
- elif "name" in kwargs:
- if kwargs["name"]:
- filter_params = {"name": kwargs["name"]}
- del kwargs["name"]
- response = call_with_backoff(target_fn, **kwargs)
-
- existing_resources = response.data
- while response.has_next_page:
- kwargs.update(page=response.headers.get(HEADER_NEXT_PAGE))
- response = call_with_backoff(target_fn, **kwargs)
- existing_resources += response.data
-
- # If the underlying SDK Service list* method doesn't support filtering by name or display_name, filter the resources
- # and return the matching list of resources
- return filter_resources(existing_resources, filter_params)
-
-
-def _debug(s):
- get_logger("oci_utils").debug(s)
-
-
-def get_logger(module_name):
- oci_logging = setup_logging()
- return oci_logging.getLogger(module_name)
-
-
-def setup_logging(
- default_level="INFO",
-):
- """Setup logging configuration"""
- env_log_path = "LOG_PATH"
- env_log_level = "LOG_LEVEL"
-
- default_log_path = tempfile.gettempdir()
- log_path = os.getenv(env_log_path, default_log_path)
- log_level_str = os.getenv(env_log_level, default_level)
- log_level = logging.getLevelName(log_level_str)
- log_file_path = os.path.join(log_path, "oci_ansible_module.log")
- logging.basicConfig(filename=log_file_path, filemode="a", level=log_level)
- return logging
-
-
-def check_and_update_attributes(
- target_instance, attr_name, input_value, existing_value, changed
-):
- """
-    This function checks the difference between two resource attributes of literal types and sets the attribute
- value in the target instance type holding the attribute.
- :param target_instance: The instance which contains the attribute whose values to be compared
- :param attr_name: Name of the attribute whose value required to be compared
- :param input_value: The value of the attribute provided by user
- :param existing_value: The value of the attribute in the existing resource
- :param changed: Flag to indicate whether there is any difference between the values
- :return: Returns a boolean value indicating whether there is any difference between the values
- """
- if input_value is not None and not eq(input_value, existing_value):
- changed = True
- target_instance.__setattr__(attr_name, input_value)
- else:
- target_instance.__setattr__(attr_name, existing_value)
- return changed
-
-
-def check_and_update_resource(
- resource_type,
- get_fn,
- kwargs_get,
- update_fn,
- primitive_params_update,
- kwargs_non_primitive_update,
- module,
- update_attributes,
- client=None,
- sub_attributes_of_update_model=None,
- wait_applicable=True,
- states=None,
-):
-
- """
- This function handles update operation on a resource. It checks whether update is required and accordingly returns
- the resource and the changed status.
- :param wait_applicable: Indicates if the resource support wait
- :param client: The resource Client class to use to perform the wait checks. This param must be specified if
- wait_applicable is True
- :param resource_type: The type of the resource. e.g. "private_ip"
- :param get_fn: Function used to get the resource. e.g. virtual_network_client.get_private_ip
- :param kwargs_get: Dictionary containing the arguments to be used to call get function.
- e.g. {"private_ip_id": module.params["private_ip_id"]}
- :param update_fn: Function used to update the resource. e.g virtual_network_client.update_private_ip
- :param primitive_params_update: List of primitive parameters used for update function. e.g. ['private_ip_id']
- :param kwargs_non_primitive_update: Dictionary containing the non-primitive arguments to be used to call get
- function with key as the non-primitive argument type & value as the name of the non-primitive argument to be passed
- to the update function. e.g. {UpdatePrivateIpDetails: "update_private_ip_details"}
- :param module: Instance of AnsibleModule
- :param update_attributes: Attributes in update model.
- :param states: List of lifecycle states to watch for while waiting after create_fn is called.
- e.g. [module.params['wait_until'], "FAULTY"]
- :param sub_attributes_of_update_model: Dictionary of non-primitive sub-attributes of update model. for example,
- {'services': [ServiceIdRequestDetails()]} as in UpdateServiceGatewayDetails.
- :return: Returns a dictionary containing the "changed" status and the resource.
- """
- try:
- result = dict(changed=False)
- attributes_to_update, resource = get_attr_to_update(
- get_fn, kwargs_get, module, update_attributes
- )
-
- if attributes_to_update:
- kwargs_update = get_kwargs_update(
- attributes_to_update,
- kwargs_non_primitive_update,
- module,
- primitive_params_update,
- sub_attributes_of_update_model,
- )
- resource = call_with_backoff(update_fn, **kwargs_update).data
- if wait_applicable:
- if client is None:
- module.fail_json(
- msg="wait_applicable is True, but client is not specified."
- )
- resource = wait_for_resource_lifecycle_state(
- client, module, True, kwargs_get, get_fn, None, resource, states
- )
- result["changed"] = True
- result[resource_type] = to_dict(resource)
- return result
- except ServiceError as ex:
- module.fail_json(msg=ex.message)
-
-
-def get_kwargs_update(
- attributes_to_update,
- kwargs_non_primitive_update,
- module,
- primitive_params_update,
- sub_attributes_of_update_model=None,
-):
- kwargs_update = dict()
- for param in primitive_params_update:
- kwargs_update[param] = module.params[param]
- for param in kwargs_non_primitive_update:
- update_object = param()
- for key in update_object.attribute_map:
- if key in attributes_to_update:
- if (
- sub_attributes_of_update_model
- and key in sub_attributes_of_update_model
- ):
- setattr(update_object, key, sub_attributes_of_update_model[key])
- else:
- setattr(update_object, key, module.params[key])
- kwargs_update[kwargs_non_primitive_update[param]] = update_object
- return kwargs_update
-
-
-def is_dictionary_subset(sub, super_dict):
- """
- This function checks if `sub` dictionary is a subset of `super` dictionary.
- :param sub: subset dictionary, for example user_provided_attr_value.
- :param super_dict: super dictionary, for example resources_attr_value.
- :return: True if sub is contained in super.
- """
- for key in sub:
- if sub[key] != super_dict[key]:
- return False
- return True
-
-
def are_lists_equal(s, t):
    """
    Compare two lists for equivalence, ignoring element order.

    Lists of dicts are compared subset-wise because dicts returned by the API
    may carry extra keys (e.g. `service_name` on a service gateway's
    `services`) that the user never supplied.
    :param s: First list (typically the user-provided value).
    :param t: Second list (typically the existing resource's value).
    :return: True when the lists are considered equal.
    """
    if s is None and t is None:
        return True

    # Exactly one side is None, or the lengths differ -> not equal.
    if s is None or t is None or len(s) != len(t):
        return False

    if not s:
        return True

    s = to_dict(s)
    t = to_dict(t)

    if type(s[0]) == dict:
        # Sort both lists of dicts canonically, then require each dict from s
        # to be a subset of its positional counterpart in t (API responses may
        # contain additional keys the user did not provide).
        sorted_s = sort_list_of_dictionary(s)
        sorted_t = sort_list_of_dictionary(t)
        return all(
            is_dictionary_subset(left, right)
            for left, right in zip(sorted_s, sorted_t)
        )

    # Primitive elements: multiset comparison by removing each element of s
    # from a working copy of t.
    remaining = list(t)
    for elem in s:
        try:
            remaining.remove(elem)
        except ValueError:
            return False
    return not remaining
-
-
def get_attr_to_update(get_fn, kwargs_get, module, update_attributes):
    """
    Determine which of `update_attributes` differ between the live resource
    and the values the user supplied in the module options.
    :param get_fn: SDK function used to fetch the current resource state.
    :param kwargs_get: Dictionary of arguments for get_fn.
    :param module: Instance of AnsibleModule.
    :param update_attributes: Names of the attributes supported by the update operation.
    :return: A tuple (attributes_to_update, resource): the list of attribute names whose
        user-provided value differs from the live resource's value, and the freshly
        fetched resource object.
    """
    try:
        resource = call_with_backoff(get_fn, **kwargs_get).data
    except ServiceError as ex:
        # fail_json terminates module execution; nothing below runs on error.
        module.fail_json(msg=ex.message)

    attributes_to_update = []

    for attr in update_attributes:
        resources_attr_value = getattr(resource, attr, None)
        user_provided_attr_value = module.params.get(attr, None)

        # List-typed values need an order-insensitive, subset-aware comparison.
        unequal_list_attr = (
            type(resources_attr_value) == list or type(user_provided_attr_value) == list
        ) and not are_lists_equal(user_provided_attr_value, resources_attr_value)
        # Non-list values are compared via their dict representations.
        unequal_attr = type(resources_attr_value) != list and to_dict(
            resources_attr_value
        ) != to_dict(user_provided_attr_value)
        if unequal_list_attr or unequal_attr:
            # only update if the user has explicitly provided a value for this attribute
            # otherwise, no update is necessary because the user hasn't expressed a particular
            # value for that attribute
            if module.params.get(attr, None):
                attributes_to_update.append(attr)

    return attributes_to_update, resource
-
-
def get_taggable_arg_spec(supports_create=False, supports_wait=False):
    """
    Returns an arg_spec that is valid for taggable OCI resources.
    :param supports_create: Forwarded to get_common_arg_spec.
    :param supports_wait: Forwarded to get_common_arg_spec.
    :return: A dict that represents an ansible arg spec that builds over the common_arg_spec and adds free-form and
    defined tags.
    """
    arg_spec = get_common_arg_spec(supports_create, supports_wait)
    arg_spec["freeform_tags"] = dict(type="dict")
    arg_spec["defined_tags"] = dict(type="dict")
    return arg_spec
-
-
def add_tags_to_model_from_module(model, module):
    """
    Adds free-form and defined tags from an ansible module to a resource model
    :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes
    :param module: An AnsibleModule representing the options provided by the user
    :return: The updated model class with the tags specified by the user.
    """
    return add_tags_to_model_class(
        model,
        module.params.get("freeform_tags", None),
        module.params.get("defined_tags", None),
    )
-
-
def add_tags_to_model_class(model, freeform_tags, defined_tags):
    """
    Add free-form and defined tags to a resource model.
    :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes
    :param freeform_tags: A dict representing the freeform_tags to be applied to the model
    :param defined_tags: A dict representing the defined_tags to be applied to the model
    :return: The updated model class with the tags specified by the user
    """
    try:
        if freeform_tags is not None:
            _debug("Model {0} set freeform tags to {1}".format(model, freeform_tags))
            # setattr() is the idiomatic form of the previous
            # model.__setattr__(...) dunder call; behavior is identical.
            setattr(model, "freeform_tags", freeform_tags)

        if defined_tags is not None:
            _debug("Model {0} set defined tags to {1}".format(model, defined_tags))
            setattr(model, "defined_tags", defined_tags)
    except AttributeError as ae:
        # Models without tag support are tolerated; tags are simply skipped.
        _debug("Model {0} doesn't support tags. Error {1}".format(model, ae))

    return model
-
-
def check_and_create_resource(
    resource_type,
    create_fn,
    kwargs_create,
    list_fn,
    kwargs_list,
    module,
    model,
    existing_resources=None,
    exclude_attributes=None,
    dead_states=None,
    default_attribute_values=None,
    supports_sort_by_time_created=True,
):
    """
    This function checks whether there is a resource with same attributes as specified in the module options. If not,
    it creates and returns the resource.
    :param resource_type: Type of the resource to be created.
    :param create_fn: Function used in the module to handle create operation. The function should return a dict with
                      keys as resource & changed.
    :param kwargs_create: Dictionary of parameters for create operation.
    :param list_fn: List function in sdk to list all the resources of type resource_type.
    :param kwargs_list: Dictionary of parameters for list operation.
    :param module: Instance of AnsibleModule
    :param model: Model used to create a resource.
    :param existing_resources: Optional pre-fetched list of candidate resources; when None,
        the list is fetched via list_fn.
    :param exclude_attributes: The attributes which should not be used to distinguish the resource. e.g. display_name,
                               dns_label.
    :param dead_states: List of states which can't transition to any of the usable states of the resource. This deafults
                        to ["TERMINATING", "TERMINATED", "FAULTY", "FAILED", "DELETING", "DELETED", "UNKNOWN_ENUM_VALUE"]
    :param default_attribute_values: A dictionary containing default values for attributes.
    :param supports_sort_by_time_created: Whether list_fn accepts sort_by="TIMECREATED".
    :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
    """

    # 'force_create' bypasses matching entirely and always creates a new resource.
    if module.params.get("force_create", None):
        _debug("Force creating {0}".format(resource_type))
        result = call_with_backoff(create_fn, **kwargs_create)
        return result

    # Get the existing resources list sorted by creation time in descending order. Return the latest matching resource
    # in case of multiple resource matches.
    if exclude_attributes is None:
        exclude_attributes = {}
    if default_attribute_values is None:
        default_attribute_values = {}
    try:
        if existing_resources is None:
            if supports_sort_by_time_created:
                kwargs_list["sort_by"] = "TIMECREATED"
            existing_resources = list_all_resources(list_fn, **kwargs_list)
    except ValueError:
        # list_fn doesn't support sort_by, so remove the sort_by key in kwargs_list and retry
        kwargs_list.pop("sort_by", None)
        try:
            existing_resources = list_all_resources(list_fn, **kwargs_list)
        # Handle errors like 404 due to bad arguments to the list_all_resources call.
        except ServiceError as ex:
            module.fail_json(msg=ex.message)
    except ServiceError as ex:
        module.fail_json(msg=ex.message)

    result = dict()

    attributes_to_consider = _get_attributes_to_consider(
        exclude_attributes, model, module
    )
    # defined_tags defaults to {} so an untagged existing resource can still match.
    if "defined_tags" not in default_attribute_values:
        default_attribute_values["defined_tags"] = {}
    resource_matched = None
    _debug(
        "Trying to find a match within {0} existing resources".format(
            len(existing_resources)
        )
    )

    # Scan the (time-sorted, when supported) candidates; the first active match wins.
    for resource in existing_resources:
        if _is_resource_active(resource, dead_states):
            _debug(
                "Comparing user specified values {0} against an existing resource's "
                "values {1}".format(module.params, to_dict(resource))
            )
            if does_existing_resource_match_user_inputs(
                to_dict(resource),
                module,
                attributes_to_consider,
                exclude_attributes,
                default_attribute_values,
            ):
                resource_matched = to_dict(resource)
                break

    if resource_matched:
        _debug("Resource with same attributes found: {0}.".format(resource_matched))
        result[resource_type] = resource_matched
        result["changed"] = False
    else:
        _debug("No matching resource found. Attempting to create a new resource.")
        result = call_with_backoff(create_fn, **kwargs_create)

    return result
-
-
def _get_attributes_to_consider(exclude_attributes, model, module):
    """
    Determine the attributes to detect if an existing resource already matches the requested resource state
    :param exclude_attributes: Attributes to not consider for matching
    :param model: The model class used to create the Resource
    :param module: An instance of AnsibleModule that contains user's desires around a resource's state
    :return: A list of attributes that needs to be matched
    """

    # A user-provided 'key_by' wins: match only on those attributes.
    key_by = module.params.get("key_by")
    if key_by is not None:
        attributes_to_consider = key_by
    else:
        # Otherwise match on every model attribute except freeform_tags (tags
        # don't distinguish a resource) and node_count (the existing resource
        # does not reflect it).
        attributes_to_consider = [
            attr
            for attr in model.attribute_map
            if attr not in ("freeform_tags", "node_count")
        ]
    _debug("attributes to consider: {0}".format(attributes_to_consider))
    return attributes_to_consider
-
-
def _is_resource_active(resource, dead_states):
    """
    Return False when the resource sits in a terminal ("dead") lifecycle state.
    Resources without a lifecycle_state attribute are always considered active.
    :param resource: A resource model instance exposing attribute_map.
    :param dead_states: States to treat as dead; defaults to DEAD_STATES when None.
    """
    states = DEAD_STATES if dead_states is None else dead_states
    if "lifecycle_state" not in resource.attribute_map:
        return True
    return resource.lifecycle_state not in states
-
-
def is_attr_assigned_default(default_attribute_values, attr, assigned_value):
    """
    Check whether `assigned_value` of attribute `attr` matches the module-author
    supplied default for that attribute.
    :param default_attribute_values: Dict mapping attribute names to default values.
    :param attr: Name of the attribute being checked.
    :param assigned_value: The existing resource's current value for attr.
    :return: False when no defaults were supplied at all or the value differs from
        the declared default; True when it matches or no default was declared for attr.
    """
    if not default_attribute_values:
        return False

    if attr in default_attribute_values:
        default_val_for_attr = default_attribute_values.get(attr, None)
        if isinstance(default_val_for_attr, dict):
            # When default value for a resource's attribute is empty dictionary, check if the corresponding value of the
            # existing resource's attribute is also empty.
            if not default_val_for_attr:
                return not assigned_value
            # only compare keys that are in default_attribute_values[attr]
            # this is to ensure forward compatibility when the API returns new keys that are not known during
            # the time when the module author provided default values for the attribute
            # BUG FIX: the previous code called iteritems(assigned_value.items()),
            # handing six.iteritems a dict_items view instead of a dict, which
            # fails at runtime; iterate the dict's items directly (works on
            # both Python 2 and 3).
            keys = {}
            for k, v in assigned_value.items():
                if k in default_val_for_attr:
                    keys[k] = v

            return default_val_for_attr == keys
        # non-dict, normal comparison
        return default_val_for_attr == assigned_value
    else:
        # module author has not provided a default value for attr
        return True
-
-
def create_resource(resource_type, create_fn, kwargs_create, module):
    """
    Create an OCI resource
    :param resource_type: Type of the resource to be created. e.g.: "vcn"
    :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
    :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn
    :param module: Instance of AnsibleModule
    :return: Dict with the created resource under `resource_type` and "changed": True.
    """
    try:
        created = to_dict(call_with_backoff(create_fn, **kwargs_create).data)
        _debug("Created {0}, {1}".format(resource_type, created))
        return {"changed": True, resource_type: created}
    except (ServiceError, TypeError) as ex:
        # fail_json exits the module on error.
        module.fail_json(msg=str(ex))
-
-
def does_existing_resource_match_user_inputs(
    existing_resource,
    module,
    attributes_to_compare,
    exclude_attributes,
    default_attribute_values=None,
):
    """
    Check if 'attributes_to_compare' in an existing_resource match the desired state provided by a user in 'module'.
    :param existing_resource: A dictionary representing an existing resource's values.
    :param module: The AnsibleModule representing the options provided by the user.
    :param attributes_to_compare: A list of attributes of a resource that are used to compare if an existing resource
    matches the desire state of the resource expressed by the user in 'module'.
    :param exclude_attributes: The attributes, that a module author provides, which should not be used to match the
    resource. This dictionary typically includes: (a) attributes which are initialized with dynamic default values
    like 'display_name', 'security_list_ids' for subnets and (b) attributes that don't have any defaults like
    'dns_label' in VCNs. The attributes are part of keys and 'True' is the value for all existing keys.
    :param default_attribute_values: A dictionary containing default values for attributes.
    :return: True if the values for the list of attributes is the same in the existing_resource and module instances.
    """
    if not default_attribute_values:
        default_attribute_values = {}
    for attr in attributes_to_compare:
        attribute_with_default_metadata = None
        if attr in existing_resource:
            resources_value_for_attr = existing_resource[attr]
            # Check if the user has explicitly provided the value for attr.
            user_provided_value_for_attr = _get_user_provided_value(module, attr)
            if user_provided_value_for_attr is not None:
                # res is a single-element list acting as a mutable result holder:
                # the recursive check flips res[0] to False on the first mismatch.
                res = [True]
                check_if_user_value_matches_resources_attr(
                    attr,
                    resources_value_for_attr,
                    user_provided_value_for_attr,
                    exclude_attributes,
                    default_attribute_values,
                    res,
                )
                if not res[0]:
                    _debug(
                        "Mismatch on attribute '{0}'. User provided value is {1} & existing resource's value"
                        "is {2}.".format(
                            attr, user_provided_value_for_attr, resources_value_for_attr
                        )
                    )
                    return False
            else:
                # If the user has not explicitly provided the value for attr and attr is in exclude_list, we can
                # consider this as a 'pass'. For example, if an attribute 'display_name' is not specified by user and
                # that attribute is in the 'exclude_list' according to the module author(Not User), then exclude
                if (
                    exclude_attributes.get(attr) is None
                    and resources_value_for_attr is not None
                ):
                    if module.argument_spec.get(attr):
                        attribute_with_default_metadata = module.argument_spec.get(attr)
                        default_attribute_value = attribute_with_default_metadata.get(
                            "default", None
                        )
                        if default_attribute_value is not None:
                            if existing_resource[attr] != default_attribute_value:
                                return False
                    # Check if attr has a value that is not default. For example, a custom `security_list_id`
                    # is assigned to the subnet's attribute `security_list_ids`. If the attribute is assigned a
                    # value that is not the default, then it must be considered a mismatch and false returned.
                    elif not is_attr_assigned_default(
                        default_attribute_values, attr, existing_resource[attr]
                    ):
                        return False

        else:
            # Attribute exists on the create model but not in the fetched resource;
            # log and skip it rather than failing the match.
            _debug(
                "Attribute {0} is in the create model of resource {1}"
                "but doesn't exist in the get model of the resource".format(
                    attr, existing_resource.__class__
                )
            )
    return True
-
-
def tuplize(d):
    """
    Recursively convert a dictionary into a sorted list of
    (value_is_none, key, value) tuples so that dictionaries become orderable.

    The leading boolean makes tuples with a None value sort after tuples with
    real values when the results are compared.
    :param d: A dictionary.
    :return: List of tuples.
    """
    result = []
    for key in sorted(d):
        value = d[key]
        if type(value) == list and value and type(value[0]) == dict:
            # A list of dicts becomes a list of tuplized dicts.
            converted = [tuplize(item) for item in value]
            # `converted` can never be None here; the flag is kept only so
            # every branch emits the same 3-tuple shape.
            result.append((converted is None, key, converted))
        elif type(value) == dict:
            converted = tuplize(value)
            result.append((converted is None, key, converted))
        else:
            # Primitives and plain (non-dict) lists pass through unchanged.
            result.append((value is None, key, value))
    return result
-
-
def get_key_for_comparing_dict(d):
    """Return the tuple form of dict *d*, usable as a sort key for dicts."""
    return tuplize(d)
-
-
def sort_dictionary(d):
    """
    Return a copy of dictionary *d* with every list value sorted, recursing
    into nested dictionaries and lists of dictionaries.
    :param d: A dictionary.
    :return: Dictionary with sorted elements.
    """
    result = {}
    for key, value in d.items():
        if type(value) == list:
            if value and type(value[0]) == dict:
                # Lists of dicts need the canonical dict ordering.
                result[key] = sort_list_of_dictionary(value)
            else:
                result[key] = sorted(value)
        elif type(value) == dict:
            result[key] = sort_dictionary(value)
        else:
            result[key] = value
    return result
-
-
def sort_list_of_dictionary(list_of_dict):
    """
    Sort a list of dictionaries: first each dictionary's values are sorted,
    then the dictionaries themselves are ordered by their tuple equivalents.
    :param list_of_dict: List of dictionaries.
    :return: A sorted list of dictionaries.
    """
    normalized = [sort_dictionary(d) for d in list_of_dict]
    return sorted(normalized, key=get_key_for_comparing_dict)
-
-
def check_if_user_value_matches_resources_attr(
    attribute_name,
    resources_value_for_attr,
    user_provided_value_for_attr,
    exclude_attributes,
    default_attribute_values,
    res,
):
    """
    Recursively compare an existing resource's value for an attribute against
    the user-provided value for the same attribute.

    The outcome is reported through `res`, a single-element list used as a
    mutable result holder: `res[0]` is set to False on the first mismatch and
    is otherwise left untouched (callers seed it with [True]).
    :param attribute_name: Name of the attribute being compared.
    :param resources_value_for_attr: The existing resource's value.
    :param user_provided_value_for_attr: The value supplied in the module options.
    :param exclude_attributes: Attributes to skip during matching; may contain
        nested per-attribute dicts.
    :param default_attribute_values: Module-author defaults; may contain nested
        per-attribute dicts.
    :param res: Single-element list holding the running match result.
    """
    # Narrow nested exclude/default structures to this attribute's scope before recursing.
    if isinstance(default_attribute_values.get(attribute_name), dict):
        default_attribute_values = default_attribute_values.get(attribute_name)

    if isinstance(exclude_attributes.get(attribute_name), dict):
        exclude_attributes = exclude_attributes.get(attribute_name)

    if isinstance(resources_value_for_attr, list) or isinstance(
        user_provided_value_for_attr, list
    ):
        # Perform a deep equivalence check for a List attribute
        if exclude_attributes.get(attribute_name):
            return
        if (
            user_provided_value_for_attr is None
            and default_attribute_values.get(attribute_name) is not None
        ):
            user_provided_value_for_attr = default_attribute_values.get(attribute_name)

        if resources_value_for_attr is None and user_provided_value_for_attr is None:
            return

        # Exactly one side is None -> mismatch.
        if (
            resources_value_for_attr is None
            and len(user_provided_value_for_attr) >= 0
            or user_provided_value_for_attr is None
            and len(resources_value_for_attr) >= 0
        ):
            res[0] = False
            return

        if (
            resources_value_for_attr is not None
            and user_provided_value_for_attr is not None
            and len(resources_value_for_attr) != len(user_provided_value_for_attr)
        ):
            res[0] = False
            return

        if (
            user_provided_value_for_attr
            and type(user_provided_value_for_attr[0]) == dict
        ):
            # Process a list of dict
            sorted_user_provided_value_for_attr = sort_list_of_dictionary(
                user_provided_value_for_attr
            )
            sorted_resources_value_for_attr = sort_list_of_dictionary(
                resources_value_for_attr
            )

        else:
            sorted_user_provided_value_for_attr = sorted(user_provided_value_for_attr)
            sorted_resources_value_for_attr = sorted(resources_value_for_attr)

        # Walk through the sorted list values of the resource's value for this attribute, and compare against user
        # provided values.
        for index, resources_value_for_attr_part in enumerate(
            sorted_resources_value_for_attr
        ):
            check_if_user_value_matches_resources_attr(
                attribute_name,
                resources_value_for_attr_part,
                sorted_user_provided_value_for_attr[index],
                exclude_attributes,
                default_attribute_values,
                res,
            )

    elif isinstance(resources_value_for_attr, dict):
        # Perform a deep equivalence check for dict typed attributes

        if not resources_value_for_attr and user_provided_value_for_attr:
            res[0] = False
        for key in resources_value_for_attr:
            if (
                user_provided_value_for_attr is not None
                and user_provided_value_for_attr
            ):
                # User supplied a dict: recurse per key.
                check_if_user_value_matches_resources_attr(
                    key,
                    resources_value_for_attr.get(key),
                    user_provided_value_for_attr.get(key),
                    exclude_attributes,
                    default_attribute_values,
                    res,
                )
            else:
                # No user value: fall back to defaults or the assigned-default check.
                if exclude_attributes.get(key) is None:
                    if default_attribute_values.get(key) is not None:
                        user_provided_value_for_attr = default_attribute_values.get(key)
                        check_if_user_value_matches_resources_attr(
                            key,
                            resources_value_for_attr.get(key),
                            user_provided_value_for_attr,
                            exclude_attributes,
                            default_attribute_values,
                            res,
                        )
                    else:
                        res[0] = is_attr_assigned_default(
                            default_attribute_values,
                            attribute_name,
                            resources_value_for_attr.get(key),
                        )

    elif resources_value_for_attr != user_provided_value_for_attr:
        if (
            exclude_attributes.get(attribute_name) is None
            and default_attribute_values.get(attribute_name) is not None
        ):
            # As the user has not specified a value for an optional attribute, if the existing resource's
            # current state has a DEFAULT value for that attribute, we must not consider this incongruence
            # an issue and continue with other checks. If the existing resource's value for the attribute
            # is not the default value, then the existing resource is not a match.
            if not is_attr_assigned_default(
                default_attribute_values, attribute_name, resources_value_for_attr
            ):
                res[0] = False
        elif user_provided_value_for_attr is not None:
            res[0] = False
-
-
def are_dicts_equal(
    option_name,
    existing_resource_dict,
    user_provided_dict,
    exclude_list,
    default_attribute_values,
):
    """
    Compare a dict-typed attribute of an existing resource against the dict
    the user provided for the corresponding module option.
    :param option_name: Name of the module option holding the dict.
    :param existing_resource_dict: The existing resource's value for the attribute.
    :param user_provided_dict: The user-supplied value for the attribute.
    :param exclude_list: Exclusion entries; see should_dict_attr_be_excluded.
    :param default_attribute_values: Module-author supplied defaults per attribute.
    :return: True when the dicts are considered equivalent for matching purposes.
    """
    if not user_provided_dict:
        # User has not provided a value for the map option. In this case, the user hasn't expressed an intent around
        # this optional attribute. Check if existing_resource_dict matches default.
        # For example, source_details attribute in volume is optional and does not have any defaults.
        return is_attr_assigned_default(
            default_attribute_values, option_name, existing_resource_dict
        )

    # If the existing resource has an empty dict, while the user has provided entries, dicts are not equal
    if not existing_resource_dict and user_provided_dict:
        return False

    # check if all keys of an existing resource's dict attribute matches user-provided dict's entries
    for sub_attr in existing_resource_dict:
        # If user has provided value for sub-attribute, then compare it with corresponding key in existing resource.
        if sub_attr in user_provided_dict:
            if existing_resource_dict[sub_attr] != user_provided_dict[sub_attr]:
                _debug(
                    "Failed to match: Existing resource's attr {0} sub-attr {1} value is {2}, while user "
                    "provided value is {3}".format(
                        option_name,
                        sub_attr,
                        existing_resource_dict[sub_attr],
                        user_provided_dict.get(sub_attr, None),
                    )
                )
                return False

        # If sub_attr not provided by user, check if the sub-attribute value of existing resource matches default value.
        else:
            if not should_dict_attr_be_excluded(option_name, sub_attr, exclude_list):
                default_value_for_dict_attr = default_attribute_values.get(
                    option_name, None
                )
                if default_value_for_dict_attr:
                    # if a default value for the sub-attr was provided by the module author, fail if the existing
                    # resource's value for the sub-attr is not the default
                    if not is_attr_assigned_default(
                        default_value_for_dict_attr,
                        sub_attr,
                        existing_resource_dict[sub_attr],
                    ):
                        return False
                else:
                    # No default value specified by module author for sub_attr.
                    # BUG FIX: the implicitly-concatenated message below previously
                    # lacked separating spaces ("user didnot provide", "for itor
                    # marked", "withother checks").
                    _debug(
                        "Consider as match: Existing resource's attr {0} sub-attr {1} value is {2}, while user did "
                        "not provide a value for it. The module author also has not provided a default value for it "
                        "or marked it for exclusion. So ignoring this attribute during matching and continuing with "
                        "other checks".format(
                            option_name, sub_attr, existing_resource_dict[sub_attr]
                        )
                    )

    return True
-
-
-def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list):
- """An entry for the Exclude list for excluding a map's key is specifed as a dict with the map option name as the
- key, and the value as a list of keys to be excluded within that map. For example, if the keys "k1" and "k2" of a map
- option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """
- for exclude_item in exclude_list:
- if isinstance(exclude_item, dict):
- if map_option_name in exclude_item:
- if option_key in exclude_item[map_option_name]:
- return True
- return False
-
-
def create_and_wait(
    resource_type,
    client,
    create_fn,
    kwargs_create,
    get_fn,
    get_param,
    module,
    states=None,
    wait_applicable=True,
    kwargs_get=None,
):
    """
    A utility function to create a resource and wait for the resource to get into the state as specified in the module
    options.
    :param wait_applicable: Specifies if wait for create is applicable for this resource
    :param resource_type: Type of the resource to be created. e.g. "vcn"
    :param client: OCI service client instance to call the service periodically to retrieve data.
                   e.g. VirtualNetworkClient()
    :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
    :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn.
    :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
    :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
    :param module: Instance of AnsibleModule.
    :param states: List of lifecycle states to watch for while waiting after create_fn is called.
                   e.g. [module.params['wait_until'], "FAULTY"]
    :param kwargs_get: Dictionary containing arguments to be used to call a multi-argument `get` function
    :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
    """
    try:
        return create_or_update_resource_and_wait(
            resource_type,
            create_fn,
            kwargs_create,
            module,
            wait_applicable,
            get_fn,
            get_param,
            states,
            client,
            # BUG FIX: kwargs_get must be passed by keyword. The callee declares
            # `update_target_resource_id_in_get_param` before `kwargs_get`, so a
            # tenth positional argument would bind to the wrong parameter and
            # the multi-argument get path would never be used (update_and_wait
            # already passes it by keyword).
            kwargs_get=kwargs_get,
        )
    except MaximumWaitTimeExceeded as ex:
        module.fail_json(msg=str(ex))
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
-
-
def update_and_wait(
    resource_type,
    client,
    update_fn,
    kwargs_update,
    get_fn,
    get_param,
    module,
    states=None,
    wait_applicable=True,
    kwargs_get=None,
):
    """
    A utility function to update a resource and wait for the resource to get into the state as specified in the module
    options. It wraps the create_and_wait method as apart from the method and arguments, everything else is similar.
    :param wait_applicable: Specifies if wait for create is applicable for this resource
    :param resource_type: Type of the resource to be created. e.g. "vcn"
    :param client: OCI service client instance to call the service periodically to retrieve data.
                   e.g. VirtualNetworkClient()
    :param update_fn: Function in the SDK to update the resource. e.g. virtual_network_client.update_vcn
    :param kwargs_update: Dictionary containing arguments to be used to call the update function update_fn.
    :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
    :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
    :param module: Instance of AnsibleModule.
    :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
    :param states: List of lifecycle states to watch for while waiting after update_fn is called.
                   e.g. [module.params['wait_until'], "FAULTY"]
    :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
    """
    try:
        # Delegate with explicit keywords so the argument order of the callee
        # cannot be confused with this wrapper's own order.
        return create_or_update_resource_and_wait(
            resource_type=resource_type,
            function=update_fn,
            kwargs_function=kwargs_update,
            module=module,
            wait_applicable=wait_applicable,
            get_fn=get_fn,
            get_param=get_param,
            states=states,
            client=client,
            kwargs_get=kwargs_get,
        )
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
    except MaximumWaitTimeExceeded as ex:
        module.fail_json(msg=str(ex))
-
-
def create_or_update_resource_and_wait(
    resource_type,
    function,
    kwargs_function,
    module,
    wait_applicable,
    get_fn,
    get_param,
    states,
    client,
    update_target_resource_id_in_get_param=False,
    kwargs_get=None,
):
    """
    A utility function to create or update a resource and wait for the resource to get into the state as specified in
    the module options.
    :param resource_type: Type of the resource to be created. e.g. "vcn"
    :param function: Function in the SDK to create or update the resource.
    :param kwargs_function: Dictionary containing arguments to be used to call the create or update function
    :param module: Instance of AnsibleModule.
    :param wait_applicable: Specifies if wait for create is applicable for this resource
    :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
    :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
    :param states: List of lifecycle states to watch for while waiting after create_fn is called.
                   e.g. [module.params['wait_until'], "FAULTY"]
    :param client: OCI service client instance to call the service periodically to retrieve data.
                   e.g. VirtualNetworkClient()
    :param update_target_resource_id_in_get_param: Accepted for interface compatibility;
        not referenced anywhere in this function's body.
    :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
    :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
    """
    # Perform the create/update first, then replace the returned resource with
    # its post-wait state so callers see the settled resource.
    result = create_resource(resource_type, function, kwargs_function, module)
    resource = result[resource_type]
    result[resource_type] = wait_for_resource_lifecycle_state(
        client,
        module,
        wait_applicable,
        kwargs_get,
        get_fn,
        get_param,
        resource,
        states,
        resource_type,
    )
    return result
-
-
def wait_for_resource_lifecycle_state(
    client,
    module,
    wait_applicable,
    kwargs_get,
    get_fn,
    get_param,
    resource,
    states,
    resource_type=None,
):
    """
    A utility function to wait for the resource to get into the state as specified in
    the module options.
    :param client: OCI service client instance to call the service periodically to retrieve data.
                   e.g. VirtualNetworkClient
    :param module: Instance of AnsibleModule.
    :param wait_applicable: Specifies if wait for create is applicable for this resource
    :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
    :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
    :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
    :param resource: Dict form of the resource to wait on; must contain "id" when kwargs_get is not given.
    :param resource_type: Type of the resource to be created. e.g. "vcn"
    :param states: List of lifecycle states to watch for while waiting after create_fn is called.
                   e.g. [module.params['wait_until'], "FAULTY"]
    :return: The resource as a dict — refreshed after waiting, or unchanged when waiting
        is not applicable or not requested by the user.
    """
    if wait_applicable and module.params.get("wait", None):
        if resource_type == "compartment":
            # An immediate attempt to retrieve a compartment after a compartment is created fails with
            # 'Authorization failed or requested resource not found', 'status': 404}.
            # This is because it takes few seconds for the permissions on a compartment to be ready.
            # Wait for few seconds before attempting a get call on compartment.
            _debug(
                "Pausing execution for permission on the newly created compartment to be ready."
            )
            time.sleep(15)
        if kwargs_get:
            # Multi-argument get: the caller supplied the full kwargs for get_fn.
            _debug(
                "Waiting for resource to reach READY state. get_args: {0}".format(
                    kwargs_get
                )
            )
            response_get = call_with_backoff(get_fn, **kwargs_get)
        else:
            # Single-argument get keyed by the resource's OCID.
            _debug(
                "Waiting for resource with id {0} to reach READY state.".format(
                    resource["id"]
                )
            )
            response_get = call_with_backoff(get_fn, **{get_param: resource["id"]})
        if states is None:
            # NOTE(review): 'wait_until' appears to be a single state value that is
            # then used with the `in states` membership test below — confirm it is
            # iterable/meant to be used this way.
            states = module.params.get("wait_until") or DEFAULT_READY_STATES
        resource = to_dict(
            oci.wait_until(
                client,
                response_get,
                evaluate_response=lambda r: r.data.lifecycle_state in states,
                max_wait_seconds=module.params.get(
                    "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
                ),
            ).data
        )
    return resource
-
-
def wait_on_work_request(client, response, module):
    """
    Wait on the work request referenced by `response` and return its final data.

    When the user asked to wait (module option "wait"), the work request is
    polled until it reaches SUCCEEDED; otherwise it is only polled until
    ACCEPTED.
    :param client: OCI service client used by oci.wait_until for polling.
    :param response: The SDK response whose .data.id identifies the work request.
    :param module: Instance of AnsibleModule.
    :return: The work request data after waiting.
    """
    try:
        if module.params.get("wait", None):
            _debug(
                "Waiting for work request with id {0} to reach SUCCEEDED state.".format(
                    response.data.id
                )
            )
            wait_response = oci.wait_until(
                client,
                response,
                evaluate_response=lambda r: r.data.status == "SUCCEEDED",
                max_wait_seconds=module.params.get(
                    "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
                ),
            )
        else:
            _debug(
                "Waiting for work request with id {0} to reach ACCEPTED state.".format(
                    response.data.id
                )
            )
            wait_response = oci.wait_until(
                client,
                response,
                evaluate_response=lambda r: r.data.status == "ACCEPTED",
                max_wait_seconds=module.params.get(
                    "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
                ),
            )
    except MaximumWaitTimeExceeded as ex:
        _debug(str(ex))
        module.fail_json(msg=str(ex))
    except ServiceError as ex:
        _debug(str(ex))
        module.fail_json(msg=str(ex))
    # NOTE(review): this relies on fail_json exiting the module; if it returned,
    # wait_response would be unbound here — confirm fail_json never returns.
    return wait_response.data
-
-
def delete_and_wait(
    resource_type,
    client,
    get_fn,
    kwargs_get,
    delete_fn,
    kwargs_delete,
    module,
    states=None,
    wait_applicable=True,
    process_work_request=False,
):
    """A utility function to delete a resource and wait for the resource to get into the state as specified in the
    module options.
    :param wait_applicable: Specifies if wait for delete is applicable for this resource
    :param resource_type: Type of the resource to be deleted. e.g. "vcn"
    :param client: OCI service client instance to call the service periodically to retrieve data.
        e.g. VirtualNetworkClient()
    :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
    :param kwargs_get: Dictionary of arguments for get function get_fn. e.g. {"vcn_id": module.params["id"]}
    :param delete_fn: Function in the SDK to delete the resource. e.g. virtual_network_client.delete_vcn
    :param kwargs_delete: Dictionary of arguments for delete function delete_fn. e.g. {"vcn_id": module.params["id"]}
    :param module: Instance of AnsibleModule.
    :param states: List of lifecycle states to watch for while waiting after delete_fn is called. If nothing is passed,
        defaults to ["TERMINATED", "DETACHED", "DELETED"].
    :param process_work_request: Whether a work request is generated on an API call and if it needs to be handled.
    :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
    """

    # Lifecycle states in which the resource is already being (or has been)
    # removed; the delete call is skipped for a resource in any of these.
    states_set = set(["DETACHING", "DETACHED", "DELETING", "DELETED", "TERMINATING", "TERMINATED"])
    result = dict(changed=False)
    result[resource_type] = dict()
    try:
        resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
        if resource:
            # Only issue the delete when the resource is not already on its way out.
            if "lifecycle_state" not in resource or resource["lifecycle_state"] not in states_set:
                response = call_with_backoff(delete_fn, **kwargs_delete)
                if process_work_request:
                    # Deletion is asynchronous via a work request: wait on it,
                    # then re-read the resource for its latest state.
                    wr_id = response.headers.get("opc-work-request-id")
                    get_wr_response = call_with_backoff(
                        client.get_work_request, work_request_id=wr_id
                    )
                    result["work_request"] = to_dict(
                        wait_on_work_request(client, get_wr_response, module)
                    )
                    # Set changed to True as work request has been created to delete the resource.
                    result["changed"] = True
                    resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
                else:
                    _debug("Deleted {0}, {1}".format(resource_type, resource))
                    result["changed"] = True

                if wait_applicable and module.params.get("wait", None):
                    if states is None:
                        # A user-supplied wait_until target takes precedence
                        # over the default terminated states.
                        states = (
                            module.params.get("wait_until")
                            or DEFAULT_TERMINATED_STATES
                        )
                    try:
                        wait_response = oci.wait_until(
                            client,
                            get_fn(**kwargs_get),
                            evaluate_response=lambda r: r.data.lifecycle_state
                            in states,
                            max_wait_seconds=module.params.get(
                                "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
                            ),
                            succeed_on_not_found=True,
                        )
                    except MaximumWaitTimeExceeded as ex:
                        module.fail_json(msg=str(ex))
                    except ServiceError as ex:
                        if ex.status != 404:
                            module.fail_json(msg=ex.message)
                        else:
                            # While waiting for resource to get into terminated state, if the resource is not found.
                            _debug(
                                "API returned Status:404(Not Found) while waiting for resource to get into"
                                " terminated state."
                            )
                            resource["lifecycle_state"] = "DELETED"
                            result[resource_type] = resource
                            return result
                    # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found.
                    if type(wait_response) is not Sentinel:
                        resource = to_dict(wait_response.data)
                    else:
                        resource["lifecycle_state"] = "DELETED"

                result[resource_type] = resource
        else:
            _debug(
                "Resource {0} with {1} already deleted. So returning changed=False".format(
                    resource_type, kwargs_get
                )
            )
    except ServiceError as ex:
        # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone
        # resource is not available, instead of the expected 404. So working around this for now.
        # NOTE(review): for a DnsClient, errors other than the 400/InvalidParameter
        # case (and other than what the elif covers) are silently swallowed here —
        # confirm this is intentional.
        if type(client) == oci.dns.DnsClient:
            if ex.status == 400 and ex.code == "InvalidParameter":
                _debug(
                    "Resource {0} with {1} already deleted. So returning changed=False".format(
                        resource_type, kwargs_get
                    )
                )
        elif ex.status != 404:
            module.fail_json(msg=ex.message)
        result[resource_type] = dict()
    return result
-
-
def are_attrs_equal(current_resource, module, attributes):
    """Return True when the listed attributes of *current_resource* match the
    values the user supplied in *module*.

    Attributes for which the user provided no value are skipped, so an absent
    option never forces an update.

    :param current_resource: A resource model instance.
    :param module: The AnsibleModule with the user's options.
    :param attributes: Attribute names to compare.
    :return: True if every user-provided value matches the resource.
    """
    for attribute in attributes:
        requested = _get_user_provided_value(module, attribute_name=attribute)
        if requested is None:
            # Nothing requested for this attribute; nothing to compare.
            continue
        actual = getattr(current_resource, attribute, None)
        if actual != requested:
            _debug(
                "are_attrs_equal - current resource's attribute "
                + attribute
                + " value is "
                + str(actual)
                + " and this doesn't match user provided value of "
                + str(requested)
            )
            return False
    return True
-
-
-def _get_user_provided_value(module, attribute_name):
- """
- Returns the user provided value for "attribute_name". We consider aliases in the module.
- """
- user_provided_value = module.params.get(attribute_name, None)
- if user_provided_value is None:
- # If the attribute_name is set as an alias for some option X and user has provided value in the playbook using
- # option X, then user provided value for attribute_name is equal to value for X.
- # Get option name for attribute_name from module.aliases.
- # module.aliases is a dictionary with key as alias name and its value as option name.
- option_alias_for_attribute = module.aliases.get(attribute_name, None)
- if option_alias_for_attribute is not None:
- user_provided_value = module.params.get(option_alias_for_attribute, None)
- return user_provided_value
-
-
def update_model_with_user_options(curr_model, update_model, module):
    """Fill *update_model* from the user's options in *module*.

    For each attribute of the update model: when the user's value differs from
    the current resource's value, the user's value is set; when the values are
    equal, the current value is carried over so the update preserves it.

    :param curr_model: Model instance describing the current resource state.
    :param update_model: Update-model instance to populate.
    :param module: AnsibleModule holding the user's requested options.
    :return: The populated *update_model*.
    """
    for attribute in update_model.attribute_map.keys():
        existing_value = getattr(curr_model, attribute, None)
        requested_value = _get_user_provided_value(module, attribute_name=attribute)

        if existing_value == requested_value:
            # No change requested for this attribute; keep the current value.
            setattr(update_model, attribute, existing_value)
        elif requested_value is not None:
            # Only update if a user has specified a value for an option.
            _debug(
                "User requested {0} for attribute {1}, whereas the current value is {2}. So adding it "
                "to the update model".format(
                    requested_value, attribute, existing_value
                )
            )
            setattr(update_model, attribute, requested_value)
    return update_model
-
-
def _get_retry_strategy():
    """Build the retry strategy used for OCI API calls.

    Up to 10 attempts with full-jitter backoff (equal jitter on throttles),
    retrying throttles (429), quota/limit 400s, 409 conflicts and any 5xx.
    """
    builder = RetryStrategyBuilder(
        max_attempts_check=True,
        max_attempts=10,
        retry_max_wait_between_calls_seconds=30,
        retry_base_sleep_time_seconds=3,
        backoff_type=oci.retry.BACKOFF_FULL_JITTER_EQUAL_ON_THROTTLE_VALUE,
    )
    # Service errors considered transient enough to retry.
    retryable_errors = {
        429: [],
        400: ["QuotaExceeded", "LimitExceeded"],
        409: ["Conflict"],
    }
    builder.add_service_error_check(
        service_error_retry_config=retryable_errors,
        service_error_retry_on_any_5xx=True,
    )
    return builder.get_retry_strategy()
-
-
def call_with_backoff(fn, **kwargs):
    """Invoke *fn* with a retry strategy, tolerating SDKs that predate it.

    A default ``retry_strategy`` is injected unless the caller supplied one.
    Older SDK versions reject the keyword with a TypeError, in which case the
    call is repeated without it; any other TypeError is re-raised.
    """
    if "retry_strategy" not in kwargs:
        kwargs["retry_strategy"] = _get_retry_strategy()
    try:
        return fn(**kwargs)
    except TypeError as te:
        if "unexpected keyword argument" not in str(te):
            # A validation error raised by the SDK, throw it back.
            raise
        # Older SDKs do not support retry_strategy; retry without it.
        del kwargs["retry_strategy"]
        return fn(**kwargs)
-
-
def generic_hash(obj):
    """Compute a hash of all the fields in the object.

    Lists contribute the sum of their members' hashes; dicts contribute a
    per-item hash mixing key and value; everything else hashes directly.

    :param obj: Object whose hash needs to be computed (must expose
        ``attribute_map``).
    :return: An integer hash value for the object.
    """
    # 'total' instead of 'sum': the original shadowed the builtin sum().
    total = 0
    for field in obj.attribute_map.keys():
        field_value = getattr(obj, field)
        if isinstance(field_value, list):
            for value in field_value:
                total += hash(value)
        elif isinstance(field_value, dict):
            for k, v in field_value.items():
                total += hash(hash(k) + hash(":") + hash(v))
        else:
            # Reuse field_value rather than a redundant second getattr().
            total += hash(field_value)
    return total
-
-
def generic_eq(s, other):
    """Two instances are equal when the other is not None and their instance
    dictionaries hold the same state."""
    return other is not None and s.__dict__ == other.__dict__
-
-
def generate_subclass(parent_class):
    """Create a hashable subclass of *parent_class*.

    The generated class reuses the parent's __init__ and overrides __hash__
    and __eq__ with the generic field-based implementations so instances can
    live in sets and be compared for state equality.
    """
    overrides = {
        "__init__": parent_class.__init__,
        "__hash__": generic_hash,
        "__eq__": generic_eq,
    }
    return type("GeneratedSub" + parent_class.__name__, (parent_class,), overrides)
-
-
def create_hashed_instance(class_type):
    """Instantiate (with no arguments) a hashable variant of *class_type*."""
    return generate_subclass(class_type)()
-
-
def get_hashed_object_list(class_type, object_with_values, attributes_class_type=None):
    """Convert each element of *object_with_values* into a hashable instance.

    :param class_type: Class whose hashable variant should wrap each element.
    :param object_with_values: Iterable of source instances, or None.
    :param attributes_class_type: Optional list of attribute class types to
        recursively convert (passed through to get_hashed_object).
    :return: A list of hashable instances, or None when the input is None.
    """
    if object_with_values is None:
        return None
    # Comprehension replaces the manual append loop (same order, same result).
    return [
        get_hashed_object(class_type, value, attributes_class_type)
        for value in object_with_values
    ]
-
-
def get_hashed_object(
    class_type, object_with_value, attributes_class_type=None, supported_attributes=None
):
    """Build a hashable copy of *object_with_value*.

    The returned instance belongs to a generated subclass of *class_type*
    whose __hash__/__eq__ compare field state, making it usable in set()
    comparisons.

    :param class_type: Class whose instances need to be hashable.
    :param object_with_value: Source instance whose state is copied, or None.
    :param attributes_class_type: Optional list of attribute class types; a
        matching attribute value is itself converted recursively.
    :param supported_attributes: Optional whitelist of attribute names to copy,
        shielding against attributes the current implementation does not know.
    :return: A hashable instance mirroring *object_with_value*, or None.
    """
    if object_with_value is None:
        return None

    hashable = generate_subclass(class_type)()

    # Restrict to the supported subset when one is given.
    if supported_attributes:
        attributes = list(set(hashable.attribute_map) & set(supported_attributes))
    else:
        attributes = hashable.attribute_map

    for attribute in attributes:
        value = getattr(object_with_value, attribute)
        for candidate_type in attributes_class_type or []:
            if isinstance(value, candidate_type):
                value = get_hashed_object(candidate_type, value)
        setattr(hashable, attribute, value)

    return hashable
-
-
def update_class_type_attr_difference(
    update_class_details, existing_instance, attr_name, attr_class, input_attr_value
):
    """Check the difference and update an attribute which is represented by a
    class instance. Not applicable if the attribute type is a primitive value.

    For example, if a class name is A with an attribute x, then if A.x = X(),
    then only this method works.

    :param update_class_details: The instance which should be updated if there
        is change in attribute value.
    :param existing_instance: The instance whose attribute value is compared
        with input attribute value.
    :param attr_name: Name of the attribute whose value should be compared.
    :param attr_class: Class type of the attribute.
    :param input_attr_value: The value of input attribute which should replace
        the current value in case of mismatch.
    :return: A boolean value indicating whether attribute value has been replaced.
    """
    # Wrap the existing value so it carries the generated __eq__/__hash__.
    existing_attr_value = get_hashed_object(
        attr_class, getattr(existing_instance, attr_name)
    )
    if input_attr_value is None:
        # No input for this attribute: carry the existing value over unchanged.
        setattr(update_class_details, attr_name, existing_attr_value)
        return False

    # Use `!=` instead of calling __eq__ directly: a raw dunder call can
    # return NotImplemented, which is truthy and would corrupt `changed`.
    changed = input_attr_value != existing_attr_value
    if changed:
        setattr(update_class_details, attr_name, input_attr_value)
    else:
        setattr(update_class_details, attr_name, existing_attr_value)
    return changed
-
-
def get_existing_resource(target_fn, module, **kwargs):
    """Fetch a resource, returning None when it does not exist.

    :param target_fn: SDK function used to look up the resource.
    :param module: Instance of AnsibleModule (for fail_json on real errors).
    :param kwargs: Arguments identifying the requested resource.
    :return: The resource data, or None on a 404.
    """
    try:
        return call_with_backoff(target_fn, **kwargs).data
    except ServiceError as ex:
        if ex.status != 404:
            module.fail_json(msg=ex.message)
    # A 404 simply means the resource does not exist (yet).
    return None
-
-
def get_attached_instance_info(
    module, lookup_attached_instance, list_attachments_fn, list_attachments_args
):
    """Return the first volume attachment in ATTACHING/ATTACHED state, or None.

    :param module: Instance of AnsibleModule.
    :param lookup_attached_instance: When true, search every compartment in the
        tenancy; otherwise list attachments with ``list_attachments_args`` as-is.
    :param list_attachments_fn: SDK list function for the attachment resource.
    :param list_attachments_args: Keyword arguments for ``list_attachments_fn``;
        note this dict is mutated in-place (compartment_id is overwritten)
        when iterating compartments.
    :return: The matching attachment dict, or None when there is none.
    """
    config = get_oci_config(module)
    identity_client = create_service_client(module, IdentityClient)

    volume_attachments = []

    if lookup_attached_instance:
        # Get all the compartments in the tenancy
        compartments = to_dict(
            identity_client.list_compartments(
                config.get("tenancy"), compartment_id_in_subtree=True
            ).data
        )
        # For each compartment, get the volume attachments for the compartment_id with the other args in
        # list_attachments_args.
        for compartment in compartments:
            list_attachments_args["compartment_id"] = compartment["id"]
            try:
                volume_attachments += list_all_resources(
                    list_attachments_fn, **list_attachments_args
                )

            # Pass ServiceError due to authorization issue in accessing volume attachments of a compartment
            # NOTE(review): every ServiceError is swallowed here, not only 404s
            # (there is no else/raise after the status check) — confirm that
            # ignoring non-404 failures per compartment is intentional.
            except ServiceError as ex:
                if ex.status == 404:
                    pass

    else:
        volume_attachments = list_all_resources(
            list_attachments_fn, **list_attachments_args
        )

    volume_attachments = to_dict(volume_attachments)
    # volume_attachments has attachments in DETACHING or DETACHED state. Return the volume attachment in ATTACHING or
    # ATTACHED state
    return next(
        (
            volume_attachment
            for volume_attachment in volume_attachments
            if volume_attachment["lifecycle_state"] in ["ATTACHING", "ATTACHED"]
        ),
        None,
    )
-
-
def check_mode(fn):
    """Decorator gating *fn* behind the OCI_ANSIBLE_EXPERIMENTAL env var.

    When the environment variable is unset the wrapped call is a no-op that
    returns None; when it is set, the call passes through unchanged.
    """
    from functools import wraps  # local import keeps this block self-contained

    @wraps(fn)  # preserve the wrapped function's name/docstring for debugging
    def wrapper(*args, **kwargs):
        if os.environ.get("OCI_ANSIBLE_EXPERIMENTAL", None):
            return fn(*args, **kwargs)
        return None

    return wrapper
-
-
def check_and_return_component_list_difference(
    input_component_list, existing_components, purge_components, delete_components=False
):
    """Return the reconciled component list and a changed flag.

    An empty/None input list means "clear everything": an empty list with
    changed=True is returned. Otherwise the comparison is delegated to
    get_component_list_difference.
    """
    if not input_component_list:
        return [], True
    return get_component_list_difference(
        input_component_list,
        existing_components,
        purge_components,
        delete_components,
    )
-
-
def get_component_list_difference(
    input_component_list, existing_components, purge_components, delete_components=False
):
    """Diff the requested component list against the existing one.

    Modes:
      * delete_components: remove any requested component that exists.
      * purge_components: any difference at all replaces the whole list.
      * otherwise: requested components are appended to the existing list.

    :return: Tuple (new_component_list_or_None, changed).
    """
    requested = set(input_component_list)

    if delete_components:
        if existing_components is None:
            return None, False
        to_remove = set(existing_components) & requested
        if to_remove:
            return list(set(existing_components) - to_remove), True
        return None, False

    if existing_components is None:
        # Nothing exists yet; the requested list becomes the new state.
        return input_component_list, True

    if purge_components and requested.symmetric_difference(set(existing_components)):
        return input_component_list, True

    additions = requested.difference(set(existing_components))
    if additions:
        return list(additions) + existing_components, True
    return None, False
-
-
def write_to_file(path, content):
    """Write binary *content* to *path*, overwriting any existing file."""
    dest_file = open(to_bytes(path), "wb")
    try:
        dest_file.write(content)
    finally:
        dest_file.close()
-
-
def get_target_resource_from_list(
    module, list_resource_fn, target_resource_id=None, **kwargs
):
    """Find a resource by id within a list call's results.

    Use this when the service offers no "get resource" operation. The return
    value only mimics an OCI response (it exposes ``data``) and must NOT be
    fed to wait_until(), which expects a full response object.

    :param module: The AnsibleModule representing the options provided by the user.
    :param list_resource_fn: The function which lists all the resources.
    :param target_resource_id: The identifier of the resource to filter out.
    :param kwargs: Arguments for the list call.
    :return: A partial response wrapper whose data is the target resource or None.
    """

    class ResponseWrapper:
        def __init__(self, data):
            self.data = data

    try:
        candidates = list_all_resources(list_resource_fn, **kwargs) or []
        for candidate in candidates:
            if candidate.id == target_resource_id:
                # Mimic an OCI response since oci_utils helpers expect a
                # Response-ish object.
                return ResponseWrapper(data=candidate)
        return ResponseWrapper(data=None)
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
diff --git a/lib/ansible/module_utils/postgres.py b/lib/ansible/module_utils/postgres.py
deleted file mode 100644
index 63811c3055..0000000000
--- a/lib/ansible/module_utils/postgres.py
+++ /dev/null
@@ -1,330 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
-# Most of this was originally added by other creators in the postgresql_user module.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-psycopg2 = None # This line needs for unit tests
-try:
- import psycopg2
- HAS_PSYCOPG2 = True
-except ImportError:
- HAS_PSYCOPG2 = False
-
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six import iteritems
-from distutils.version import LooseVersion
-
-
def postgres_common_argument_spec():
    """Return the argument-spec dictionary of connection options shared by
    most PostgreSQL modules."""
    ssl_mode_choices = ['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']
    return {
        'login_user': {'default': 'postgres'},
        'login_password': {'default': '', 'no_log': True},
        'login_host': {'default': ''},
        'login_unix_socket': {'default': ''},
        'port': {'type': 'int', 'default': 5432, 'aliases': ['login_port']},
        'ssl_mode': {'default': 'prefer', 'choices': ssl_mode_choices},
        'ca_cert': {'aliases': ['ssl_rootcert']},
    }
-
-
def ensure_required_libs(module):
    """Fail the module unless psycopg2 is importable and, when ca_cert is
    requested, recent enough (>= 2.4.3) to support sslrootcert."""
    if not HAS_PSYCOPG2:
        module.fail_json(msg=missing_required_lib('psycopg2'))

    needs_recent_psycopg2 = module.params.get('ca_cert')
    if needs_recent_psycopg2 and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
-
-
def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
    """Connect to a PostgreSQL database.

    Return psycopg2 connection object, or None when the connection fails and
    fail_on_conn is False.

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        conn_params (dict) -- dictionary with connection parameters

    Kwargs:
        autocommit (bool) -- commit automatically (default False)
        fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
    """
    ensure_required_libs(module)

    db_connection = None
    try:
        db_connection = psycopg2.connect(**conn_params)
        if autocommit:
            # set_session(autocommit=...) exists only from psycopg2 2.4.2 on;
            # older versions use the isolation-level constant instead.
            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
                db_connection.set_session(autocommit=True)
            else:
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

        # Switch role, if specified:
        if module.params.get('session_role'):
            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

            try:
                # NOTE(review): the role name is %-interpolated into the SQL;
                # SET ROLE cannot be parameterized, but a role name containing
                # a double quote could break out of the quoting — confirm the
                # value is validated upstream.
                cursor.execute('SET ROLE "%s"' % module.params['session_role'])
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e))
            finally:
                cursor.close()

    except TypeError as e:
        # psycopg2 raises TypeError for unknown connect() keywords; sslrootcert
        # in particular means the server/libpq is too old for that option.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least '
                                 'version 8.4 to support sslrootcert')

        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None

    except Exception as e:
        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None

    return db_connection
-
-
def exec_sql(obj, query, query_params=None, ddl=False, add_to_executed=True, dont_exec=False):
    """Execute SQL on behalf of a PostgreSQL helper object.

    *obj* must expose ``module`` (AnsibleModule) and ``cursor`` (psycopg2
    cursor) attributes, plus an ``executed_queries`` list when queries are
    being recorded.

    Returns the fetched rows, True for DDL statements (which produce no
    result set), or False after failing the module on error.

    Kwargs:
        query_params (dict or tuple) -- parameters, to prevent SQL injection
        ddl (bool) -- return True/False instead of rows (default False)
        add_to_executed (bool) -- append the query to obj.executed_queries
        dont_exec (bool) -- only render/record the query, never run it
    """
    if dont_exec:
        # Check-mode support: render the query with its parameters
        # interpolated and record it, without touching the database.
        rendered = obj.cursor.mogrify(query, query_params)
        if add_to_executed:
            obj.executed_queries.append(rendered)
        return True

    try:
        if query_params is None:
            obj.cursor.execute(query)
        else:
            obj.cursor.execute(query, query_params)

        if add_to_executed:
            # Record the fully interpolated form when parameters were used.
            if query_params is None:
                obj.executed_queries.append(query)
            else:
                obj.executed_queries.append(obj.cursor.mogrify(query, query_params))

        if ddl:
            return True
        return obj.cursor.fetchall()
    except Exception as e:
        obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
        return False
-
-
def get_conn_params(module, params_dict, warn_db_default=True):
    """Build a psycopg2 connection-parameter dict from module parameters.

    Empty-string and None values are dropped so psycopg2 falls back to its
    own defaults (keyword arguments must be absent for that to happen).

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        params_dict (dict) -- dictionary with variables

    Kwargs:
        warn_db_default (bool) -- warn that the default DB is used (default True)
    """
    # Ansible option name -> psycopg2 connect() keyword.
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }

    # The database option goes by different names across modules; map the
    # first one that is set, warning when none is.
    if params_dict.get('db'):
        params_map['db'] = 'database'
    elif params_dict.get('database'):
        params_map['database'] = 'database'
    elif params_dict.get('login_db'):
        params_map['login_db'] = 'database'
    elif warn_db_default:
        module.warn('Database name has not been passed, '
                    'used default database to connect to.')

    kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
              if k in params_map and v != '' and v is not None)

    # If a login_unix_socket is specified and the target looks local, use the
    # socket path as the host.
    is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
    if is_localhost and params_dict["login_unix_socket"] != "":
        kw["host"] = params_dict["login_unix_socket"]

    return kw
-
-
class PgMembership(object):
    """Manage PostgreSQL group-role memberships for a set of target roles.

    ``groups`` are the roles to grant/revoke; ``target_roles`` are the roles
    whose membership changes. Issued SQL is collected in ``executed_queries``
    for module output. Non-existent roles are dropped from both lists during
    construction (or the module fails, depending on ``fail_on_role``).
    """

    def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
        # module: AnsibleModule, used for warn/fail_json/exit_json
        self.module = module
        # cursor: psycopg2 cursor the queries run on
        self.cursor = cursor
        self.target_roles = [r.strip() for r in target_roles]
        self.groups = [r.strip() for r in groups]
        # SQL statements executed during this run (for module output)
        self.executed_queries = []
        # group -> list of roles granted/revoked in this run
        self.granted = {}
        self.revoked = {}
        # When True, a missing role fails the module; otherwise it only warns.
        self.fail_on_role = fail_on_role
        self.non_existent_roles = []
        self.changed = False
        # Validates the role lists and prunes non-existent entries.
        self.__check_roles_exist()

    def grant(self):
        """GRANT each group to every target role not already a member.

        NOTE(review): self.changed is overwritten by each GRANT, so the
        return value reflects only the most recent statement — an earlier
        successful grant can be masked. Confirm this is intended.
        """
        for group in self.groups:
            self.granted[group] = []

            for role in self.target_roles:
                # If role is in a group now, pass:
                if self.__check_membership(group, role):
                    continue

                query = 'GRANT "%s" TO "%s"' % (group, role)
                self.changed = exec_sql(self, query, ddl=True)

                if self.changed:
                    self.granted[group].append(role)

        return self.changed

    def revoke(self):
        """REVOKE each group from every target role that is a member.

        NOTE(review): same caveat as grant() — self.changed only reflects
        the last REVOKE issued.
        """
        for group in self.groups:
            self.revoked[group] = []

            for role in self.target_roles:
                # If role is not in a group now, pass:
                if not self.__check_membership(group, role):
                    continue

                query = 'REVOKE "%s" FROM "%s"' % (group, role)
                self.changed = exec_sql(self, query, ddl=True)

                if self.changed:
                    self.revoked[group].append(role)

        return self.changed

    def __check_membership(self, src_role, dst_role):
        """Return True when *dst_role* is currently a member of *src_role*."""
        query = ("SELECT ARRAY(SELECT b.rolname FROM "
                 "pg_catalog.pg_auth_members m "
                 "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
                 "WHERE m.member = r.oid) "
                 "FROM pg_catalog.pg_roles r "
                 "WHERE r.rolname = %(dst_role)s")

        res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
        membership = []
        if res:
            # res[0][0] is the ARRAY of role names dst_role belongs to.
            membership = res[0][0]

        if not membership:
            return False

        if src_role in membership:
            return True

        return False

    def __check_roles_exist(self):
        """Validate groups and target roles, pruning any that do not exist."""
        existent_groups = self.__roles_exist(self.groups)
        existent_roles = self.__roles_exist(self.target_roles)

        for group in self.groups:
            if group not in existent_groups:
                if self.fail_on_role:
                    self.module.fail_json(msg="Role %s does not exist" % group)
                else:
                    self.module.warn("Role %s does not exist, pass" % group)
                    self.non_existent_roles.append(group)

        for role in self.target_roles:
            if role not in existent_roles:
                if self.fail_on_role:
                    self.module.fail_json(msg="Role %s does not exist" % role)
                else:
                    self.module.warn("Role %s does not exist, pass" % role)

                    if role not in self.groups:
                        self.non_existent_roles.append(role)

            # NOTE(review): this else runs for every target role that DOES
            # exist and, with fail_on_role=True (the default), exits the
            # module immediately with a message that repeats the same role
            # name twice. Later upstream variants guard this branch with
            # `if role in self.groups:` (self-grant detection) — confirm
            # against VCS history before relying on this behavior.
            else:
                if self.fail_on_role:
                    self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role))
                else:
                    self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role))

        # Update role lists, excluding non existent roles:
        self.groups = [g for g in self.groups if g not in self.non_existent_roles]

        self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]

    def __roles_exist(self, roles):
        """Return the subset of *roles* that exist in pg_roles.

        NOTE(review): role names are quoted by string concatenation here
        rather than passed as query parameters — a name containing a quote
        would break (or inject into) the query; confirm inputs are trusted.
        """
        tmp = ["'" + x + "'" for x in roles]
        query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
        return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
diff --git a/lib/ansible/module_utils/pure.py b/lib/ansible/module_utils/pure.py
deleted file mode 100644
index 019c11add2..0000000000
--- a/lib/ansible/module_utils/pure.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-HAS_PURESTORAGE = True
-try:
- from purestorage import purestorage
-except ImportError:
- HAS_PURESTORAGE = False
-
-HAS_PURITY_FB = True
-try:
- from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest
-except ImportError:
- HAS_PURITY_FB = False
-
-from functools import wraps
-from os import environ
-from os import path
-import platform
-
-VERSION = 1.2
-USER_AGENT_BASE = 'Ansible'
-API_AGENT_VERSION = 1.5
-
-
-def get_system(module):
- """Return System Object or Fail"""
- user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
- 'base': USER_AGENT_BASE,
- 'class': __name__,
- 'version': VERSION,
- 'platform': platform.platform()
- }
- array_name = module.params['fa_url']
- api = module.params['api_token']
-
- if array_name and api:
- system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent)
- elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'):
- system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent)
- else:
- module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments")
- try:
- system.get()
- except Exception:
- module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
- return system
-
-
-def get_blade(module):
- """Return System Object or Fail"""
- user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
- 'base': USER_AGENT_BASE,
- 'class': __name__,
- 'version': VERSION,
- 'platform': platform.platform()
- }
- blade_name = module.params['fb_url']
- api = module.params['api_token']
-
- if blade_name and api:
- blade = PurityFb(blade_name)
- blade.disable_verify_ssl()
- try:
- blade.login(api)
- versions = blade.api_version.list_versions().versions
- if API_AGENT_VERSION in versions:
- blade._api_client.user_agent = user_agent
- except rest.ApiException as e:
- module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
- elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'):
- blade = PurityFb(environ.get('PUREFB_URL'))
- blade.disable_verify_ssl()
- try:
- blade.login(environ.get('PUREFB_API'))
- versions = blade.api_version.list_versions().versions
- if API_AGENT_VERSION in versions:
- blade._api_client.user_agent = user_agent
- except rest.ApiException as e:
- module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
- else:
- module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments")
- return blade
-
-
-def purefa_argument_spec():
- """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
-
- return dict(
- fa_url=dict(),
- api_token=dict(no_log=True),
- )
-
-
-def purefb_argument_spec():
- """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
-
- return dict(
- fb_url=dict(),
- api_token=dict(no_log=True),
- )
diff --git a/lib/ansible/module_utils/rabbitmq.py b/lib/ansible/module_utils/rabbitmq.py
deleted file mode 100644
index cf76400644..0000000000
--- a/lib/ansible/module_utils/rabbitmq.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2016, Jorge Rodriguez <jorge.rodriguez@tiriel.eu>
-# Copyright: (c) 2018, John Imison <john+github@imison.net>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.six.moves.urllib import parse as urllib_parse
-from mimetypes import MimeTypes
-
-import os
-import json
-import traceback
-
-PIKA_IMP_ERR = None
-try:
- import pika
- import pika.exceptions
- from pika import spec
- HAS_PIKA = True
-except ImportError:
- PIKA_IMP_ERR = traceback.format_exc()
- HAS_PIKA = False
-
-
-def rabbitmq_argument_spec():
- return dict(
- login_user=dict(type='str', default='guest'),
- login_password=dict(type='str', default='guest', no_log=True),
- login_host=dict(type='str', default='localhost'),
- login_port=dict(type='str', default='15672'),
- login_protocol=dict(type='str', default='http', choices=['http', 'https']),
- ca_cert=dict(type='path', aliases=['cacert']),
- client_cert=dict(type='path', aliases=['cert']),
- client_key=dict(type='path', aliases=['key']),
- vhost=dict(type='str', default='/'),
- )
-
-
-# notification/rabbitmq_basic_publish.py
-class RabbitClient():
- def __init__(self, module):
- self.module = module
- self.params = module.params
- self.check_required_library()
- self.check_host_params()
- self.url = self.params['url']
- self.proto = self.params['proto']
- self.username = self.params['username']
- self.password = self.params['password']
- self.host = self.params['host']
- self.port = self.params['port']
- self.vhost = self.params['vhost']
- self.queue = self.params['queue']
- self.headers = self.params['headers']
- self.cafile = self.params['cafile']
- self.certfile = self.params['certfile']
- self.keyfile = self.params['keyfile']
-
- if self.host is not None:
- self.build_url()
-
- if self.cafile is not None:
- self.append_ssl_certs()
-
- self.connect_to_rabbitmq()
-
- def check_required_library(self):
- if not HAS_PIKA:
- self.module.fail_json(msg=missing_required_lib("pika"), exception=PIKA_IMP_ERR)
-
- def check_host_params(self):
- # Fail if url is specified and other conflicting parameters have been specified
- if self.params['url'] is not None and any(self.params[k] is not None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
- self.module.fail_json(msg="url and proto, host, port, vhost, username or password cannot be specified at the same time.")
-
- # Fail if url not specified and there is a missing parameter to build the url
- if self.params['url'] is None and any(self.params[k] is None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
- self.module.fail_json(msg="Connection parameters must be passed via url, or, proto, host, port, vhost, username or password.")
-
- def append_ssl_certs(self):
- ssl_options = {}
- if self.cafile:
- ssl_options['cafile'] = self.cafile
- if self.certfile:
- ssl_options['certfile'] = self.certfile
- if self.keyfile:
- ssl_options['keyfile'] = self.keyfile
-
- self.url = self.url + '?ssl_options=' + urllib_parse.quote(json.dumps(ssl_options))
-
- @staticmethod
- def rabbitmq_argument_spec():
- return dict(
- url=dict(type='str'),
- proto=dict(type='str', choices=['amqp', 'amqps']),
- host=dict(type='str'),
- port=dict(type='int'),
- username=dict(type='str'),
- password=dict(type='str', no_log=True),
- vhost=dict(type='str'),
- queue=dict(type='str')
- )
-
- ''' Consider some file size limits here '''
- def _read_file(self, path):
- try:
- with open(path, "rb") as file_handle:
- return file_handle.read()
- except IOError as e:
- self.module.fail_json(msg="Unable to open file %s: %s" % (path, to_native(e)))
-
- @staticmethod
- def _check_file_mime_type(path):
- mime = MimeTypes()
- return mime.guess_type(path)
-
- def build_url(self):
- self.url = '{0}://{1}:{2}@{3}:{4}/{5}'.format(self.proto,
- self.username,
- self.password,
- self.host,
- self.port,
- self.vhost)
-
- def connect_to_rabbitmq(self):
- """
- Function to connect to rabbitmq using username and password
- """
- try:
- parameters = pika.URLParameters(self.url)
- except Exception as e:
- self.module.fail_json(msg="URL malformed: %s" % to_native(e))
-
- try:
- self.connection = pika.BlockingConnection(parameters)
- except Exception as e:
- self.module.fail_json(msg="Connection issue: %s" % to_native(e))
-
- try:
- self.conn_channel = self.connection.channel()
- except pika.exceptions.AMQPChannelError as e:
- self.close_connection()
- self.module.fail_json(msg="Channel issue: %s" % to_native(e))
-
- def close_connection(self):
- try:
- self.connection.close()
- except pika.exceptions.AMQPConnectionError:
- pass
-
- def basic_publish(self):
- self.content_type = self.params.get("content_type")
-
- if self.params.get("body") is not None:
- args = dict(
- body=self.params.get("body"),
- exchange=self.params.get("exchange"),
- routing_key=self.params.get("routing_key"),
- properties=pika.BasicProperties(content_type=self.content_type, delivery_mode=1, headers=self.headers))
-
- # If src (file) is defined and content_type is left as default, do a mime lookup on the file
- if self.params.get("src") is not None and self.content_type == 'text/plain':
- self.content_type = RabbitClient._check_file_mime_type(self.params.get("src"))[0]
- self.headers.update(
- filename=os.path.basename(self.params.get("src"))
- )
-
- args = dict(
- body=self._read_file(self.params.get("src")),
- exchange=self.params.get("exchange"),
- routing_key=self.params.get("routing_key"),
- properties=pika.BasicProperties(content_type=self.content_type,
- delivery_mode=1,
- headers=self.headers
- ))
- elif self.params.get("src") is not None:
- args = dict(
- body=self._read_file(self.params.get("src")),
- exchange=self.params.get("exchange"),
- routing_key=self.params.get("routing_key"),
- properties=pika.BasicProperties(content_type=self.content_type,
- delivery_mode=1,
- headers=self.headers
- ))
-
- try:
- # If queue is not defined, RabbitMQ will return the queue name of the automatically generated queue.
- if self.queue is None:
- result = self.conn_channel.queue_declare(durable=self.params.get("durable"),
- exclusive=self.params.get("exclusive"),
- auto_delete=self.params.get("auto_delete"))
- self.conn_channel.confirm_delivery()
- self.queue = result.method.queue
- else:
- self.conn_channel.queue_declare(queue=self.queue,
- durable=self.params.get("durable"),
- exclusive=self.params.get("exclusive"),
- auto_delete=self.params.get("auto_delete"))
- self.conn_channel.confirm_delivery()
- except Exception as e:
- self.module.fail_json(msg="Queue declare issue: %s" % to_native(e))
-
- # https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/cloudstack.py#L150
- if args['routing_key'] is None:
- args['routing_key'] = self.queue
-
- if args['exchange'] is None:
- args['exchange'] = ''
-
- try:
- self.conn_channel.basic_publish(**args)
- return True
- except pika.exceptions.UnroutableError:
- return False
diff --git a/lib/ansible/module_utils/rax.py b/lib/ansible/module_utils/rax.py
deleted file mode 100644
index d8607541f2..0000000000
--- a/lib/ansible/module_utils/rax.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their own
-# license to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import re
-from uuid import UUID
-
-from ansible.module_utils.six import text_type, binary_type
-
-FINAL_STATUSES = ('ACTIVE', 'ERROR')
-VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
- 'error', 'error_deleting')
-
-CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
- 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
-CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
- 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
- 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
-
-NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
-PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
-SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
-
-
-def rax_slugify(value):
- """Prepend a key with rax_ and normalize the key name"""
- return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
-
-
-def rax_clb_node_to_dict(obj):
- """Function to convert a CLB Node object to a dict"""
- if not obj:
- return {}
- node = obj.to_dict()
- node['id'] = obj.id
- node['weight'] = obj.weight
- return node
-
-
-def rax_to_dict(obj, obj_type='standard'):
- """Generic function to convert a pyrax object to a dict
-
- obj_type values:
- standard
- clb
- server
-
- """
- instance = {}
- for key in dir(obj):
- value = getattr(obj, key)
- if obj_type == 'clb' and key == 'nodes':
- instance[key] = []
- for node in value:
- instance[key].append(rax_clb_node_to_dict(node))
- elif (isinstance(value, list) and len(value) > 0 and
- not isinstance(value[0], NON_CALLABLES)):
- instance[key] = []
- for item in value:
- instance[key].append(rax_to_dict(item))
- elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
- if obj_type == 'server':
- if key == 'image':
- if not value:
- instance['rax_boot_source'] = 'volume'
- else:
- instance['rax_boot_source'] = 'local'
- key = rax_slugify(key)
- instance[key] = value
-
- if obj_type == 'server':
- for attr in ['id', 'accessIPv4', 'name', 'status']:
- instance[attr] = instance.get(rax_slugify(attr))
-
- return instance
-
-
-def rax_find_bootable_volume(module, rax_module, server, exit=True):
- """Find a servers bootable volume"""
- cs = rax_module.cloudservers
- cbs = rax_module.cloud_blockstorage
- server_id = rax_module.utils.get_id(server)
- volumes = cs.volumes.get_server_volumes(server_id)
- bootable_volumes = []
- for volume in volumes:
- vol = cbs.get(volume)
- if module.boolean(vol.bootable):
- bootable_volumes.append(vol)
- if not bootable_volumes:
- if exit:
- module.fail_json(msg='No bootable volumes could be found for '
- 'server %s' % server_id)
- else:
- return False
- elif len(bootable_volumes) > 1:
- if exit:
- module.fail_json(msg='Multiple bootable volumes found for server '
- '%s' % server_id)
- else:
- return False
-
- return bootable_volumes[0]
-
-
-def rax_find_image(module, rax_module, image, exit=True):
- """Find a server image by ID or Name"""
- cs = rax_module.cloudservers
- try:
- UUID(image)
- except ValueError:
- try:
- image = cs.images.find(human_id=image)
- except(cs.exceptions.NotFound,
- cs.exceptions.NoUniqueMatch):
- try:
- image = cs.images.find(name=image)
- except (cs.exceptions.NotFound,
- cs.exceptions.NoUniqueMatch):
- if exit:
- module.fail_json(msg='No matching image found (%s)' %
- image)
- else:
- return False
-
- return rax_module.utils.get_id(image)
-
-
-def rax_find_volume(module, rax_module, name):
- """Find a Block storage volume by ID or name"""
- cbs = rax_module.cloud_blockstorage
- try:
- UUID(name)
- volume = cbs.get(name)
- except ValueError:
- try:
- volume = cbs.find(name=name)
- except rax_module.exc.NotFound:
- volume = None
- except Exception as e:
- module.fail_json(msg='%s' % e)
- return volume
-
-
-def rax_find_network(module, rax_module, network):
- """Find a cloud network by ID or name"""
- cnw = rax_module.cloud_networks
- try:
- UUID(network)
- except ValueError:
- if network.lower() == 'public':
- return cnw.get_server_networks(PUBLIC_NET_ID)
- elif network.lower() == 'private':
- return cnw.get_server_networks(SERVICE_NET_ID)
- else:
- try:
- network_obj = cnw.find_network_by_label(network)
- except (rax_module.exceptions.NetworkNotFound,
- rax_module.exceptions.NetworkLabelNotUnique):
- module.fail_json(msg='No matching network found (%s)' %
- network)
- else:
- return cnw.get_server_networks(network_obj)
- else:
- return cnw.get_server_networks(network)
-
-
-def rax_find_server(module, rax_module, server):
- """Find a Cloud Server by ID or name"""
- cs = rax_module.cloudservers
- try:
- UUID(server)
- server = cs.servers.get(server)
- except ValueError:
- servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
- if not servers:
- module.fail_json(msg='No Server was matched by name, '
- 'try using the Server ID instead')
- if len(servers) > 1:
- module.fail_json(msg='Multiple servers matched by name, '
- 'try using the Server ID instead')
-
- # We made it this far, grab the first and hopefully only server
- # in the list
- server = servers[0]
- return server
-
-
-def rax_find_loadbalancer(module, rax_module, loadbalancer):
- """Find a Cloud Load Balancer by ID or name"""
- clb = rax_module.cloud_loadbalancers
- try:
- found = clb.get(loadbalancer)
- except Exception:
- found = []
- for lb in clb.list():
- if loadbalancer == lb.name:
- found.append(lb)
-
- if not found:
- module.fail_json(msg='No loadbalancer was matched')
-
- if len(found) > 1:
- module.fail_json(msg='Multiple loadbalancers matched')
-
- # We made it this far, grab the first and hopefully only item
- # in the list
- found = found[0]
-
- return found
-
-
-def rax_argument_spec():
- """Return standard base dictionary used for the argument_spec
- argument in AnsibleModule
-
- """
- return dict(
- api_key=dict(type='str', aliases=['password'], no_log=True),
- auth_endpoint=dict(type='str'),
- credentials=dict(type='path', aliases=['creds_file']),
- env=dict(type='str'),
- identity_type=dict(type='str', default='rackspace'),
- region=dict(type='str'),
- tenant_id=dict(type='str'),
- tenant_name=dict(type='str'),
- username=dict(type='str'),
- validate_certs=dict(type='bool', aliases=['verify_ssl']),
- )
-
-
-def rax_required_together():
- """Return the default list used for the required_together argument to
- AnsibleModule"""
- return [['api_key', 'username']]
-
-
-def setup_rax_module(module, rax_module, region_required=True):
- """Set up pyrax in a standard way for all modules"""
- rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
- rax_module.USER_AGENT)
-
- api_key = module.params.get('api_key')
- auth_endpoint = module.params.get('auth_endpoint')
- credentials = module.params.get('credentials')
- env = module.params.get('env')
- identity_type = module.params.get('identity_type')
- region = module.params.get('region')
- tenant_id = module.params.get('tenant_id')
- tenant_name = module.params.get('tenant_name')
- username = module.params.get('username')
- verify_ssl = module.params.get('validate_certs')
-
- if env is not None:
- rax_module.set_environment(env)
-
- rax_module.set_setting('identity_type', identity_type)
- if verify_ssl is not None:
- rax_module.set_setting('verify_ssl', verify_ssl)
- if auth_endpoint is not None:
- rax_module.set_setting('auth_endpoint', auth_endpoint)
- if tenant_id is not None:
- rax_module.set_setting('tenant_id', tenant_id)
- if tenant_name is not None:
- rax_module.set_setting('tenant_name', tenant_name)
-
- try:
- username = username or os.environ.get('RAX_USERNAME')
- if not username:
- username = rax_module.get_setting('keyring_username')
- if username:
- api_key = 'USE_KEYRING'
- if not api_key:
- api_key = os.environ.get('RAX_API_KEY')
- credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
- os.environ.get('RAX_CREDS_FILE'))
- region = (region or os.environ.get('RAX_REGION') or
- rax_module.get_setting('region'))
- except KeyError as e:
- module.fail_json(msg='Unable to load %s' % e.message)
-
- try:
- if api_key and username:
- if api_key == 'USE_KEYRING':
- rax_module.keyring_auth(username, region=region)
- else:
- rax_module.set_credentials(username, api_key=api_key,
- region=region)
- elif credentials:
- credentials = os.path.expanduser(credentials)
- rax_module.set_credential_file(credentials, region=region)
- else:
- raise Exception('No credentials supplied!')
- except Exception as e:
- if e.message:
- msg = str(e.message)
- else:
- msg = repr(e)
- module.fail_json(msg=msg)
-
- if region_required and region not in rax_module.regions:
- module.fail_json(msg='%s is not a valid region, must be one of: %s' %
- (region, ','.join(rax_module.regions)))
-
- return rax_module
diff --git a/lib/ansible/module_utils/redfish_utils.py b/lib/ansible/module_utils/redfish_utils.py
deleted file mode 100644
index 8fc6b42e4d..0000000000
--- a/lib/ansible/module_utils/redfish_utils.py
+++ /dev/null
@@ -1,2458 +0,0 @@
-# Copyright (c) 2017-2018 Dell EMC Inc.
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import json
-from ansible.module_utils.urls import open_url
-from ansible.module_utils._text import to_text
-from ansible.module_utils.six.moves import http_client
-from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-
-GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
-POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
- 'OData-Version': '4.0'}
-PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
- 'OData-Version': '4.0'}
-DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
-
-DEPRECATE_MSG = 'Issuing a data modification command without specifying the '\
- 'ID of the target %(resource)s resource when there is more '\
- 'than one %(resource)s will use the first one in the '\
- 'collection. Use the `resource_id` option to specify the '\
- 'target %(resource)s ID'
-
-
-class RedfishUtils(object):
-
- def __init__(self, creds, root_uri, timeout, module, resource_id=None,
- data_modification=False):
- self.root_uri = root_uri
- self.creds = creds
- self.timeout = timeout
- self.module = module
- self.service_root = '/redfish/v1/'
- self.resource_id = resource_id
- self.data_modification = data_modification
- self._init_session()
-
- # The following functions are to send GET/POST/PATCH/DELETE requests
- def get_request(self, uri):
- try:
- resp = open_url(uri, method="GET", headers=GET_HEADERS,
- url_username=self.creds['user'],
- url_password=self.creds['pswd'],
- force_basic_auth=True, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout)
- data = json.loads(resp.read())
- headers = dict((k.lower(), v) for (k, v) in resp.info().items())
- except HTTPError as e:
- msg = self._get_extended_message(e)
- return {'ret': False,
- 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'"
- % (e.code, uri, msg),
- 'status': e.code}
- except URLError as e:
- return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
- % (uri, e.reason)}
- # Almost all errors should be caught above, but just in case
- except Exception as e:
- return {'ret': False,
- 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
- return {'ret': True, 'data': data, 'headers': headers}
-
- def post_request(self, uri, pyld):
- try:
- resp = open_url(uri, data=json.dumps(pyld),
- headers=POST_HEADERS, method="POST",
- url_username=self.creds['user'],
- url_password=self.creds['pswd'],
- force_basic_auth=True, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout)
- except HTTPError as e:
- msg = self._get_extended_message(e)
- return {'ret': False,
- 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'"
- % (e.code, uri, msg),
- 'status': e.code}
- except URLError as e:
- return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
- % (uri, e.reason)}
- # Almost all errors should be caught above, but just in case
- except Exception as e:
- return {'ret': False,
- 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
- return {'ret': True, 'resp': resp}
-
- def patch_request(self, uri, pyld):
- headers = PATCH_HEADERS
- r = self.get_request(uri)
- if r['ret']:
- # Get etag from etag header or @odata.etag property
- etag = r['headers'].get('etag')
- if not etag:
- etag = r['data'].get('@odata.etag')
- if etag:
- # Make copy of headers and add If-Match header
- headers = dict(headers)
- headers['If-Match'] = etag
- try:
- resp = open_url(uri, data=json.dumps(pyld),
- headers=headers, method="PATCH",
- url_username=self.creds['user'],
- url_password=self.creds['pswd'],
- force_basic_auth=True, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout)
- except HTTPError as e:
- msg = self._get_extended_message(e)
- return {'ret': False,
- 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'"
- % (e.code, uri, msg),
- 'status': e.code}
- except URLError as e:
- return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'"
- % (uri, e.reason)}
- # Almost all errors should be caught above, but just in case
- except Exception as e:
- return {'ret': False,
- 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))}
- return {'ret': True, 'resp': resp}
-
- def delete_request(self, uri, pyld=None):
- try:
- data = json.dumps(pyld) if pyld else None
- resp = open_url(uri, data=data,
- headers=DELETE_HEADERS, method="DELETE",
- url_username=self.creds['user'],
- url_password=self.creds['pswd'],
- force_basic_auth=True, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout)
- except HTTPError as e:
- msg = self._get_extended_message(e)
- return {'ret': False,
- 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'"
- % (e.code, uri, msg),
- 'status': e.code}
- except URLError as e:
- return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'"
- % (uri, e.reason)}
- # Almost all errors should be caught above, but just in case
- except Exception as e:
- return {'ret': False,
- 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))}
- return {'ret': True, 'resp': resp}
-
- @staticmethod
- def _get_extended_message(error):
- """
- Get Redfish ExtendedInfo message from response payload if present
- :param error: an HTTPError exception
- :type error: HTTPError
- :return: the ExtendedInfo message if present, else standard HTTP error
- """
- msg = http_client.responses.get(error.code, '')
- if error.code >= 400:
- try:
- body = error.read().decode('utf-8')
- data = json.loads(body)
- ext_info = data['error']['@Message.ExtendedInfo']
- msg = ext_info[0]['Message']
- except Exception:
- pass
- return msg
-
- def _init_session(self):
- pass
-
- def _find_accountservice_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'AccountService' not in data:
- return {'ret': False, 'msg': "AccountService resource not found"}
- else:
- account_service = data["AccountService"]["@odata.id"]
- response = self.get_request(self.root_uri + account_service)
- if response['ret'] is False:
- return response
- data = response['data']
- accounts = data['Accounts']['@odata.id']
- if accounts[-1:] == '/':
- accounts = accounts[:-1]
- self.accounts_uri = accounts
- return {'ret': True}
-
- def _find_sessionservice_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'SessionService' not in data:
- return {'ret': False, 'msg': "SessionService resource not found"}
- else:
- session_service = data["SessionService"]["@odata.id"]
- response = self.get_request(self.root_uri + session_service)
- if response['ret'] is False:
- return response
- data = response['data']
- sessions = data['Sessions']['@odata.id']
- if sessions[-1:] == '/':
- sessions = sessions[:-1]
- self.sessions_uri = sessions
- return {'ret': True}
-
- def _get_resource_uri_by_id(self, uris, id_prop):
- for uri in uris:
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- continue
- data = response['data']
- if id_prop == data.get('Id'):
- return uri
- return None
-
- def _find_systems_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Systems' not in data:
- return {'ret': False, 'msg': "Systems resource not found"}
- response = self.get_request(self.root_uri + data['Systems']['@odata.id'])
- if response['ret'] is False:
- return response
- self.systems_uris = [
- i['@odata.id'] for i in response['data'].get('Members', [])]
- if not self.systems_uris:
- return {
- 'ret': False,
- 'msg': "ComputerSystem's Members array is either empty or missing"}
- self.systems_uri = self.systems_uris[0]
- if self.data_modification:
- if self.resource_id:
- self.systems_uri = self._get_resource_uri_by_id(self.systems_uris,
- self.resource_id)
- if not self.systems_uri:
- return {
- 'ret': False,
- 'msg': "System resource %s not found" % self.resource_id}
- elif len(self.systems_uris) > 1:
- self.module.deprecate(DEPRECATE_MSG % {'resource': 'System'},
- version='2.14')
- return {'ret': True}
-
- def _find_updateservice_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'UpdateService' not in data:
- return {'ret': False, 'msg': "UpdateService resource not found"}
- else:
- update = data["UpdateService"]["@odata.id"]
- self.update_uri = update
- response = self.get_request(self.root_uri + update)
- if response['ret'] is False:
- return response
- data = response['data']
- self.firmware_uri = self.software_uri = None
- if 'FirmwareInventory' in data:
- self.firmware_uri = data['FirmwareInventory'][u'@odata.id']
- if 'SoftwareInventory' in data:
- self.software_uri = data['SoftwareInventory'][u'@odata.id']
- return {'ret': True}
-
- def _find_chassis_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Chassis' not in data:
- return {'ret': False, 'msg': "Chassis resource not found"}
- chassis = data["Chassis"]["@odata.id"]
- response = self.get_request(self.root_uri + chassis)
- if response['ret'] is False:
- return response
- self.chassis_uris = [
- i['@odata.id'] for i in response['data'].get('Members', [])]
- if not self.chassis_uris:
- return {'ret': False,
- 'msg': "Chassis Members array is either empty or missing"}
- self.chassis_uri = self.chassis_uris[0]
- if self.data_modification:
- if self.resource_id:
- self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris,
- self.resource_id)
- if not self.chassis_uri:
- return {
- 'ret': False,
- 'msg': "Chassis resource %s not found" % self.resource_id}
- elif len(self.chassis_uris) > 1:
- self.module.deprecate(DEPRECATE_MSG % {'resource': 'Chassis'},
- version='2.14')
- return {'ret': True}
-
- def _find_managers_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Managers' not in data:
- return {'ret': False, 'msg': "Manager resource not found"}
- manager = data["Managers"]["@odata.id"]
- response = self.get_request(self.root_uri + manager)
- if response['ret'] is False:
- return response
- self.manager_uris = [
- i['@odata.id'] for i in response['data'].get('Members', [])]
- if not self.manager_uris:
- return {'ret': False,
- 'msg': "Managers Members array is either empty or missing"}
- self.manager_uri = self.manager_uris[0]
- if self.data_modification:
- if self.resource_id:
- self.manager_uri = self._get_resource_uri_by_id(self.manager_uris,
- self.resource_id)
- if not self.manager_uri:
- return {
- 'ret': False,
- 'msg': "Manager resource %s not found" % self.resource_id}
- elif len(self.manager_uris) > 1:
- self.module.deprecate(DEPRECATE_MSG % {'resource': 'Manager'},
- version='2.14')
- return {'ret': True}
-
- def get_logs(self):
- log_svcs_uri_list = []
- list_of_logs = []
- properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat',
- 'Message', 'MessageId', 'MessageArgs']
-
- # Find LogService
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'LogServices' not in data:
- return {'ret': False, 'msg': "LogServices resource not found"}
-
- # Find all entries in LogServices
- logs_uri = data["LogServices"]["@odata.id"]
- response = self.get_request(self.root_uri + logs_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- for log_svcs_entry in data.get('Members', []):
- response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id'])
- if response['ret'] is False:
- return response
- _data = response['data']
- if 'Entries' in _data:
- log_svcs_uri_list.append(_data['Entries'][u'@odata.id'])
-
- # For each entry in LogServices, get log name and all log entries
- for log_svcs_uri in log_svcs_uri_list:
- logs = {}
- list_of_log_entries = []
- response = self.get_request(self.root_uri + log_svcs_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- logs['Description'] = data.get('Description',
- 'Collection of log entries')
- # Get all log entries for each type of log found
- for logEntry in data.get('Members', []):
- entry = {}
- for prop in properties:
- if prop in logEntry:
- entry[prop] = logEntry.get(prop)
- if entry:
- list_of_log_entries.append(entry)
- log_name = log_svcs_uri.split('/')[-1]
- logs[log_name] = list_of_log_entries
- list_of_logs.append(logs)
-
- # list_of_logs[logs{list_of_log_entries[entry{}]}]
- return {'ret': True, 'entries': list_of_logs}
-
- def clear_logs(self):
- # Find LogService
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'LogServices' not in data:
- return {'ret': False, 'msg': "LogServices resource not found"}
-
- # Find all entries in LogServices
- logs_uri = data["LogServices"]["@odata.id"]
- response = self.get_request(self.root_uri + logs_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- for log_svcs_entry in data[u'Members']:
- response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"])
- if response['ret'] is False:
- return response
- _data = response['data']
- # Check to make sure option is available, otherwise error is ugly
- if "Actions" in _data:
- if "#LogService.ClearLog" in _data[u"Actions"]:
- self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {})
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def aggregate(self, func, uri_list, uri_name):
- ret = True
- entries = []
- for uri in uri_list:
- inventory = func(uri)
- ret = inventory.pop('ret') and ret
- if 'entries' in inventory:
- entries.append(({uri_name: uri},
- inventory['entries']))
- return dict(ret=ret, entries=entries)
-
- def aggregate_chassis(self, func):
- return self.aggregate(func, self.chassis_uris, 'chassis_uri')
-
- def aggregate_managers(self, func):
- return self.aggregate(func, self.manager_uris, 'manager_uri')
-
- def aggregate_systems(self, func):
- return self.aggregate(func, self.systems_uris, 'system_uri')
-
- def get_storage_controller_inventory(self, systems_uri):
- result = {}
- controller_list = []
- controller_results = []
- # Get these entries, but does not fail if not found
- properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
- 'Location', 'Manufacturer', 'Model', 'Name',
- 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
- key = "StorageControllers"
-
- # Find Storage service
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if 'Storage' not in data:
- return {'ret': False, 'msg': "Storage resource not found"}
-
- # Get a list of all storage controllers and build respective URIs
- storage_uri = data['Storage']["@odata.id"]
- response = self.get_request(self.root_uri + storage_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- # Loop through Members and their StorageControllers
- # and gather properties from each StorageController
- if data[u'Members']:
- for storage_member in data[u'Members']:
- storage_member_uri = storage_member[u'@odata.id']
- response = self.get_request(self.root_uri + storage_member_uri)
- data = response['data']
-
- if key in data:
- controller_list = data[key]
- for controller in controller_list:
- controller_result = {}
- for property in properties:
- if property in controller:
- controller_result[property] = controller[property]
- controller_results.append(controller_result)
- result['entries'] = controller_results
- return result
- else:
- return {'ret': False, 'msg': "Storage resource not found"}
-
- def get_multi_storage_controller_inventory(self):
- return self.aggregate_systems(self.get_storage_controller_inventory)
-
- def get_disk_inventory(self, systems_uri):
- result = {'entries': []}
- controller_list = []
- # Get these entries, but does not fail if not found
- properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes',
- 'EncryptionAbility', 'EncryptionStatus',
- 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers',
- 'Manufacturer', 'MediaType', 'Model', 'Name',
- 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision',
- 'RotationSpeedRPM', 'SerialNumber', 'Status']
-
- # Find Storage service
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if 'SimpleStorage' not in data and 'Storage' not in data:
- return {'ret': False, 'msg': "SimpleStorage and Storage resource \
- not found"}
-
- if 'Storage' in data:
- # Get a list of all storage controllers and build respective URIs
- storage_uri = data[u'Storage'][u'@odata.id']
- response = self.get_request(self.root_uri + storage_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if data[u'Members']:
- for controller in data[u'Members']:
- controller_list.append(controller[u'@odata.id'])
- for c in controller_list:
- uri = self.root_uri + c
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
- controller_name = 'Controller 1'
- if 'StorageControllers' in data:
- sc = data['StorageControllers']
- if sc:
- if 'Name' in sc[0]:
- controller_name = sc[0]['Name']
- else:
- sc_id = sc[0].get('Id', '1')
- controller_name = 'Controller %s' % sc_id
- drive_results = []
- if 'Drives' in data:
- for device in data[u'Drives']:
- disk_uri = self.root_uri + device[u'@odata.id']
- response = self.get_request(disk_uri)
- data = response['data']
-
- drive_result = {}
- for property in properties:
- if property in data:
- if data[property] is not None:
- drive_result[property] = data[property]
- drive_results.append(drive_result)
- drives = {'Controller': controller_name,
- 'Drives': drive_results}
- result["entries"].append(drives)
-
- if 'SimpleStorage' in data:
- # Get a list of all storage controllers and build respective URIs
- storage_uri = data["SimpleStorage"]["@odata.id"]
- response = self.get_request(self.root_uri + storage_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for controller in data[u'Members']:
- controller_list.append(controller[u'@odata.id'])
-
- for c in controller_list:
- uri = self.root_uri + c
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Name' in data:
- controller_name = data['Name']
- else:
- sc_id = data.get('Id', '1')
- controller_name = 'Controller %s' % sc_id
- drive_results = []
- for device in data[u'Devices']:
- drive_result = {}
- for property in properties:
- if property in device:
- drive_result[property] = device[property]
- drive_results.append(drive_result)
- drives = {'Controller': controller_name,
- 'Drives': drive_results}
- result["entries"].append(drives)
-
- return result
-
- def get_multi_disk_inventory(self):
- return self.aggregate_systems(self.get_disk_inventory)
-
- def get_volume_inventory(self, systems_uri):
- result = {'entries': []}
- controller_list = []
- volume_list = []
- # Get these entries, but does not fail if not found
- properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes',
- 'Capacity', 'CapacityBytes', 'CapacitySources',
- 'Encrypted', 'EncryptionTypes', 'Identifiers',
- 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities',
- 'AllocatedPools', 'Status']
-
- # Find Storage service
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if 'SimpleStorage' not in data and 'Storage' not in data:
- return {'ret': False, 'msg': "SimpleStorage and Storage resource \
- not found"}
-
- if 'Storage' in data:
- # Get a list of all storage controllers and build respective URIs
- storage_uri = data[u'Storage'][u'@odata.id']
- response = self.get_request(self.root_uri + storage_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if data.get('Members'):
- for controller in data[u'Members']:
- controller_list.append(controller[u'@odata.id'])
- for c in controller_list:
- uri = self.root_uri + c
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
- controller_name = 'Controller 1'
- if 'StorageControllers' in data:
- sc = data['StorageControllers']
- if sc:
- if 'Name' in sc[0]:
- controller_name = sc[0]['Name']
- else:
- sc_id = sc[0].get('Id', '1')
- controller_name = 'Controller %s' % sc_id
- volume_results = []
- if 'Volumes' in data:
- # Get a list of all volumes and build respective URIs
- volumes_uri = data[u'Volumes'][u'@odata.id']
- response = self.get_request(self.root_uri + volumes_uri)
- data = response['data']
-
- if data.get('Members'):
- for volume in data[u'Members']:
- volume_list.append(volume[u'@odata.id'])
- for v in volume_list:
- uri = self.root_uri + v
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- volume_result = {}
- for property in properties:
- if property in data:
- if data[property] is not None:
- volume_result[property] = data[property]
-
- # Get related Drives Id
- drive_id_list = []
- if 'Links' in data:
- if 'Drives' in data[u'Links']:
- for link in data[u'Links'][u'Drives']:
- drive_id_link = link[u'@odata.id']
- drive_id = drive_id_link.split("/")[-1]
- drive_id_list.append({'Id': drive_id})
- volume_result['Linked_drives'] = drive_id_list
- volume_results.append(volume_result)
- volumes = {'Controller': controller_name,
- 'Volumes': volume_results}
- result["entries"].append(volumes)
- else:
- return {'ret': False, 'msg': "Storage resource not found"}
-
- return result
-
- def get_multi_volume_inventory(self):
- return self.aggregate_systems(self.get_volume_inventory)
-
- def restart_manager_gracefully(self):
- result = {}
- key = "Actions"
-
- # Search for 'key' entry and extract URI from it
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- action_uri = data[key]["#Manager.Reset"]["target"]
-
- payload = {'ResetType': 'GracefulRestart'}
- response = self.post_request(self.root_uri + action_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def manage_indicator_led(self, command):
- result = {}
- key = 'IndicatorLED'
-
- payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', "IndicatorLedBlink": 'Blinking'}
-
- result = {}
- response = self.get_request(self.root_uri + self.chassis_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- if command in payloads.keys():
- payload = {'IndicatorLED': payloads[command]}
- response = self.patch_request(self.root_uri + self.chassis_uri, payload)
- if response['ret'] is False:
- return response
- else:
- return {'ret': False, 'msg': 'Invalid command'}
-
- return result
-
- def _map_reset_type(self, reset_type, allowable_values):
- equiv_types = {
- 'On': 'ForceOn',
- 'ForceOn': 'On',
- 'ForceOff': 'GracefulShutdown',
- 'GracefulShutdown': 'ForceOff',
- 'GracefulRestart': 'ForceRestart',
- 'ForceRestart': 'GracefulRestart'
- }
-
- if reset_type in allowable_values:
- return reset_type
- if reset_type not in equiv_types:
- return reset_type
- mapped_type = equiv_types[reset_type]
- if mapped_type in allowable_values:
- return mapped_type
- return reset_type
-
- def manage_system_power(self, command):
- key = "Actions"
- reset_type_values = ['On', 'ForceOff', 'GracefulShutdown',
- 'GracefulRestart', 'ForceRestart', 'Nmi',
- 'ForceOn', 'PushPowerButton', 'PowerCycle']
-
- # command should be PowerOn, PowerForceOff, etc.
- if not command.startswith('Power'):
- return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
- reset_type = command[5:]
-
- # map Reboot to a ResetType that does a reboot
- if reset_type == 'Reboot':
- reset_type = 'GracefulRestart'
-
- if reset_type not in reset_type_values:
- return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
-
- # read the system resource and get the current power state
- response = self.get_request(self.root_uri + self.systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- power_state = data.get('PowerState')
-
- # if power is already in target state, nothing to do
- if power_state == "On" and reset_type in ['On', 'ForceOn']:
- return {'ret': True, 'changed': False}
- if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']:
- return {'ret': True, 'changed': False}
-
- # get the #ComputerSystem.Reset Action and target URI
- if key not in data or '#ComputerSystem.Reset' not in data[key]:
- return {'ret': False, 'msg': 'Action #ComputerSystem.Reset not found'}
- reset_action = data[key]['#ComputerSystem.Reset']
- if 'target' not in reset_action:
- return {'ret': False,
- 'msg': 'target URI missing from Action #ComputerSystem.Reset'}
- action_uri = reset_action['target']
-
- # get AllowableValues from ActionInfo
- allowable_values = None
- if '@Redfish.ActionInfo' in reset_action:
- action_info_uri = reset_action.get('@Redfish.ActionInfo')
- response = self.get_request(self.root_uri + action_info_uri)
- if response['ret'] is True:
- data = response['data']
- if 'Parameters' in data:
- params = data['Parameters']
- for param in params:
- if param.get('Name') == 'ResetType':
- allowable_values = param.get('AllowableValues')
- break
-
- # fallback to @Redfish.AllowableValues annotation
- if allowable_values is None:
- allowable_values = reset_action.get('ResetType@Redfish.AllowableValues', [])
-
- # map ResetType to an allowable value if needed
- if reset_type not in allowable_values:
- reset_type = self._map_reset_type(reset_type, allowable_values)
-
- # define payload
- payload = {'ResetType': reset_type}
-
- # POST to Action URI
- response = self.post_request(self.root_uri + action_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True}
-
- def _find_account_uri(self, username=None, acct_id=None):
- if not any((username, acct_id)):
- return {'ret': False, 'msg':
- 'Must provide either account_id or account_username'}
-
- response = self.get_request(self.root_uri + self.accounts_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- uris = [a.get('@odata.id') for a in data.get('Members', []) if
- a.get('@odata.id')]
- for uri in uris:
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- continue
- data = response['data']
- headers = response['headers']
- if username:
- if username == data.get('UserName'):
- return {'ret': True, 'data': data,
- 'headers': headers, 'uri': uri}
- if acct_id:
- if acct_id == data.get('Id'):
- return {'ret': True, 'data': data,
- 'headers': headers, 'uri': uri}
-
- return {'ret': False, 'no_match': True, 'msg':
- 'No account with the given account_id or account_username found'}
-
- def _find_empty_account_slot(self):
- response = self.get_request(self.root_uri + self.accounts_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- uris = [a.get('@odata.id') for a in data.get('Members', []) if
- a.get('@odata.id')]
- if uris:
- # first slot may be reserved, so move to end of list
- uris += [uris.pop(0)]
- for uri in uris:
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- continue
- data = response['data']
- headers = response['headers']
- if data.get('UserName') == "" and not data.get('Enabled', True):
- return {'ret': True, 'data': data,
- 'headers': headers, 'uri': uri}
-
- return {'ret': False, 'no_match': True, 'msg':
- 'No empty account slot found'}
-
- def list_users(self):
- result = {}
- # listing all users has always been slower than other operations, why?
- user_list = []
- users_results = []
- # Get these entries, but does not fail if not found
- properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled']
-
- response = self.get_request(self.root_uri + self.accounts_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for users in data.get('Members', []):
- user_list.append(users[u'@odata.id']) # user_list[] are URIs
-
- # for each user, get details
- for uri in user_list:
- user = {}
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- for property in properties:
- if property in data:
- user[property] = data[property]
-
- users_results.append(user)
- result["entries"] = users_results
- return result
-
- def add_user_via_patch(self, user):
- if user.get('account_id'):
- # If Id slot specified, use it
- response = self._find_account_uri(acct_id=user.get('account_id'))
- else:
- # Otherwise find first empty slot
- response = self._find_empty_account_slot()
-
- if not response['ret']:
- return response
- uri = response['uri']
- payload = {}
- if user.get('account_username'):
- payload['UserName'] = user.get('account_username')
- if user.get('account_password'):
- payload['Password'] = user.get('account_password')
- if user.get('account_roleid'):
- payload['RoleId'] = user.get('account_roleid')
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def add_user(self, user):
- if not user.get('account_username'):
- return {'ret': False, 'msg':
- 'Must provide account_username for AddUser command'}
-
- response = self._find_account_uri(username=user.get('account_username'))
- if response['ret']:
- # account_username already exists, nothing to do
- return {'ret': True, 'changed': False}
-
- response = self.get_request(self.root_uri + self.accounts_uri)
- if not response['ret']:
- return response
- headers = response['headers']
-
- if 'allow' in headers:
- methods = [m.strip() for m in headers.get('allow').split(',')]
- if 'POST' not in methods:
- # if Allow header present and POST not listed, add via PATCH
- return self.add_user_via_patch(user)
-
- payload = {}
- if user.get('account_username'):
- payload['UserName'] = user.get('account_username')
- if user.get('account_password'):
- payload['Password'] = user.get('account_password')
- if user.get('account_roleid'):
- payload['RoleId'] = user.get('account_roleid')
-
- response = self.post_request(self.root_uri + self.accounts_uri, payload)
- if not response['ret']:
- if response.get('status') == 405:
- # if POST returned a 405, try to add via PATCH
- return self.add_user_via_patch(user)
- else:
- return response
- return {'ret': True}
-
- def enable_user(self, user):
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- data = response['data']
-
- if data.get('Enabled', True):
- # account already enabled, nothing to do
- return {'ret': True, 'changed': False}
-
- payload = {'Enabled': True}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def delete_user_via_patch(self, user, uri=None, data=None):
- if not uri:
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- data = response['data']
-
- if data and data.get('UserName') == '' and not data.get('Enabled', False):
- # account UserName already cleared, nothing to do
- return {'ret': True, 'changed': False}
-
- payload = {'UserName': ''}
- if data.get('Enabled', False):
- payload['Enabled'] = False
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def delete_user(self, user):
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- if response.get('no_match'):
- # account does not exist, nothing to do
- return {'ret': True, 'changed': False}
- else:
- # some error encountered
- return response
-
- uri = response['uri']
- headers = response['headers']
- data = response['data']
-
- if 'allow' in headers:
- methods = [m.strip() for m in headers.get('allow').split(',')]
- if 'DELETE' not in methods:
- # if Allow header present and DELETE not listed, del via PATCH
- return self.delete_user_via_patch(user, uri=uri, data=data)
-
- response = self.delete_request(self.root_uri + uri)
- if not response['ret']:
- if response.get('status') == 405:
- # if DELETE returned a 405, try to delete via PATCH
- return self.delete_user_via_patch(user, uri=uri, data=data)
- else:
- return response
- return {'ret': True}
-
- def disable_user(self, user):
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- data = response['data']
-
- if not data.get('Enabled'):
- # account already disabled, nothing to do
- return {'ret': True, 'changed': False}
-
- payload = {'Enabled': False}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def update_user_role(self, user):
- if not user.get('account_roleid'):
- return {'ret': False, 'msg':
- 'Must provide account_roleid for UpdateUserRole command'}
-
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- data = response['data']
-
- if data.get('RoleId') == user.get('account_roleid'):
- # account already has RoleId , nothing to do
- return {'ret': True, 'changed': False}
-
- payload = {'RoleId': user.get('account_roleid')}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def update_user_password(self, user):
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- payload = {'Password': user['account_password']}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def update_user_name(self, user):
- if not user.get('account_updatename'):
- return {'ret': False, 'msg':
- 'Must provide account_updatename for UpdateUserName command'}
-
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- payload = {'UserName': user['account_updatename']}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def update_accountservice_properties(self, user):
- if user.get('account_properties') is None:
- return {'ret': False, 'msg':
- 'Must provide account_properties for UpdateAccountServiceProperties command'}
- account_properties = user.get('account_properties')
-
- # Find AccountService
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'AccountService' not in data:
- return {'ret': False, 'msg': "AccountService resource not found"}
- accountservice_uri = data["AccountService"]["@odata.id"]
-
- # Check support or not
- response = self.get_request(self.root_uri + accountservice_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- for property_name in account_properties.keys():
- if property_name not in data:
- return {'ret': False, 'msg':
- 'property %s not supported' % property_name}
-
- # if properties is already matched, nothing to do
- need_change = False
- for property_name in account_properties.keys():
- if account_properties[property_name] != data[property_name]:
- need_change = True
- break
-
- if not need_change:
- return {'ret': True, 'changed': False, 'msg': "AccountService properties already set"}
-
- payload = account_properties
- response = self.patch_request(self.root_uri + accountservice_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "Modified AccountService properties"}
-
- def get_sessions(self):
- result = {}
- # listing all users has always been slower than other operations, why?
- session_list = []
- sessions_results = []
- # Get these entries, but does not fail if not found
- properties = ['Description', 'Id', 'Name', 'UserName']
-
- response = self.get_request(self.root_uri + self.sessions_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for sessions in data[u'Members']:
- session_list.append(sessions[u'@odata.id']) # session_list[] are URIs
-
- # for each session, get details
- for uri in session_list:
- session = {}
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- for property in properties:
- if property in data:
- session[property] = data[property]
-
- sessions_results.append(session)
- result["entries"] = sessions_results
- return result
-
- def clear_sessions(self):
- response = self.get_request(self.root_uri + self.sessions_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- # if no active sessions, return as success
- if data['Members@odata.count'] == 0:
- return {'ret': True, 'changed': False, 'msg': "There is no active sessions"}
-
- # loop to delete every active session
- for session in data[u'Members']:
- response = self.delete_request(self.root_uri + session[u'@odata.id'])
- if response['ret'] is False:
- return response
-
- return {'ret': True, 'changed': True, 'msg': "Clear all sessions successfully"}
-
- def get_firmware_update_capabilities(self):
- result = {}
- response = self.get_request(self.root_uri + self.update_uri)
- if response['ret'] is False:
- return response
-
- result['ret'] = True
-
- result['entries'] = {}
-
- data = response['data']
-
- if "Actions" in data:
- actions = data['Actions']
- if len(actions) > 0:
- for key in actions.keys():
- action = actions.get(key)
- if 'title' in action:
- title = action['title']
- else:
- title = key
- result['entries'][title] = action.get('TransferProtocol@Redfish.AllowableValues',
- ["Key TransferProtocol@Redfish.AllowableValues not found"])
- else:
- return {'ret': "False", 'msg': "Actions list is empty."}
- else:
- return {'ret': "False", 'msg': "Key Actions not found."}
- return result
-
- def _software_inventory(self, uri):
- result = {}
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- result['entries'] = []
- for member in data[u'Members']:
- uri = self.root_uri + member[u'@odata.id']
- # Get details for each software or firmware member
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- software = {}
- # Get these standard properties if present
- for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
- 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
- 'ReleaseDate']:
- if key in data:
- software[key] = data.get(key)
- result['entries'].append(software)
- return result
-
- def get_firmware_inventory(self):
- if self.firmware_uri is None:
- return {'ret': False, 'msg': 'No FirmwareInventory resource found'}
- else:
- return self._software_inventory(self.firmware_uri)
-
- def get_software_inventory(self):
- if self.software_uri is None:
- return {'ret': False, 'msg': 'No SoftwareInventory resource found'}
- else:
- return self._software_inventory(self.software_uri)
-
    def _get_allowable_values(self, action, name, default_values=None):
        """Resolve the allowable values for one parameter of a Redfish action.

        Lookup order: the ActionInfo resource referenced by the action, then
        the ``<name>@Redfish.AllowableValues`` annotation on the action dict
        itself, then *default_values*.

        :param action: action dict taken from a resource's ``Actions``
        :param name: parameter name, e.g. ``'TransferProtocol'``
        :param default_values: fallback list (defaults to an empty list)
        :return: list of allowable values (possibly empty)
        """
        if default_values is None:
            default_values = []
        allowable_values = None
        # get Allowable values from ActionInfo
        if '@Redfish.ActionInfo' in action:
            action_info_uri = action.get('@Redfish.ActionInfo')
            response = self.get_request(self.root_uri + action_info_uri)
            if response['ret'] is True:
                data = response['data']
                if 'Parameters' in data:
                    params = data['Parameters']
                    for param in params:
                        if param.get('Name') == name:
                            allowable_values = param.get('AllowableValues')
                            break
        # fallback to @Redfish.AllowableValues annotation
        if allowable_values is None:
            prop = '%s@Redfish.AllowableValues' % name
            if prop in action:
                allowable_values = action[prop]
        # fallback to default values
        if allowable_values is None:
            allowable_values = default_values
        return allowable_values
-
    def simple_update(self, update_opts):
        """Issue an UpdateService SimpleUpdate action.

        Validates that the service advertises ``#UpdateService.SimpleUpdate``
        and that the requested protocol/targets are allowed, then POSTs the
        update request.

        :param update_opts: dict with ``update_image_uri`` (required) and
            optional ``update_protocol``, ``update_targets``, ``update_creds``
            (the latter a dict with ``username``/``password``)
        :return: dict with ``ret``/``changed``/``msg`` on success; error dict
            or the failed response dict otherwise
        """
        image_uri = update_opts.get('update_image_uri')
        protocol = update_opts.get('update_protocol')
        targets = update_opts.get('update_targets')
        creds = update_opts.get('update_creds')

        if not image_uri:
            return {'ret': False, 'msg':
                    'Must specify update_image_uri for the SimpleUpdate command'}

        response = self.get_request(self.root_uri + self.update_uri)
        if response['ret'] is False:
            return response
        data = response['data']
        if 'Actions' not in data:
            return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
        if '#UpdateService.SimpleUpdate' not in data['Actions']:
            return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
        action = data['Actions']['#UpdateService.SimpleUpdate']
        if 'target' not in action:
            return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
        update_uri = action['target']
        if protocol:
            # Spec-defined protocols used only when the service advertises
            # no allowable-values list of its own
            default_values = ['CIFS', 'FTP', 'SFTP', 'HTTP', 'HTTPS', 'NSF',
                              'SCP', 'TFTP', 'OEM', 'NFS']
            allowable_values = self._get_allowable_values(action,
                                                          'TransferProtocol',
                                                          default_values)
            if protocol not in allowable_values:
                return {'ret': False,
                        'msg': 'Specified update_protocol (%s) not supported '
                               'by service. Supported protocols: %s' %
                               (protocol, allowable_values)}
        if targets:
            allowable_values = self._get_allowable_values(action, 'Targets')
            if allowable_values:
                for target in targets:
                    if target not in allowable_values:
                        return {'ret': False,
                                'msg': 'Specified target (%s) not supported '
                                       'by service. Supported targets: %s' %
                                       (target, allowable_values)}

        payload = {
            'ImageURI': image_uri
        }
        if protocol:
            payload["TransferProtocol"] = protocol
        if targets:
            payload["Targets"] = targets
        if creds:
            if creds.get('username'):
                payload["Username"] = creds.get('username')
            if creds.get('password'):
                payload["Password"] = creds.get('password')
        response = self.post_request(self.root_uri + update_uri, payload)
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True,
                'msg': "SimpleUpdate requested"}
-
    def get_bios_attributes(self, systems_uri):
        """Return the BIOS attribute name/value pairs for one system.

        :param systems_uri: system resource URI, relative to ``self.root_uri``
        :return: dict with ``ret`` True and ``entries`` (attribute dict) on
            success; error dict or failed response dict otherwise
        """
        result = {}
        bios_attributes = {}
        key = "Bios"

        # Search for 'key' entry and extract URI from it
        response = self.get_request(self.root_uri + systems_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}

        bios_uri = data[key]["@odata.id"]

        response = self.get_request(self.root_uri + bios_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']
        # Copy every attribute out of the Bios resource's Attributes object
        for attribute in data[u'Attributes'].items():
            bios_attributes[attribute[0]] = attribute[1]
        result["entries"] = bios_attributes
        return result
-
    def get_multi_bios_attributes(self):
        """Aggregate BIOS attributes across all systems."""
        return self.aggregate_systems(self.get_bios_attributes)
-
    def _get_boot_options_dict(self, boot):
        """Build a dict of a system's BootOptions keyed by BootOptionReference.

        Any request failure or missing required key yields an empty dict, so
        callers treat boot-option details as best-effort.

        :param boot: the ``Boot`` object from a system resource
        :return: dict mapping BootOptionReference -> selected properties
        """
        # Get these entries from BootOption, if present
        properties = ['DisplayName', 'BootOptionReference']

        # Retrieve BootOptions if present
        if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']:
            boot_options_uri = boot['BootOptions']["@odata.id"]
            # Get BootOptions resource
            response = self.get_request(self.root_uri + boot_options_uri)
            if response['ret'] is False:
                return {}
            data = response['data']

            # Retrieve Members array
            if 'Members' not in data:
                return {}
            members = data['Members']
        else:
            members = []

        # Build dict of BootOptions keyed by BootOptionReference
        boot_options_dict = {}
        for member in members:
            if '@odata.id' not in member:
                return {}
            boot_option_uri = member['@odata.id']
            response = self.get_request(self.root_uri + boot_option_uri)
            if response['ret'] is False:
                return {}
            data = response['data']
            if 'BootOptionReference' not in data:
                return {}
            boot_option_ref = data['BootOptionReference']

            # fetch the props to display for this boot device
            boot_props = {}
            for prop in properties:
                if prop in data:
                    boot_props[prop] = data[prop]

            boot_options_dict[boot_option_ref] = boot_props

        return boot_options_dict
-
    def get_boot_order(self, systems_uri):
        """Return the boot-device list for one system, in BootOrder order.

        Each entry is the BootOption details when resolvable, otherwise a
        stub dict carrying just the BootOptionReference.

        :param systems_uri: system resource URI, relative to ``self.root_uri``
        :return: dict with ``ret`` True and ``entries`` on success; error
            dict or failed response dict otherwise
        """
        result = {}

        # Retrieve System resource
        response = self.get_request(self.root_uri + systems_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        # Confirm needed Boot properties are present
        if 'Boot' not in data or 'BootOrder' not in data['Boot']:
            return {'ret': False, 'msg': "Key BootOrder not found"}

        boot = data['Boot']
        boot_order = boot['BootOrder']
        boot_options_dict = self._get_boot_options_dict(boot)

        # Build boot device list
        boot_device_list = []
        for ref in boot_order:
            boot_device_list.append(
                boot_options_dict.get(ref, {'BootOptionReference': ref}))

        result["entries"] = boot_device_list
        return result
-
    def get_multi_boot_order(self):
        """Aggregate boot order across all systems."""
        return self.aggregate_systems(self.get_boot_order)
-
    def get_boot_override(self, systems_uri):
        """Return the boot-source override settings for one system.

        Fails if the override is disabled (``BootSourceOverrideEnabled`` is
        False); otherwise collects the non-None override properties.

        :param systems_uri: system resource URI, relative to ``self.root_uri``
        :return: dict with ``ret`` True and ``entries`` on success; error
            dict or failed response dict otherwise
        """
        result = {}

        properties = ["BootSourceOverrideEnabled", "BootSourceOverrideTarget",
                      "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "BootSourceOverrideTarget@Redfish.AllowableValues"]

        response = self.get_request(self.root_uri + systems_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if 'Boot' not in data:
            return {'ret': False, 'msg': "Key Boot not found"}

        boot = data['Boot']

        boot_overrides = {}
        if "BootSourceOverrideEnabled" in boot:
            if boot["BootSourceOverrideEnabled"] is not False:
                # NOTE: loop variable shadows the 'property' builtin
                # (pre-existing convention in this file)
                for property in properties:
                    if property in boot:
                        if boot[property] is not None:
                            boot_overrides[property] = boot[property]
            else:
                return {'ret': False, 'msg': "No boot override is enabled."}

        result['entries'] = boot_overrides
        return result
-
    def get_multi_boot_override(self):
        """Aggregate boot override settings across all systems."""
        return self.aggregate_systems(self.get_boot_override)
-
    def set_bios_default_settings(self):
        """Reset BIOS settings to factory defaults via #Bios.ResetBios.

        Looks up the Bios resource on ``self.systems_uri`` and POSTs an empty
        payload to its ResetBios action target.

        :return: dict with ``ret``/``changed``/``msg`` on success; error dict
            or failed response dict otherwise
        """
        result = {}
        key = "Bios"

        # Search for 'key' entry and extract URI from it
        response = self.get_request(self.root_uri + self.systems_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}

        bios_uri = data[key]["@odata.id"]

        # Extract proper URI
        response = self.get_request(self.root_uri + bios_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']
        reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"]

        response = self.post_request(self.root_uri + reset_bios_settings_uri, {})
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"}
-
    def set_one_time_boot_device(self, bootdevice, uefi_target, boot_next):
        """Set a one-time (``Once``) boot-source override on the system.

        Validates *bootdevice* against the service's allowable values when
        advertised, and returns ``changed: False`` without patching when the
        requested override is already in effect (idempotency check).

        :param bootdevice: BootSourceOverrideTarget value, e.g. 'Pxe',
            'UefiTarget', 'UefiBootNext'
        :param uefi_target: required when bootdevice == 'UefiTarget'
        :param boot_next: required when bootdevice == 'UefiBootNext'
        :return: dict with ``ret``/``changed`` on success; error dict or
            failed response dict otherwise
        """
        result = {}
        key = "Boot"

        if not bootdevice:
            return {'ret': False,
                    'msg': "bootdevice option required for SetOneTimeBoot"}

        # Search for 'key' entry and extract URI from it
        response = self.get_request(self.root_uri + self.systems_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}

        boot = data[key]

        annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues'
        if annotation in boot:
            allowable_values = boot[annotation]
            if isinstance(allowable_values, list) and bootdevice not in allowable_values:
                return {'ret': False,
                        'msg': "Boot device %s not in list of allowable values (%s)" %
                               (bootdevice, allowable_values)}

        # read existing values
        enabled = boot.get('BootSourceOverrideEnabled')
        target = boot.get('BootSourceOverrideTarget')
        cur_uefi_target = boot.get('UefiTargetBootSourceOverride')
        cur_boot_next = boot.get('BootNext')

        if bootdevice == 'UefiTarget':
            if not uefi_target:
                return {'ret': False,
                        'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"}
            if enabled == 'Once' and target == bootdevice and uefi_target == cur_uefi_target:
                # If properties are already set, no changes needed
                return {'ret': True, 'changed': False}
            payload = {
                'Boot': {
                    'BootSourceOverrideEnabled': 'Once',
                    'BootSourceOverrideTarget': bootdevice,
                    'UefiTargetBootSourceOverride': uefi_target
                }
            }
        elif bootdevice == 'UefiBootNext':
            if not boot_next:
                return {'ret': False,
                        'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"}
            if enabled == 'Once' and target == bootdevice and boot_next == cur_boot_next:
                # If properties are already set, no changes needed
                return {'ret': True, 'changed': False}
            payload = {
                'Boot': {
                    'BootSourceOverrideEnabled': 'Once',
                    'BootSourceOverrideTarget': bootdevice,
                    'BootNext': boot_next
                }
            }
        else:
            if enabled == 'Once' and target == bootdevice:
                # If properties are already set, no changes needed
                return {'ret': True, 'changed': False}
            payload = {
                'Boot': {
                    'BootSourceOverrideEnabled': 'Once',
                    'BootSourceOverrideTarget': bootdevice
                }
            }

        response = self.patch_request(self.root_uri + self.systems_uri, payload)
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True}
-
    def set_bios_attributes(self, attributes):
        """PATCH BIOS attributes through the Bios @Redfish.Settings object.

        Attributes already at the requested value are dropped from the PATCH
        payload; if nothing remains, returns ``changed: False``.

        :param attributes: dict of BIOS attribute name -> desired value
        :return: dict with ``ret``/``changed``/``msg`` on success; error dict
            or failed response dict otherwise
        """
        result = {}
        key = "Bios"

        # Search for 'key' entry and extract URI from it
        response = self.get_request(self.root_uri + self.systems_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}

        bios_uri = data[key]["@odata.id"]

        # Extract proper URI
        response = self.get_request(self.root_uri + bios_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        # Make a copy of the attributes dict
        attrs_to_patch = dict(attributes)

        # Check the attributes
        for attr in attributes:
            if attr not in data[u'Attributes']:
                return {'ret': False, 'msg': "BIOS attribute %s not found" % attr}
            # If already set to requested value, remove it from PATCH payload
            if data[u'Attributes'][attr] == attributes[attr]:
                del attrs_to_patch[attr]

        # Return success w/ changed=False if no attrs need to be changed
        if not attrs_to_patch:
            return {'ret': True, 'changed': False,
                    'msg': "BIOS attributes already set"}

        # Get the SettingsObject URI
        set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"]

        # Construct payload and issue PATCH command
        payload = {"Attributes": attrs_to_patch}
        response = self.patch_request(self.root_uri + set_bios_attr_uri, payload)
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"}
-
    def set_boot_order(self, boot_list):
        """PATCH the system's BootOrder to *boot_list*.

        Validates each reference against the system's BootOptions when those
        can be resolved, and returns ``changed: False`` if the order already
        matches.

        :param boot_list: ordered list of BootOptionReference strings
        :return: dict with ``ret``/``changed``/``msg`` on success; error dict
            or failed response dict otherwise
        """
        if not boot_list:
            return {'ret': False,
                    'msg': "boot_order list required for SetBootOrder command"}

        systems_uri = self.systems_uri
        response = self.get_request(self.root_uri + systems_uri)
        if response['ret'] is False:
            return response
        data = response['data']

        # Confirm needed Boot properties are present
        if 'Boot' not in data or 'BootOrder' not in data['Boot']:
            return {'ret': False, 'msg': "Key BootOrder not found"}

        boot = data['Boot']
        boot_order = boot['BootOrder']
        boot_options_dict = self._get_boot_options_dict(boot)

        # validate boot_list against BootOptionReferences if available
        if boot_options_dict:
            boot_option_references = boot_options_dict.keys()
            for ref in boot_list:
                if ref not in boot_option_references:
                    return {'ret': False,
                            'msg': "BootOptionReference %s not found in BootOptions" % ref}

        # If requested BootOrder is already set, nothing to do
        if boot_order == boot_list:
            return {'ret': True, 'changed': False,
                    'msg': "BootOrder already set to %s" % boot_list}

        payload = {
            'Boot': {
                'BootOrder': boot_list
            }
        }
        response = self.patch_request(self.root_uri + systems_uri, payload)
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True, 'msg': "BootOrder set"}
-
    def set_default_boot_order(self):
        """Invoke #ComputerSystem.SetDefaultBootOrder on the system.

        :return: dict with ``ret``/``changed``/``msg`` on success; error dict
            or failed response dict otherwise
        """
        systems_uri = self.systems_uri
        response = self.get_request(self.root_uri + systems_uri)
        if response['ret'] is False:
            return response
        data = response['data']

        # get the #ComputerSystem.SetDefaultBootOrder Action and target URI
        action = '#ComputerSystem.SetDefaultBootOrder'
        if 'Actions' not in data or action not in data['Actions']:
            return {'ret': False, 'msg': 'Action %s not found' % action}
        if 'target' not in data['Actions'][action]:
            return {'ret': False,
                    'msg': 'target URI missing from Action %s' % action}
        action_uri = data['Actions'][action]['target']

        # POST to Action URI
        payload = {}
        response = self.post_request(self.root_uri + action_uri, payload)
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True,
                'msg': "BootOrder set to default"}
-
    def get_chassis_inventory(self):
        """Return selected identity properties for every chassis.

        :return: dict with ``ret`` True and ``entries`` (one dict per
            chassis in ``self.chassis_uris``) on success; failed response
            dict otherwise
        """
        result = {}
        chassis_results = []

        # Get these entries, but does not fail if not found
        properties = ['ChassisType', 'PartNumber', 'AssetTag',
                      'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model']

        # Go through list
        for chassis_uri in self.chassis_uris:
            response = self.get_request(self.root_uri + chassis_uri)
            if response['ret'] is False:
                return response
            result['ret'] = True
            data = response['data']
            chassis_result = {}
            for property in properties:
                if property in data:
                    chassis_result[property] = data[property]
            chassis_results.append(chassis_result)

        result["entries"] = chassis_results
        return result
-
    def get_fan_inventory(self):
        """Return fan readings from each chassis's Thermal resource.

        Chassis without a Thermal entry are silently skipped.

        :return: dict with ``ret`` True and ``entries`` (flat list of fan
            dicts across all chassis) on success; failed response dict
            otherwise
        """
        result = {}
        fan_results = []
        key = "Thermal"
        # Get these entries, but does not fail if not found
        properties = ['FanName', 'Reading', 'ReadingUnits', 'Status']

        # Go through list
        for chassis_uri in self.chassis_uris:
            response = self.get_request(self.root_uri + chassis_uri)
            if response['ret'] is False:
                return response
            result['ret'] = True
            data = response['data']
            if key in data:
                # match: found an entry for "Thermal" information = fans
                thermal_uri = data[key]["@odata.id"]
                response = self.get_request(self.root_uri + thermal_uri)
                if response['ret'] is False:
                    return response
                result['ret'] = True
                data = response['data']

                for device in data[u'Fans']:
                    fan = {}
                    for property in properties:
                        if property in device:
                            fan[property] = device[property]
                    fan_results.append(fan)
        result["entries"] = fan_results
        return result
-
- def get_chassis_power(self):
- result = {}
- key = "Power"
-
- # Get these entries, but does not fail if not found
- properties = ['Name', 'PowerAllocatedWatts',
- 'PowerAvailableWatts', 'PowerCapacityWatts',
- 'PowerConsumedWatts', 'PowerMetrics',
- 'PowerRequestedWatts', 'RelatedItem', 'Status']
-
- chassis_power_results = []
- # Go through list
- for chassis_uri in self.chassis_uris:
- chassis_power_result = {}
- response = self.get_request(self.root_uri + chassis_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if key in data:
- response = self.get_request(self.root_uri + data[key]['@odata.id'])
- data = response['data']
- if 'PowerControl' in data:
- if len(data['PowerControl']) > 0:
- data = data['PowerControl'][0]
- for property in properties:
- if property in data:
- chassis_power_result[property] = data[property]
- else:
- return {'ret': False, 'msg': 'Key PowerControl not found.'}
- chassis_power_results.append(chassis_power_result)
- else:
- return {'ret': False, 'msg': 'Key Power not found.'}
-
- result['entries'] = chassis_power_results
- return result
-
- def get_chassis_thermals(self):
- result = {}
- sensors = []
- key = "Thermal"
-
- # Get these entries, but does not fail if not found
- properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical',
- 'UpperThresholdFatal', 'UpperThresholdNonCritical',
- 'LowerThresholdCritical', 'LowerThresholdFatal',
- 'LowerThresholdNonCritical', 'MaxReadingRangeTemp',
- 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem',
- 'SensorNumber']
-
- # Go through list
- for chassis_uri in self.chassis_uris:
- response = self.get_request(self.root_uri + chassis_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if key in data:
- thermal_uri = data[key]["@odata.id"]
- response = self.get_request(self.root_uri + thermal_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if "Temperatures" in data:
- for sensor in data[u'Temperatures']:
- sensor_result = {}
- for property in properties:
- if property in sensor:
- if sensor[property] is not None:
- sensor_result[property] = sensor[property]
- sensors.append(sensor_result)
-
- if sensors is None:
- return {'ret': False, 'msg': 'Key Temperatures was not found.'}
-
- result['entries'] = sensors
- return result
-
    def get_cpu_inventory(self, systems_uri):
        """Return selected properties of every processor in one system.

        :param systems_uri: system resource URI, relative to ``self.root_uri``
        :return: dict with ``ret`` True and ``entries`` (list of CPU dicts)
            on success; error dict or failed response dict otherwise
        """
        result = {}
        cpu_list = []
        cpu_results = []
        key = "Processors"
        # Get these entries, but does not fail if not found
        properties = ['Id', 'Manufacturer', 'Model', 'MaxSpeedMHz', 'TotalCores',
                      'TotalThreads', 'Status']

        # Search for 'key' entry and extract URI from it
        response = self.get_request(self.root_uri + systems_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}

        processors_uri = data[key]["@odata.id"]

        # Get a list of all CPUs and build respective URIs
        response = self.get_request(self.root_uri + processors_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        for cpu in data[u'Members']:
            cpu_list.append(cpu[u'@odata.id'])

        for c in cpu_list:
            cpu = {}
            uri = self.root_uri + c
            response = self.get_request(uri)
            if response['ret'] is False:
                return response
            data = response['data']

            for property in properties:
                if property in data:
                    cpu[property] = data[property]

            cpu_results.append(cpu)
        result["entries"] = cpu_results
        return result
-
    def get_multi_cpu_inventory(self):
        """Aggregate CPU inventory across all systems."""
        return self.aggregate_systems(self.get_cpu_inventory)
-
- def get_memory_inventory(self, systems_uri):
- result = {}
- memory_list = []
- memory_results = []
- key = "Memory"
- # Get these entries, but does not fail if not found
- properties = ['SerialNumber', 'MemoryDeviceType', 'PartNuber',
- 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name']
-
- # Search for 'key' entry and extract URI from it
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- memory_uri = data[key]["@odata.id"]
-
- # Get a list of all DIMMs and build respective URIs
- response = self.get_request(self.root_uri + memory_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for dimm in data[u'Members']:
- memory_list.append(dimm[u'@odata.id'])
-
- for m in memory_list:
- dimm = {}
- uri = self.root_uri + m
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if "Status" in data:
- if "State" in data["Status"]:
- if data["Status"]["State"] == "Absent":
- continue
- else:
- continue
-
- for property in properties:
- if property in data:
- dimm[property] = data[property]
-
- memory_results.append(dimm)
- result["entries"] = memory_results
- return result
-
    def get_multi_memory_inventory(self):
        """Aggregate memory inventory across all systems."""
        return self.aggregate_systems(self.get_memory_inventory)
-
    def get_nic_inventory(self, resource_uri):
        """Return selected properties of every EthernetInterface of a resource.

        Works against either a System or a Manager resource, whichever
        *resource_uri* points at.

        :param resource_uri: resource URI, relative to ``self.root_uri``
        :return: dict with ``ret`` True and ``entries`` (list of NIC dicts)
            on success; error dict or failed response dict otherwise
        """
        result = {}
        nic_list = []
        nic_results = []
        key = "EthernetInterfaces"
        # Get these entries, but does not fail if not found
        properties = ['Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
                      'NameServers', 'MACAddress', 'PermanentMACAddress',
                      'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']

        response = self.get_request(self.root_uri + resource_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}

        ethernetinterfaces_uri = data[key]["@odata.id"]

        # Get a list of all network controllers and build respective URIs
        response = self.get_request(self.root_uri + ethernetinterfaces_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        for nic in data[u'Members']:
            nic_list.append(nic[u'@odata.id'])

        for n in nic_list:
            nic = {}
            uri = self.root_uri + n
            response = self.get_request(uri)
            if response['ret'] is False:
                return response
            data = response['data']

            for property in properties:
                if property in data:
                    nic[property] = data[property]

            nic_results.append(nic)
        result["entries"] = nic_results
        return result
-
- def get_multi_nic_inventory(self, resource_type):
- ret = True
- entries = []
-
- # Given resource_type, use the proper URI
- if resource_type == 'Systems':
- resource_uris = self.systems_uris
- elif resource_type == 'Manager':
- resource_uris = self.manager_uris
-
- for resource_uri in resource_uris:
- inventory = self.get_nic_inventory(resource_uri)
- ret = inventory.pop('ret') and ret
- if 'entries' in inventory:
- entries.append(({'resource_uri': resource_uri},
- inventory['entries']))
- return dict(ret=ret, entries=entries)
-
    def get_virtualmedia(self, resource_uri):
        """Return selected properties of every VirtualMedia slot of a manager.

        :param resource_uri: manager resource URI, relative to
            ``self.root_uri``
        :return: dict with ``ret`` True and ``entries`` (list of virtual
            media dicts) on success; error dict or failed response dict
            otherwise
        """
        result = {}
        virtualmedia_list = []
        virtualmedia_results = []
        key = "VirtualMedia"
        # Get these entries, but does not fail if not found
        properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes',
                      'Image', 'ImageName', 'Name', 'WriteProtected',
                      'TransferMethod', 'TransferProtocolType']

        response = self.get_request(self.root_uri + resource_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        if key not in data:
            return {'ret': False, 'msg': "Key %s not found" % key}

        virtualmedia_uri = data[key]["@odata.id"]

        # Get a list of all virtual media and build respective URIs
        response = self.get_request(self.root_uri + virtualmedia_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        for virtualmedia in data[u'Members']:
            virtualmedia_list.append(virtualmedia[u'@odata.id'])

        for n in virtualmedia_list:
            virtualmedia = {}
            uri = self.root_uri + n
            response = self.get_request(uri)
            if response['ret'] is False:
                return response
            data = response['data']

            for property in properties:
                if property in data:
                    virtualmedia[property] = data[property]

            virtualmedia_results.append(virtualmedia)
        result["entries"] = virtualmedia_results
        return result
-
- def get_multi_virtualmedia(self):
- ret = True
- entries = []
-
- resource_uris = self.manager_uris
-
- for resource_uri in resource_uris:
- virtualmedia = self.get_virtualmedia(resource_uri)
- ret = virtualmedia.pop('ret') and ret
- if 'entries' in virtualmedia:
- entries.append(({'resource_uri': resource_uri},
- virtualmedia['entries']))
- return dict(ret=ret, entries=entries)
-
- def get_psu_inventory(self):
- result = {}
- psu_list = []
- psu_results = []
- key = "PowerSupplies"
- # Get these entries, but does not fail if not found
- properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer',
- 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType',
- 'Status']
-
- # Get a list of all Chassis and build URIs, then get all PowerSupplies
- # from each Power entry in the Chassis
- chassis_uri_list = self.chassis_uris
- for chassis_uri in chassis_uri_list:
- response = self.get_request(self.root_uri + chassis_uri)
- if response['ret'] is False:
- return response
-
- result['ret'] = True
- data = response['data']
-
- if 'Power' in data:
- power_uri = data[u'Power'][u'@odata.id']
- else:
- continue
-
- response = self.get_request(self.root_uri + power_uri)
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- psu_list = data[key]
- for psu in psu_list:
- psu_not_present = False
- psu_data = {}
- for property in properties:
- if property in psu:
- if psu[property] is not None:
- if property == 'Status':
- if 'State' in psu[property]:
- if psu[property]['State'] == 'Absent':
- psu_not_present = True
- psu_data[property] = psu[property]
- if psu_not_present:
- continue
- psu_results.append(psu_data)
-
- result["entries"] = psu_results
- if not result["entries"]:
- return {'ret': False, 'msg': "No PowerSupply objects found"}
- return result
-
    def get_multi_psu_inventory(self):
        """Aggregate PSU inventory via the systems aggregator.

        NOTE(review): get_psu_inventory takes no URI parameter, unlike the
        other aggregated getters — confirm aggregate_systems tolerates this.
        """
        return self.aggregate_systems(self.get_psu_inventory)
-
    def get_system_inventory(self, systems_uri):
        """Return selected identity/summary properties for one system.

        :param systems_uri: system resource URI, relative to ``self.root_uri``
        :return: dict with ``ret`` True and ``entries`` (property dict) on
            success; failed response dict otherwise
        """
        result = {}
        inventory = {}
        # Get these entries, but does not fail if not found
        properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer',
                      'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag',
                      'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary',
                      'ProcessorSummary', 'TrustedModules']

        response = self.get_request(self.root_uri + systems_uri)
        if response['ret'] is False:
            return response
        result['ret'] = True
        data = response['data']

        for property in properties:
            if property in data:
                inventory[property] = data[property]

        result["entries"] = inventory
        return result
-
    def get_multi_system_inventory(self):
        """Aggregate system inventory across all systems."""
        return self.aggregate_systems(self.get_system_inventory)
-
    def get_network_protocols(self):
        """Return the manager's NetworkProtocol service settings.

        :return: dict with ``ret`` True and ``entries`` (service name ->
            settings dict) on success; error dict or failed response dict
            otherwise
        """
        result = {}
        service_result = {}
        # Find NetworkProtocol
        response = self.get_request(self.root_uri + self.manager_uri)
        if response['ret'] is False:
            return response
        data = response['data']
        if 'NetworkProtocol' not in data:
            return {'ret': False, 'msg': "NetworkProtocol resource not found"}
        networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]

        response = self.get_request(self.root_uri + networkprotocol_uri)
        if response['ret'] is False:
            return response
        data = response['data']
        # Known protocol services; only those present on the resource are
        # copied into the result
        protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
                             'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
                             'RFB']
        for protocol_service in protocol_services:
            if protocol_service in data.keys():
                service_result[protocol_service] = data[protocol_service]

        result['ret'] = True
        result["entries"] = service_result
        return result
-
    def set_network_protocols(self, manager_services):
        """PATCH the manager's NetworkProtocol service settings.

        Validates and normalizes the requested settings (booleans for
        ProtocolEnabled, ints for Port), confirms the service/properties are
        supported, and skips the PATCH entirely when nothing would change.

        :param manager_services: dict of service name -> dict of property ->
            desired value
        :return: dict with ``ret``/``changed``/``msg`` on success; error
            dict or failed response dict otherwise
        """
        # Check input data validity
        protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
                             'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
                             'RFB']
        # Accepted truthy/falsy spellings for ProtocolEnabled
        protocol_state_onlist = ['true', 'True', True, 'on', 1]
        protocol_state_offlist = ['false', 'False', False, 'off', 0]
        payload = {}
        for service_name in manager_services.keys():
            if service_name not in protocol_services:
                return {'ret': False, 'msg': "Service name %s is invalid" % service_name}
            payload[service_name] = {}
            for service_property in manager_services[service_name].keys():
                value = manager_services[service_name][service_property]
                if service_property in ['ProtocolEnabled', 'protocolenabled']:
                    if value in protocol_state_onlist:
                        payload[service_name]['ProtocolEnabled'] = True
                    elif value in protocol_state_offlist:
                        payload[service_name]['ProtocolEnabled'] = False
                    else:
                        return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
                elif service_property in ['port', 'Port']:
                    if isinstance(value, int):
                        payload[service_name]['Port'] = value
                    elif isinstance(value, str) and value.isdigit():
                        payload[service_name]['Port'] = int(value)
                    else:
                        return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
                else:
                    # Pass any other property through unmodified
                    payload[service_name][service_property] = value

        # Find NetworkProtocol
        response = self.get_request(self.root_uri + self.manager_uri)
        if response['ret'] is False:
            return response
        data = response['data']
        if 'NetworkProtocol' not in data:
            return {'ret': False, 'msg': "NetworkProtocol resource not found"}
        networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]

        # Check service property support or not
        response = self.get_request(self.root_uri + networkprotocol_uri)
        if response['ret'] is False:
            return response
        data = response['data']
        for service_name in payload.keys():
            if service_name not in data:
                return {'ret': False, 'msg': "%s service not supported" % service_name}
            for service_property in payload[service_name].keys():
                if service_property not in data[service_name]:
                    return {'ret': False, 'msg': "%s property for %s service not supported" % (service_property, service_name)}

        # if the protocol is already set, nothing to do
        need_change = False
        for service_name in payload.keys():
            for service_property in payload[service_name].keys():
                value = payload[service_name][service_property]
                if value != data[service_name][service_property]:
                    need_change = True
                    # break exits only the inner property loop; the outer
                    # scan continues, which is harmless since need_change
                    # is already latched
                    break

        if not need_change:
            return {'ret': True, 'changed': False, 'msg': "Manager NetworkProtocol services already set"}

        response = self.patch_request(self.root_uri + networkprotocol_uri, payload)
        if response['ret'] is False:
            return response
        return {'ret': True, 'changed': True, 'msg': "Modified Manager NetworkProtocol services"}
-
- @staticmethod
- def to_singular(resource_name):
- if resource_name.endswith('ies'):
- resource_name = resource_name[:-3] + 'y'
- elif resource_name.endswith('s'):
- resource_name = resource_name[:-1]
- return resource_name
-
    def get_health_resource(self, subsystem, uri, health, expanded):
        """Append Status entries for one resource (or collection) to *health*.

        Mutates ``health[subsystem]`` in place; returns None. A failed GET
        silently skips the resource (best-effort health collection).

        :param subsystem: subsystem name used as the health dict key
        :param uri: resource URI, relative to ``self.root_uri``
        :param health: dict whose ``subsystem`` key holds a list to extend
        :param expanded: pre-fetched resource dict to use instead of a GET,
            or a falsy value to fetch *uri*
        """
        status = 'Status'

        if expanded:
            d = expanded
        else:
            r = self.get_request(self.root_uri + uri)
            if r.get('ret'):
                d = r.get('data')
            else:
                # best-effort: drop this resource on request failure
                return

        if 'Members' in d:  # collections case
            for m in d.get('Members'):
                u = m.get('@odata.id')
                r = self.get_request(self.root_uri + u)
                if r.get('ret'):
                    p = r.get('data')
                    if p:
                        e = {self.to_singular(subsystem.lower()) + '_uri': u,
                             status: p.get(status,
                                           "Status not available")}
                        health[subsystem].append(e)
        else:  # non-collections case
            e = {self.to_singular(subsystem.lower()) + '_uri': uri,
                 status: d.get(status,
                               "Status not available")}
            health[subsystem].append(e)
-
    def get_health_subsystem(self, subsystem, data, health):
        """Collect health for *subsystem* from *data* into *health*.

        Handles the subsystem reference being a list of resource links, a
        single link, or absent — in which case any Members of *data* are
        recursed into. Mutates *health* in place; returns None.

        :param subsystem: subsystem name, e.g. 'Processors'
        :param data: resource dict that may reference the subsystem
        :param health: dict of subsystem name -> list of health entries
        """
        if subsystem in data:
            sub = data.get(subsystem)
            if isinstance(sub, list):
                for r in sub:
                    if '@odata.id' in r:
                        uri = r.get('@odata.id')
                        expanded = None
                        # a fragment URI with extra keys means the entry is
                        # already expanded inline; avoid refetching it
                        if '#' in uri and len(r) > 1:
                            expanded = r
                        self.get_health_resource(subsystem, uri, health, expanded)
            elif isinstance(sub, dict):
                if '@odata.id' in sub:
                    uri = sub.get('@odata.id')
                    self.get_health_resource(subsystem, uri, health, None)
        elif 'Members' in data:
            # collection resource: recurse into each member
            for m in data.get('Members'):
                u = m.get('@odata.id')
                r = self.get_request(self.root_uri + u)
                if r.get('ret'):
                    d = r.get('data')
                    self.get_health_subsystem(subsystem, d, health)
-
- def get_health_report(self, category, uri, subsystems):
- result = {}
- health = {}
- status = 'Status'
-
- # Get health status of top level resource
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- health[category] = {status: data.get(status, "Status not available")}
-
- # Get health status of subsystems
- for sub in subsystems:
- d = None
- if sub.startswith('Links.'): # ex: Links.PCIeDevices
- sub = sub[len('Links.'):]
- d = data.get('Links', {})
- elif '.' in sub: # ex: Thermal.Fans
- p, sub = sub.split('.')
- u = data.get(p, {}).get('@odata.id')
- if u:
- r = self.get_request(self.root_uri + u)
- if r['ret']:
- d = r['data']
- if not d:
- continue
- else: # ex: Memory
- d = data
- health[sub] = []
- self.get_health_subsystem(sub, d, health)
- if not health[sub]:
- del health[sub]
-
- result["entries"] = health
- return result
-
- def get_system_health_report(self, systems_uri):
- subsystems = ['Processors', 'Memory', 'SimpleStorage', 'Storage',
- 'EthernetInterfaces', 'NetworkInterfaces.NetworkPorts',
- 'NetworkInterfaces.NetworkDeviceFunctions']
- return self.get_health_report('System', systems_uri, subsystems)
-
- def get_multi_system_health_report(self):
- return self.aggregate_systems(self.get_system_health_report)
-
- def get_chassis_health_report(self, chassis_uri):
- subsystems = ['Power.PowerSupplies', 'Thermal.Fans',
- 'Links.PCIeDevices']
- return self.get_health_report('Chassis', chassis_uri, subsystems)
-
- def get_multi_chassis_health_report(self):
- return self.aggregate_chassis(self.get_chassis_health_report)
-
- def get_manager_health_report(self, manager_uri):
- subsystems = []
- return self.get_health_report('Manager', manager_uri, subsystems)
-
- def get_multi_manager_health_report(self):
- return self.aggregate_managers(self.get_manager_health_report)
-
- def set_manager_nic(self, nic_addr, nic_config):
- # Get EthernetInterface collection
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'EthernetInterfaces' not in data:
- return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
- ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
- response = self.get_request(self.root_uri + ethernetinterfaces_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- uris = [a.get('@odata.id') for a in data.get('Members', []) if
- a.get('@odata.id')]
-
- # Find target EthernetInterface
- target_ethernet_uri = None
- target_ethernet_current_setting = None
- if nic_addr == 'null':
- # Find root_uri matched EthernetInterface when nic_addr is not specified
- nic_addr = (self.root_uri).split('/')[-1]
- nic_addr = nic_addr.split(':')[0] # split port if existing
- for uri in uris:
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if '"' + nic_addr + '"' in str(data) or "'" + nic_addr + "'" in str(data):
- target_ethernet_uri = uri
- target_ethernet_current_setting = data
- break
- if target_ethernet_uri is None:
- return {'ret': False, 'msg': "No matched EthernetInterface found under Manager"}
-
- # Convert input to payload and check validity
- payload = {}
- for property in nic_config.keys():
- value = nic_config[property]
- if property not in target_ethernet_current_setting:
- return {'ret': False, 'msg': "Property %s in nic_config is invalid" % property}
- if isinstance(value, dict):
- if isinstance(target_ethernet_current_setting[property], dict):
- payload[property] = value
- elif isinstance(target_ethernet_current_setting[property], list):
- payload[property] = list()
- payload[property].append(value)
- else:
- return {'ret': False, 'msg': "Value of property %s in nic_config is invalid" % property}
- else:
- payload[property] = value
-
- # If no need change, nothing to do. If error detected, report it
- need_change = False
- for property in payload.keys():
- set_value = payload[property]
- cur_value = target_ethernet_current_setting[property]
- # type is simple(not dict/list)
- if not isinstance(set_value, dict) and not isinstance(set_value, list):
- if set_value != cur_value:
- need_change = True
- # type is dict
- if isinstance(set_value, dict):
- for subprop in payload[property].keys():
- if subprop not in target_ethernet_current_setting[property]:
- return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop}
- sub_set_value = payload[property][subprop]
- sub_cur_value = target_ethernet_current_setting[property][subprop]
- if sub_set_value != sub_cur_value:
- need_change = True
- # type is list
- if isinstance(set_value, list):
- for i in range(len(set_value)):
- for subprop in payload[property][i].keys():
- if subprop not in target_ethernet_current_setting[property][i]:
- return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop}
- sub_set_value = payload[property][i][subprop]
- sub_cur_value = target_ethernet_current_setting[property][i][subprop]
- if sub_set_value != sub_cur_value:
- need_change = True
-
- if not need_change:
- return {'ret': True, 'changed': False, 'msg': "Manager NIC already set"}
-
- response = self.patch_request(self.root_uri + target_ethernet_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"}
diff --git a/lib/ansible/module_utils/redhat.py b/lib/ansible/module_utils/redhat.py
deleted file mode 100644
index 358a2bd7b1..0000000000
--- a/lib/ansible/module_utils/redhat.py
+++ /dev/null
@@ -1,284 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), James Laska
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import re
-import shutil
-import tempfile
-import types
-
-from ansible.module_utils.six.moves import configparser
-
-
-class RegistrationBase(object):
- def __init__(self, module, username=None, password=None):
- self.module = module
- self.username = username
- self.password = password
-
- def configure(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def enable(self):
- # Remove any existing redhat.repo
- redhat_repo = '/etc/yum.repos.d/redhat.repo'
- if os.path.isfile(redhat_repo):
- os.unlink(redhat_repo)
-
- def register(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unregister(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unsubscribe(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def update_plugin_conf(self, plugin, enabled=True):
- plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
-
- if os.path.isfile(plugin_conf):
- tmpfd, tmpfile = tempfile.mkstemp()
- shutil.copy2(plugin_conf, tmpfile)
- cfg = configparser.ConfigParser()
- cfg.read([tmpfile])
-
- if enabled:
- cfg.set('main', 'enabled', 1)
- else:
- cfg.set('main', 'enabled', 0)
-
- fd = open(tmpfile, 'w+')
- cfg.write(fd)
- fd.close()
- self.module.atomic_move(tmpfile, plugin_conf)
-
- def subscribe(self, **kwargs):
- raise NotImplementedError("Must be implemented by a sub-class")
-
-
-class Rhsm(RegistrationBase):
- def __init__(self, module, username=None, password=None):
- RegistrationBase.__init__(self, module, username, password)
- self.config = self._read_config()
- self.module = module
-
- def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
- '''
- Load RHSM configuration from /etc/rhsm/rhsm.conf.
- Returns:
- * ConfigParser object
- '''
-
- # Read RHSM defaults ...
- cp = configparser.ConfigParser()
- cp.read(rhsm_conf)
-
- # Add support for specifying a default value w/o having to standup some configuration
- # Yeah, I know this should be subclassed ... but, oh well
- def get_option_default(self, key, default=''):
- sect, opt = key.split('.', 1)
- if self.has_section(sect) and self.has_option(sect, opt):
- return self.get(sect, opt)
- else:
- return default
-
- cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser)
-
- return cp
-
- def enable(self):
- '''
- Enable the system to receive updates from subscription-manager.
- This involves updating affected yum plugins and removing any
- conflicting yum repositories.
- '''
- RegistrationBase.enable(self)
- self.update_plugin_conf('rhnplugin', False)
- self.update_plugin_conf('subscription-manager', True)
-
- def configure(self, **kwargs):
- '''
- Configure the system as directed for registration with RHN
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'config']
-
- # Pass supplied **kwargs as parameters to subscription-manager. Ignore
- # non-configuration parameters and replace '_' with '.'. For example,
- # 'server_hostname' becomes '--system.hostname'.
- for k, v in kwargs.items():
- if re.search(r'^(system|rhsm)_', k):
- args.append('--%s=%s' % (k.replace('_', '.'), v))
-
- self.module.run_command(args, check_rc=True)
-
- @property
- def is_registered(self):
- '''
- Determine whether the current system
- Returns:
- * Boolean - whether the current system is currently registered to
- RHN.
- '''
- args = ['subscription-manager', 'identity']
- rc, stdout, stderr = self.module.run_command(args, check_rc=False)
- if rc == 0:
- return True
- else:
- return False
-
- def register(self, username, password, autosubscribe, activationkey):
- '''
- Register the current system to the provided RHN server
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'register']
-
- # Generate command arguments
- if activationkey:
- args.append('--activationkey "%s"' % activationkey)
- else:
- if autosubscribe:
- args.append('--autosubscribe')
- if username:
- args.extend(['--username', username])
- if password:
- args.extend(['--password', password])
-
- # Do the needful...
- rc, stderr, stdout = self.module.run_command(args, check_rc=True)
-
- def unsubscribe(self):
- '''
- Unsubscribe a system from all subscribed channels
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'unsubscribe', '--all']
- rc, stderr, stdout = self.module.run_command(args, check_rc=True)
-
- def unregister(self):
- '''
- Unregister a currently registered system
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'unregister']
- rc, stderr, stdout = self.module.run_command(args, check_rc=True)
- self.update_plugin_conf('rhnplugin', False)
- self.update_plugin_conf('subscription-manager', False)
-
- def subscribe(self, regexp):
- '''
- Subscribe current system to available pools matching the specified
- regular expression
- Raises:
- * Exception - if error occurs while running command
- '''
-
- # Available pools ready for subscription
- available_pools = RhsmPools(self.module)
-
- for pool in available_pools.filter(regexp):
- pool.subscribe()
-
-
-class RhsmPool(object):
- '''
- Convenience class for housing subscription information
- '''
-
- def __init__(self, module, **kwargs):
- self.module = module
- for k, v in kwargs.items():
- setattr(self, k, v)
-
- def __str__(self):
- return str(self.__getattribute__('_name'))
-
- def subscribe(self):
- args = "subscription-manager subscribe --pool %s" % self.PoolId
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
- if rc == 0:
- return True
- else:
- return False
-
-
-class RhsmPools(object):
- """
- This class is used for manipulating pools subscriptions with RHSM
- """
- def __init__(self, module):
- self.module = module
- self.products = self._load_product_list()
-
- def __iter__(self):
- return self.products.__iter__()
-
- def _load_product_list(self):
- """
- Loads list of all available pools for system in data structure
- """
- args = "subscription-manager list --available"
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
-
- products = []
- for line in stdout.split('\n'):
- # Remove leading+trailing whitespace
- line = line.strip()
- # An empty line implies the end of an output group
- if len(line) == 0:
- continue
- # If a colon ':' is found, parse
- elif ':' in line:
- (key, value) = line.split(':', 1)
- key = key.strip().replace(" ", "") # To unify
- value = value.strip()
- if key in ['ProductName', 'SubscriptionName']:
- # Remember the name for later processing
- products.append(RhsmPool(self.module, _name=value, key=value))
- elif products:
- # Associate value with most recently recorded product
- products[-1].__setattr__(key, value)
- # FIXME - log some warning?
- # else:
- # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
- return products
-
- def filter(self, regexp='^$'):
- '''
- Return a list of RhsmPools whose name matches the provided regular expression
- '''
- r = re.compile(regexp)
- for product in self.products:
- if r.search(product._name):
- yield product
diff --git a/lib/ansible/module_utils/remote_management/__init__.py b/lib/ansible/module_utils/remote_management/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/remote_management/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/remote_management/dellemc/__init__.py b/lib/ansible/module_utils/remote_management/dellemc/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/remote_management/dellemc/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/remote_management/dellemc/dellemc_idrac.py b/lib/ansible/module_utils/remote_management/dellemc/dellemc_idrac.py
deleted file mode 100644
index b633b93ebf..0000000000
--- a/lib/ansible/module_utils/remote_management/dellemc/dellemc_idrac.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# -*- coding: utf-8 -*-
-
-#
-# Dell EMC OpenManage Ansible Modules
-# Version 1.0
-# Copyright (C) 2018 Dell Inc.
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
-# Other trademarks may be trademarks of their respective owners.
-#
-
-from __future__ import (absolute_import, division,
- print_function)
-__metaclass__ = type
-
-try:
- from omsdk.sdkinfra import sdkinfra
- from omsdk.sdkcreds import UserCredentials
- from omsdk.sdkfile import FileOnShare, file_share_manager
- from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum
- from omsdk.http.sdkwsmanbase import WsManOptions
- HAS_OMSDK = True
-except ImportError:
- HAS_OMSDK = False
-
-
-class iDRACConnection:
-
- def __init__(self, module_params):
- if not HAS_OMSDK:
- raise ImportError("Dell EMC OMSDK library is required for this module")
- self.idrac_ip = module_params['idrac_ip']
- self.idrac_user = module_params['idrac_user']
- self.idrac_pwd = module_params['idrac_password']
- self.idrac_port = module_params['idrac_port']
- if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)):
- raise ValueError("hostname, username and password required")
- self.handle = None
- self.creds = UserCredentials(self.idrac_user, self.idrac_pwd)
- self.pOp = WsManOptions(port=self.idrac_port)
- self.sdk = sdkinfra()
- if self.sdk is None:
- msg = "Could not initialize iDRAC drivers."
- raise RuntimeError(msg)
-
- def __enter__(self):
- self.sdk.importPath()
- self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds, pOptions=self.pOp)
- if self.handle is None:
- msg = "Could not find device driver for iDRAC with IP Address: {0}".format(self.idrac_ip)
- raise RuntimeError(msg)
- return self.handle
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.handle.disconnect()
- return False
diff --git a/lib/ansible/module_utils/remote_management/dellemc/ome.py b/lib/ansible/module_utils/remote_management/dellemc/ome.py
deleted file mode 100644
index c387e2a164..0000000000
--- a/lib/ansible/module_utils/remote_management/dellemc/ome.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Dell EMC OpenManage Ansible Modules
-# Version 1.3
-# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
-
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
-from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-
-SESSION_RESOURCE_COLLECTION = {
- "SESSION": "SessionService/Sessions",
- "SESSION_ID": "SessionService/Sessions('{Id}')",
-}
-
-
-class OpenURLResponse(object):
- """Handles HTTPResponse"""
-
- def __init__(self, resp):
- self.body = None
- self.resp = resp
- if self.resp:
- self.body = self.resp.read()
-
- @property
- def json_data(self):
- try:
- return json.loads(self.body)
- except ValueError:
- raise ValueError("Unable to parse json")
-
- @property
- def status_code(self):
- return self.resp.getcode()
-
- @property
- def success(self):
- return self.status_code in (200, 201, 202, 204)
-
- @property
- def token_header(self):
- return self.resp.headers.get('X-Auth-Token')
-
-
-class RestOME(object):
- """Handles OME API requests"""
-
- def __init__(self, module_params=None, req_session=False):
- self.module_params = module_params
- self.hostname = self.module_params["hostname"]
- self.username = self.module_params["username"]
- self.password = self.module_params["password"]
- self.port = self.module_params["port"]
- self.req_session = req_session
- self.session_id = None
- self.protocol = 'https'
- self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
-
- def _get_base_url(self):
- """builds base url"""
- return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port)
-
- def _build_url(self, path, query_param=None):
- """builds complete url"""
- url = path
- base_uri = self._get_base_url()
- if path:
- url = '{0}/{1}'.format(base_uri, path)
- if query_param:
- url += "?{0}".format(urlencode(query_param))
- return url
-
- def _url_common_args_spec(self, method, api_timeout, headers=None):
- """Creates an argument common spec"""
- req_header = self._headers
- if headers:
- req_header.update(headers)
- url_kwargs = {
- "method": method,
- "validate_certs": False,
- "use_proxy": True,
- "headers": req_header,
- "timeout": api_timeout,
- "follow_redirects": 'all',
- }
- return url_kwargs
-
- def _args_without_session(self, method, api_timeout=30, headers=None):
- """Creates an argument spec in case of basic authentication"""
- req_header = self._headers
- if headers:
- req_header.update(headers)
- url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
- url_kwargs["url_username"] = self.username
- url_kwargs["url_password"] = self.password
- url_kwargs["force_basic_auth"] = True
- return url_kwargs
-
- def _args_with_session(self, method, api_timeout=30, headers=None):
- """Creates an argument spec, in case of authentication with session"""
- url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
- url_kwargs["force_basic_auth"] = False
- return url_kwargs
-
- def invoke_request(self, method, path, data=None, query_param=None, headers=None,
- api_timeout=30, dump=True):
- """
- Sends a request via open_url
- Returns :class:`OpenURLResponse` object.
- :arg method: HTTP verb to use for the request
- :arg path: path to request without query parameter
- :arg data: (optional) Payload to send with the request
- :arg query_param: (optional) Dictionary of query parameter to send with request
- :arg headers: (optional) Dictionary of HTTP Headers to send with the
- request
- :arg api_timeout: (optional) How long to wait for the server to send
- data before giving up
- :arg dump: (Optional) boolean value for dumping payload data.
- :returns: OpenURLResponse
- """
- try:
- if 'X-Auth-Token' in self._headers:
- url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
- else:
- url_kwargs = self._args_without_session(method, api_timeout, headers=headers)
- if data and dump:
- data = json.dumps(data)
- url = self._build_url(path, query_param=query_param)
- resp = open_url(url, data=data, **url_kwargs)
- resp_data = OpenURLResponse(resp)
- except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
- raise err
- return resp_data
-
- def __enter__(self):
- """Creates sessions by passing it to header"""
- if self.req_session:
- payload = {'UserName': self.username,
- 'Password': self.password,
- 'SessionType': 'API', }
- path = SESSION_RESOURCE_COLLECTION["SESSION"]
- resp = self.invoke_request('POST', path, data=payload)
- if resp and resp.success:
- self.session_id = resp.json_data.get("Id")
- self._headers["X-Auth-Token"] = resp.token_header
- else:
- msg = "Could not create the session"
- raise ConnectionError(msg)
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- """Deletes a session id, which is in use for request"""
- if self.session_id:
- path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
- self.invoke_request('DELETE', path)
- return False
diff --git a/lib/ansible/module_utils/remote_management/lxca/__init__.py b/lib/ansible/module_utils/remote_management/lxca/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/remote_management/lxca/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/remote_management/lxca/common.py b/lib/ansible/module_utils/remote_management/lxca/common.py
deleted file mode 100644
index 50080ccb4b..0000000000
--- a/lib/ansible/module_utils/remote_management/lxca/common.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their
-# own license to the complete work.
-#
-# Copyright (C) 2017 Lenovo, Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# Contains LXCA common class
-# Lenovo xClarity Administrator (LXCA)
-
-import traceback
-try:
- from pylxca import connect, disconnect
- HAS_PYLXCA = True
-except ImportError:
- HAS_PYLXCA = False
-
-
-PYLXCA_REQUIRED = "Lenovo xClarity Administrator Python Client (Python package 'pylxca') is required for this module."
-
-
-def has_pylxca(module):
- """
- Check pylxca is installed
- :param module:
- """
- if not HAS_PYLXCA:
- module.fail_json(msg=PYLXCA_REQUIRED)
-
-
-LXCA_COMMON_ARGS = dict(
- login_user=dict(required=True),
- login_password=dict(required=True, no_log=True),
- auth_url=dict(required=True),
-)
-
-
-class connection_object:
- def __init__(self, module):
- self.module = module
-
- def __enter__(self):
- return setup_conn(self.module)
-
- def __exit__(self, type, value, traceback):
- close_conn()
-
-
-def setup_conn(module):
- """
- this function create connection to LXCA
- :param module:
- :return: lxca connection
- """
- lxca_con = None
- try:
- lxca_con = connect(module.params['auth_url'],
- module.params['login_user'],
- module.params['login_password'],
- "True")
- except Exception as exception:
- error_msg = '; '.join(exception.args)
- module.fail_json(msg=error_msg, exception=traceback.format_exc())
- return lxca_con
-
-
-def close_conn():
- """
- this function close connection to LXCA
- :param module:
- :return: None
- """
- disconnect()
diff --git a/lib/ansible/module_utils/scaleway.py b/lib/ansible/module_utils/scaleway.py
deleted file mode 100644
index 50041eff52..0000000000
--- a/lib/ansible/module_utils/scaleway.py
+++ /dev/null
@@ -1,183 +0,0 @@
-import json
-import re
-import sys
-
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.urls import fetch_url
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-
-
-def scaleway_argument_spec():
- return dict(
- api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
- no_log=True, aliases=['oauth_token']),
- api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
- api_timeout=dict(type='int', default=30, aliases=['timeout']),
- query_parameters=dict(type='dict', default={}),
- validate_certs=dict(default=True, type='bool'),
- )
-
-
-def payload_from_object(scw_object):
- return dict(
- (k, v)
- for k, v in scw_object.items()
- if k != 'id' and v is not None
- )
-
-
-class ScalewayException(Exception):
-
- def __init__(self, message):
- self.message = message
-
-
-# Specify a complete Link header, for validation purposes
-R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
- (,<[^>]+>;\srel="(first|previous|next|last)")*'''
-# Specify a single relation, for iteration and string extraction purposes
-R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
-
-
-def parse_pagination_link(header):
- if not re.match(R_LINK_HEADER, header, re.VERBOSE):
- raise ScalewayException('Scaleway API answered with an invalid Link pagination header')
- else:
- relations = header.split(',')
- parsed_relations = {}
- rc_relation = re.compile(R_RELATION)
- for relation in relations:
- match = rc_relation.match(relation)
- if not match:
- raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
- data = match.groupdict()
- parsed_relations[data['relation']] = data['target_IRI']
- return parsed_relations
-
-
-class Response(object):
-
- def __init__(self, resp, info):
- self.body = None
- if resp:
- self.body = resp.read()
- self.info = info
-
- @property
- def json(self):
- if not self.body:
- if "body" in self.info:
- return json.loads(self.info["body"])
- return None
- try:
- return json.loads(self.body)
- except ValueError:
- return None
-
- @property
- def status_code(self):
- return self.info["status"]
-
- @property
- def ok(self):
- return self.status_code in (200, 201, 202, 204)
-
-
-class Scaleway(object):
-
- def __init__(self, module):
- self.module = module
- self.headers = {
- 'X-Auth-Token': self.module.params.get('api_token'),
- 'User-Agent': self.get_user_agent_string(module),
- 'Content-type': 'application/json',
- }
- self.name = None
-
- def get_resources(self):
- results = self.get('/%s' % self.name)
-
- if not results.ok:
- raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
- self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
- results.status_code, results.json['message']
- ))
-
- return results.json.get(self.name)
-
- def _url_builder(self, path, params):
- d = self.module.params.get('query_parameters')
- if params is not None:
- d.update(params)
- query_string = urlencode(d, doseq=True)
-
- if path[0] == '/':
- path = path[1:]
- return '%s/%s?%s' % (self.module.params.get('api_url'), path, query_string)
-
- def send(self, method, path, data=None, headers=None, params=None):
- url = self._url_builder(path=path, params=params)
- self.warn(url)
-
- if headers is not None:
- self.headers.update(headers)
-
- if self.headers['Content-Type'] == "application/json":
- data = self.module.jsonify(data)
-
- resp, info = fetch_url(
- self.module, url, data=data, headers=self.headers, method=method,
- timeout=self.module.params.get('api_timeout')
- )
-
- # Exceptions in fetch_url may result in a status -1, the ensures a proper error to the user in all cases
- if info['status'] == -1:
- self.module.fail_json(msg=info['msg'])
-
- return Response(resp, info)
-
- @staticmethod
- def get_user_agent_string(module):
- return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
-
- def get(self, path, data=None, headers=None, params=None):
- return self.send(method='GET', path=path, data=data, headers=headers, params=params)
-
- def put(self, path, data=None, headers=None, params=None):
- return self.send(method='PUT', path=path, data=data, headers=headers, params=params)
-
- def post(self, path, data=None, headers=None, params=None):
- return self.send(method='POST', path=path, data=data, headers=headers, params=params)
-
- def delete(self, path, data=None, headers=None, params=None):
- return self.send(method='DELETE', path=path, data=data, headers=headers, params=params)
-
- def patch(self, path, data=None, headers=None, params=None):
- return self.send(method="PATCH", path=path, data=data, headers=headers, params=params)
-
- def update(self, path, data=None, headers=None, params=None):
- return self.send(method="UPDATE", path=path, data=data, headers=headers, params=params)
-
- def warn(self, x):
- self.module.warn(str(x))
-
-
-SCALEWAY_LOCATION = {
- 'par1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'},
- 'EMEA-FR-PAR1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'},
-
- 'ams1': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'},
- 'EMEA-NL-EVS': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'}
-}
-
-SCALEWAY_ENDPOINT = "https://api.scaleway.com"
-
-SCALEWAY_REGIONS = [
- "fr-par",
- "nl-ams",
-]
-
-SCALEWAY_ZONES = [
- "fr-par-1",
- "nl-ams-1",
-]
diff --git a/lib/ansible/module_utils/source_control/__init__.py b/lib/ansible/module_utils/source_control/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/source_control/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/source_control/bitbucket.py b/lib/ansible/module_utils/source_control/bitbucket.py
deleted file mode 100644
index 8359eec11f..0000000000
--- a/lib/ansible/module_utils/source_control/bitbucket.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-
-import json
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.urls import fetch_url, basic_auth_header
-
-# Makes all classes defined in the file into new-style classes without explicitly inheriting from object
-__metaclass__ = type
-
-
-class BitbucketHelper:
- BITBUCKET_API_URL = 'https://api.bitbucket.org'
-
- error_messages = {
- 'required_client_id': '`client_id` must be specified as a parameter or '
- 'BITBUCKET_CLIENT_ID environment variable',
- 'required_client_secret': '`client_secret` must be specified as a parameter or '
- 'BITBUCKET_CLIENT_SECRET environment variable',
- }
-
- def __init__(self, module):
- self.module = module
- self.access_token = None
-
- @staticmethod
- def bitbucket_argument_spec():
- return dict(
- client_id=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
- client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
- )
-
- def check_arguments(self):
- if self.module.params['client_id'] is None:
- self.module.fail_json(msg=self.error_messages['required_client_id'])
-
- if self.module.params['client_secret'] is None:
- self.module.fail_json(msg=self.error_messages['required_client_secret'])
-
- def fetch_access_token(self):
- self.check_arguments()
-
- headers = {
- 'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret'])
- }
-
- info, content = self.request(
- api_url='https://bitbucket.org/site/oauth2/access_token',
- method='POST',
- data='grant_type=client_credentials',
- headers=headers,
- )
-
- if info['status'] == 200:
- self.access_token = content['access_token']
- else:
- self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
-
- def request(self, api_url, method, data=None, headers=None):
- headers = headers or {}
-
- if self.access_token:
- headers.update({
- 'Authorization': 'Bearer {0}'.format(self.access_token),
- })
-
- if isinstance(data, dict):
- data = self.module.jsonify(data)
- headers.update({
- 'Content-type': 'application/json',
- })
-
- response, info = fetch_url(
- module=self.module,
- url=api_url,
- method=method,
- headers=headers,
- data=data,
- force=True,
- )
-
- content = {}
-
- if response is not None:
- body = to_text(response.read())
- if body:
- content = json.loads(body)
-
- return info, content
diff --git a/lib/ansible/module_utils/storage/__init__.py b/lib/ansible/module_utils/storage/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/storage/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/storage/emc/__init__.py b/lib/ansible/module_utils/storage/emc/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/storage/emc/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/storage/emc/emc_vnx.py b/lib/ansible/module_utils/storage/emc/emc_vnx.py
deleted file mode 100644
index c6177e5367..0000000000
--- a/lib/ansible/module_utils/storage/emc/emc_vnx.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2018 Luca 'remix_tj' Lorenzetto
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-emc_vnx_argument_spec = {
- 'sp_address': dict(type='str', required=True),
- 'sp_user': dict(type='str', required=False, default='sysadmin'),
- 'sp_password': dict(type='str', required=False, default='sysadmin',
- no_log=True),
-}
diff --git a/lib/ansible/module_utils/storage/hpe3par/__init__.py b/lib/ansible/module_utils/storage/hpe3par/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/module_utils/storage/hpe3par/__init__.py
+++ /dev/null
diff --git a/lib/ansible/module_utils/storage/hpe3par/hpe3par.py b/lib/ansible/module_utils/storage/hpe3par/hpe3par.py
deleted file mode 100644
index 2495f9be1b..0000000000
--- a/lib/ansible/module_utils/storage/hpe3par/hpe3par.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from ansible.module_utils import basic
-
-
-def convert_to_binary_multiple(size_with_unit):
- if size_with_unit is None:
- return -1
- valid_units = ['MiB', 'GiB', 'TiB']
- valid_unit = False
- for unit in valid_units:
- if size_with_unit.strip().endswith(unit):
- valid_unit = True
- size = size_with_unit.split(unit)[0]
- if float(size) < 0:
- return -1
- if not valid_unit:
- raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units))
-
- size = size_with_unit.replace(" ", "").split('iB')[0]
- size_kib = basic.human_to_bytes(size)
- return int(size_kib / (1024 * 1024))
-
-
-storage_system_spec = {
- "storage_system_ip": {
- "required": True,
- "type": "str"
- },
- "storage_system_username": {
- "required": True,
- "type": "str",
- "no_log": True
- },
- "storage_system_password": {
- "required": True,
- "type": "str",
- "no_log": True
- },
- "secure": {
- "type": "bool",
- "default": False
- }
-}
-
-
-def cpg_argument_spec():
- spec = {
- "state": {
- "required": True,
- "choices": ['present', 'absent'],
- "type": 'str'
- },
- "cpg_name": {
- "required": True,
- "type": "str"
- },
- "domain": {
- "type": "str"
- },
- "growth_increment": {
- "type": "str",
- },
- "growth_limit": {
- "type": "str",
- },
- "growth_warning": {
- "type": "str",
- },
- "raid_type": {
- "required": False,
- "type": "str",
- "choices": ['R0', 'R1', 'R5', 'R6']
- },
- "set_size": {
- "required": False,
- "type": "int"
- },
- "high_availability": {
- "type": "str",
- "choices": ['PORT', 'CAGE', 'MAG']
- },
- "disk_type": {
- "type": "str",
- "choices": ['FC', 'NL', 'SSD']
- }
- }
- spec.update(storage_system_spec)
- return spec
diff --git a/lib/ansible/module_utils/univention_umc.py b/lib/ansible/module_utils/univention_umc.py
deleted file mode 100644
index 9c84930cf5..0000000000
--- a/lib/ansible/module_utils/univention_umc.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# -*- coding: UTF-8 -*-
-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2016, Adfinis SyGroup AG
-# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-
-"""Univention Corporate Server (UCS) access module.
-
-Provides the following functions for working with an UCS server.
-
- - ldap_search(filter, base=None, attr=None)
- Search the LDAP via Univention's LDAP wrapper (ULDAP)
-
- - config_registry()
- Return the UCR registry object
-
- - base_dn()
- Return the configured Base DN according to the UCR
-
- - uldap()
- Return a handle to the ULDAP LDAP wrapper
-
- - umc_module_for_add(module, container_dn, superordinate=None)
- Return a UMC module for creating a new object of the given type
-
- - umc_module_for_edit(module, object_dn, superordinate=None)
- Return a UMC module for editing an existing object of the given type
-
-
-Any other module is not part of the "official" API and may change at any time.
-"""
-
-import re
-
-
-__all__ = [
- 'ldap_search',
- 'config_registry',
- 'base_dn',
- 'uldap',
- 'umc_module_for_add',
- 'umc_module_for_edit',
-]
-
-
-_singletons = {}
-
-
-def ldap_module():
- import ldap as orig_ldap
- return orig_ldap
-
-
-def _singleton(name, constructor):
- if name in _singletons:
- return _singletons[name]
- _singletons[name] = constructor()
- return _singletons[name]
-
-
-def config_registry():
-
- def construct():
- import univention.config_registry
- ucr = univention.config_registry.ConfigRegistry()
- ucr.load()
- return ucr
-
- return _singleton('config_registry', construct)
-
-
-def base_dn():
- return config_registry()['ldap/base']
-
-
-def uldap():
- "Return a configured univention uldap object"
-
- def construct():
- try:
- secret_file = open('/etc/ldap.secret', 'r')
- bind_dn = 'cn=admin,{0}'.format(base_dn())
- except IOError: # pragma: no cover
- secret_file = open('/etc/machine.secret', 'r')
- bind_dn = config_registry()["ldap/hostdn"]
- pwd_line = secret_file.readline()
- pwd = re.sub('\n', '', pwd_line)
-
- import univention.admin.uldap
- return univention.admin.uldap.access(
- host=config_registry()['ldap/master'],
- base=base_dn(),
- binddn=bind_dn,
- bindpw=pwd,
- start_tls=1,
- )
-
- return _singleton('uldap', construct)
-
-
-def config():
- def construct():
- import univention.admin.config
- return univention.admin.config.config()
- return _singleton('config', construct)
-
-
-def init_modules():
- def construct():
- import univention.admin.modules
- univention.admin.modules.update()
- return True
- return _singleton('modules_initialized', construct)
-
-
-def position_base_dn():
- def construct():
- import univention.admin.uldap
- return univention.admin.uldap.position(base_dn())
- return _singleton('position_base_dn', construct)
-
-
-def ldap_dn_tree_parent(dn, count=1):
- dn_array = dn.split(',')
- dn_array[0:count] = []
- return ','.join(dn_array)
-
-
-def ldap_search(filter, base=None, attr=None):
- """Replaces uldaps search and uses a generator.
- !! Arguments are not the same."""
-
- if base is None:
- base = base_dn()
- msgid = uldap().lo.lo.search(
- base,
- ldap_module().SCOPE_SUBTREE,
- filterstr=filter,
- attrlist=attr
- )
- # I used to have a try: finally: here but there seems to be a bug in python
- # which swallows the KeyboardInterrupt
- # The abandon now doesn't make too much sense
- while True:
- result_type, result_data = uldap().lo.lo.result(msgid, all=0)
- if not result_data:
- break
- if result_type is ldap_module().RES_SEARCH_RESULT: # pragma: no cover
- break
- else:
- if result_type is ldap_module().RES_SEARCH_ENTRY:
- for res in result_data:
- yield res
- uldap().lo.lo.abandon(msgid)
-
-
-def module_by_name(module_name_):
- """Returns an initialized UMC module, identified by the given name.
-
- The module is a module specification according to the udm commandline.
- Example values are:
- * users/user
- * shares/share
- * groups/group
-
- If the module does not exist, a KeyError is raised.
-
- The modules are cached, so they won't be re-initialized
- in subsequent calls.
- """
-
- def construct():
- import univention.admin.modules
- init_modules()
- module = univention.admin.modules.get(module_name_)
- univention.admin.modules.init(uldap(), position_base_dn(), module)
- return module
-
- return _singleton('module/%s' % module_name_, construct)
-
-
-def get_umc_admin_objects():
- """Convenience accessor for getting univention.admin.objects.
-
- This implements delayed importing, so the univention.* modules
- are not loaded until this function is called.
- """
- import univention.admin
- return univention.admin.objects
-
-
-def umc_module_for_add(module, container_dn, superordinate=None):
- """Returns an UMC module object prepared for creating a new entry.
-
- The module is a module specification according to the udm commandline.
- Example values are:
- * users/user
- * shares/share
- * groups/group
-
- The container_dn MUST be the dn of the container (not of the object to
- be created itself!).
- """
- mod = module_by_name(module)
-
- position = position_base_dn()
- position.setDn(container_dn)
-
- # config, ldap objects from common module
- obj = mod.object(config(), uldap(), position, superordinate=superordinate)
- obj.open()
-
- return obj
-
-
-def umc_module_for_edit(module, object_dn, superordinate=None):
- """Returns an UMC module object prepared for editing an existing entry.
-
- The module is a module specification according to the udm commandline.
- Example values are:
- * users/user
- * shares/share
- * groups/group
-
- The object_dn MUST be the dn of the object itself, not the container!
- """
- mod = module_by_name(module)
-
- objects = get_umc_admin_objects()
-
- position = position_base_dn()
- position.setDn(ldap_dn_tree_parent(object_dn))
-
- obj = objects.get(
- mod,
- config(),
- uldap(),
- position=position,
- superordinate=superordinate,
- dn=object_dn
- )
- obj.open()
-
- return obj
-
-
-def create_containers_and_parents(container_dn):
- """Create a container and if needed the parents containers"""
- import univention.admin.uexceptions as uexcp
- if not container_dn.startswith("cn="):
- raise AssertionError()
- try:
- parent = ldap_dn_tree_parent(container_dn)
- obj = umc_module_for_add(
- 'container/cn',
- parent
- )
- obj['name'] = container_dn.split(',')[0].split('=')[1]
- obj['description'] = "container created by import"
- except uexcp.ldapError:
- create_containers_and_parents(parent)
- obj = umc_module_for_add(
- 'container/cn',
- parent
- )
- obj['name'] = container_dn.split(',')[0].split('=')[1]
- obj['description'] = "container created by import"
diff --git a/lib/ansible/module_utils/utm_utils.py b/lib/ansible/module_utils/utm_utils.py
deleted file mode 100644
index ba193713f4..0000000000
--- a/lib/ansible/module_utils/utm_utils.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-import json
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.urls import fetch_url
-
-
-class UTMModuleConfigurationError(Exception):
-
- def __init__(self, msg, **args):
- super(UTMModuleConfigurationError, self).__init__(self, msg)
- self.msg = msg
- self.module_fail_args = args
-
- def do_fail(self, module):
- module.fail_json(msg=self.msg, other=self.module_fail_args)
-
-
-class UTMModule(AnsibleModule):
- """
- This is a helper class to construct any UTM Module. This will automatically add the utm host, port, token,
- protocol, validate_certs and state field to the module. If you want to implement your own sophos utm module
- just initialize this UTMModule class and define the Payload fields that are needed for your module.
- See the other modules like utm_aaa_group for example.
- """
-
- def __init__(self, argument_spec, bypass_checks=False, no_log=False,
- mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
- supports_check_mode=False, required_if=None):
- default_specs = dict(
- headers=dict(type='dict', required=False, default={}),
- utm_host=dict(type='str', required=True),
- utm_port=dict(type='int', default=4444),
- utm_token=dict(type='str', required=True, no_log=True),
- utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
- validate_certs=dict(type='bool', required=False, default=True),
- state=dict(default='present', choices=['present', 'absent'])
- )
- super(UTMModule, self).__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log,
- mutually_exclusive, required_together, required_one_of,
- add_file_common_args, supports_check_mode, required_if)
-
- def _merge_specs(self, default_specs, custom_specs):
- result = default_specs.copy()
- result.update(custom_specs)
- return result
-
-
-class UTM:
-
- def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
- """
- Initialize UTM Class
- :param module: The Ansible module
- :param endpoint: The corresponding endpoint to the module
- :param change_relevant_keys: The keys of the object to check for changes
- :param info_only: When implementing an info module, set this to true. Will allow access to the info method only
- """
- self.info_only = info_only
- self.module = module
- self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native(
- module.params.get('utm_port')) + "/api/objects/" + endpoint + "/"
-
- """
- The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
- """
- self.change_relevant_keys = change_relevant_keys
- self.module.params['url_username'] = 'token'
- self.module.params['url_password'] = module.params.get('utm_token')
- if all(elem in self.change_relevant_keys for elem in module.params.keys()):
- raise UTMModuleConfigurationError(
- "The keys " + to_native(
- self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native(
- module.params.keys()))
-
- def execute(self):
- try:
- if not self.info_only:
- if self.module.params.get('state') == 'present':
- self._add()
- elif self.module.params.get('state') == 'absent':
- self._remove()
- else:
- self._info()
- except Exception as e:
- self.module.fail_json(msg=to_native(e))
-
- def _info(self):
- """
- returns the info for an object in utm
- """
- info, result = self._lookup_entry(self.module, self.request_url)
- if info["status"] >= 400:
- self.module.fail_json(result=json.loads(info))
- else:
- if result is None:
- self.module.exit_json(changed=False)
- else:
- self.module.exit_json(result=result, changed=False)
-
- def _add(self):
- """
- adds or updates a host object on utm
- """
-
- combined_headers = self._combine_headers()
-
- is_changed = False
- info, result = self._lookup_entry(self.module, self.request_url)
- if info["status"] >= 400:
- self.module.fail_json(result=json.loads(info))
- else:
- data_as_json_string = self.module.jsonify(self.module.params)
- if result is None:
- response, info = fetch_url(self.module, self.request_url, method="POST",
- headers=combined_headers,
- data=data_as_json_string)
- if info["status"] >= 400:
- self.module.fail_json(msg=json.loads(info["body"]))
- is_changed = True
- result = self._clean_result(json.loads(response.read()))
- else:
- if self._is_object_changed(self.change_relevant_keys, self.module, result):
- response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
- headers=combined_headers,
- data=data_as_json_string)
- if info['status'] >= 400:
- self.module.fail_json(msg=json.loads(info["body"]))
- is_changed = True
- result = self._clean_result(json.loads(response.read()))
- self.module.exit_json(result=result, changed=is_changed)
-
- def _combine_headers(self):
- """
- This will combine a header default with headers that come from the module declaration
- :return: A combined headers dict
- """
- default_headers = {"Accept": "application/json", "Content-type": "application/json"}
- if self.module.params.get('headers') is not None:
- result = default_headers.copy()
- result.update(self.module.params.get('headers'))
- else:
- result = default_headers
- return result
-
- def _remove(self):
- """
- removes an object from utm
- """
- is_changed = False
- info, result = self._lookup_entry(self.module, self.request_url)
- if result is not None:
- response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
- headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
- data=self.module.jsonify(self.module.params))
- if info["status"] >= 400:
- self.module.fail_json(msg=json.loads(info["body"]))
- else:
- is_changed = True
- self.module.exit_json(changed=is_changed)
-
- def _lookup_entry(self, module, request_url):
- """
- Lookup for existing entry
- :param module:
- :param request_url:
- :return:
- """
- response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"})
- result = None
- if response is not None:
- results = json.loads(response.read())
- result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None)
- return info, result
-
- def _clean_result(self, result):
- """
- Will clean the result from irrelevant fields
- :param result: The result from the query
- :return: The modified result
- """
- del result['utm_host']
- del result['utm_port']
- del result['utm_token']
- del result['utm_protocol']
- del result['validate_certs']
- del result['url_username']
- del result['url_password']
- del result['state']
- return result
-
- def _is_object_changed(self, keys, module, result):
- """
- Check if my object is changed
- :param keys: The keys that will determine if an object is changed
- :param module: The module
- :param result: The result from the query
- :return:
- """
- for key in keys:
- if module.params.get(key) != result[key]:
- return True
- return False
diff --git a/lib/ansible/module_utils/vexata.py b/lib/ansible/module_utils/vexata.py
deleted file mode 100644
index 072def8744..0000000000
--- a/lib/ansible/module_utils/vexata.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-
-HAS_VEXATAPI = True
-try:
- from vexatapi.vexata_api_proxy import VexataAPIProxy
-except ImportError:
- HAS_VEXATAPI = False
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import env_fallback
-
-VXOS_VERSION = None
-
-
-def get_version(iocs_json):
- if not iocs_json:
- raise Exception('Invalid IOC json')
- active = filter(lambda x: x['mgmtRole'], iocs_json)
- if not active:
- raise Exception('Unable to detect active IOC')
- active = active[0]
- ver = active['swVersion']
- if ver[0] != 'v':
- raise Exception('Illegal version string')
- ver = ver[1:ver.find('-')]
- ver = map(int, ver.split('.'))
- return tuple(ver)
-
-
-def get_array(module):
- """Return storage array object or fail"""
- global VXOS_VERSION
- array = module.params['array']
- user = module.params.get('user', None)
- password = module.params.get('password', None)
- validate = module.params.get('validate_certs')
-
- if not HAS_VEXATAPI:
- module.fail_json(msg='vexatapi library is required for this module. '
- 'To install, use `pip install vexatapi`')
-
- if user and password:
- system = VexataAPIProxy(array, user, password, verify_cert=validate)
- else:
- module.fail_json(msg='The user/password are required to be passed in to '
- 'the module as arguments or by setting the '
- 'VEXATA_USER and VEXATA_PASSWORD environment variables.')
- try:
- if system.test_connection():
- VXOS_VERSION = get_version(system.iocs())
- return system
- else:
- module.fail_json(msg='Test connection to array failed.')
- except Exception as e:
- module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e)))
-
-
-def argument_spec():
- """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
- return dict(
- array=dict(type='str',
- required=True),
- user=dict(type='str',
- fallback=(env_fallback, ['VEXATA_USER'])),
- password=dict(type='str',
- no_log=True,
- fallback=(env_fallback, ['VEXATA_PASSWORD'])),
- validate_certs=dict(type='bool',
- required=False,
- default=False),
- )
-
-
-def required_together():
- """Return the default list used for the required_together argument to AnsibleModule"""
- return [['user', 'password']]
-
-
-def size_to_MiB(size):
- """Convert a '<integer>[MGT]' string to MiB, return -1 on error."""
- quant = size[:-1]
- exponent = size[-1]
- if not quant.isdigit() or exponent not in 'MGT':
- return -1
- quant = int(quant)
- if exponent == 'G':
- quant <<= 10
- elif exponent == 'T':
- quant <<= 20
- return quant
diff --git a/lib/ansible/module_utils/vultr.py b/lib/ansible/module_utils/vultr.py
deleted file mode 100644
index e5d23ede8a..0000000000
--- a/lib/ansible/module_utils/vultr.py
+++ /dev/null
@@ -1,333 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2017, René Moser <mail@renemoser.net>
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import os
-import time
-import random
-import urllib
-from ansible.module_utils.six.moves import configparser
-from ansible.module_utils._text import to_text, to_native
-from ansible.module_utils.urls import fetch_url
-
-
-VULTR_API_ENDPOINT = "https://api.vultr.com"
-VULTR_USER_AGENT = 'Ansible Vultr'
-
-
def vultr_argument_spec():
    """Return the argument spec shared by all Vultr modules.

    Each API option can be preset from the matching VULTR_API_* environment
    variable; api_account falls back to the literal 'default'.
    """
    env = os.environ.get
    return {
        'api_key': {'type': 'str', 'default': env('VULTR_API_KEY'), 'no_log': True},
        'api_timeout': {'type': 'int', 'default': env('VULTR_API_TIMEOUT')},
        'api_retries': {'type': 'int', 'default': env('VULTR_API_RETRIES')},
        'api_retry_max_delay': {'type': 'int', 'default': env('VULTR_API_RETRY_MAX_DELAY')},
        'api_account': {'type': 'str', 'default': env('VULTR_API_ACCOUNT') or 'default'},
        'api_endpoint': {'type': 'str', 'default': env('VULTR_API_ENDPOINT')},
        'validate_certs': {'type': 'bool', 'default': True},
    }
-
-
class Vultr:
    """Base helper shared by all Vultr modules.

    Resolves API configuration from module parameters, environment
    variables and an optional ini file, performs HTTP requests against
    the Vultr v1 API with exponential-backoff retries, and normalizes
    API results for the module's return values.
    """

    def __init__(self, module, namespace):

        # The modules were renamed from vr_* to vultr_*; warn users of the
        # deprecated names.
        if module._name.startswith('vr_'):
            module.deprecate("The Vultr modules were renamed. The prefix of the modules changed from vr_ to vultr_", version='2.11')

        self.module = module

        # Namespace use for returns
        self.namespace = namespace
        self.result = {
            'changed': False,
            namespace: dict(),
            'diff': dict(before=dict(), after=dict())
        }

        # For caching HTTP API responses
        self.api_cache = dict()

        try:
            config = self.read_env_variables()
            config.update(Vultr.read_ini_config(self.module.params.get('api_account')))
        except KeyError:
            config = {}

        try:
            # Module parameters win over env/ini settings; hard-coded values
            # are the last resort defaults.
            self.api_config = {
                'api_key': self.module.params.get('api_key') or config.get('key'),
                'api_timeout': self.module.params.get('api_timeout') or int(config.get('timeout') or 60),
                'api_retries': self.module.params.get('api_retries') or int(config.get('retries') or 5),
                'api_retry_max_delay': self.module.params.get('api_retry_max_delay') or int(config.get('retry_max_delay') or 12),
                'api_endpoint': self.module.params.get('api_endpoint') or config.get('endpoint') or VULTR_API_ENDPOINT,
            }
        except ValueError as e:
            self.fail_json(msg="One of the following settings, "
                               "in section '%s' in the ini config file has not an int value: timeout, retries. "
                               "Error was %s" % (self.module.params.get('api_account'), to_native(e)))

        if not self.api_config.get('api_key'):
            # Fixed typo in user-facing message: 'speicied' -> 'specified'.
            self.module.fail_json(msg="The API key is not specified. Please refer to the documentation.")

        # Common vultr returns
        self.result['vultr_api'] = {
            'api_account': self.module.params.get('api_account'),
            'api_timeout': self.api_config['api_timeout'],
            'api_retries': self.api_config['api_retries'],
            'api_retry_max_delay': self.api_config['api_retry_max_delay'],
            'api_endpoint': self.api_config['api_endpoint'],
        }

        # Headers to be passed to the API
        self.headers = {
            'API-Key': "%s" % self.api_config['api_key'],
            'User-Agent': VULTR_USER_AGENT,
            'Accept': 'application/json',
        }

    def read_env_variables(self):
        """Return API settings found in VULTR_API_* environment variables."""
        keys = ['key', 'timeout', 'retries', 'retry_max_delay', 'endpoint']
        env_conf = {}
        for key in keys:
            if 'VULTR_API_%s' % key.upper() not in os.environ:
                continue
            env_conf[key] = os.environ['VULTR_API_%s' % key.upper()]

        return env_conf

    @staticmethod
    def read_ini_config(ini_group):
        """Return the settings of section *ini_group* from a vultr ini file.

        Searched locations are ~/.vultr.ini, ./vultr.ini and, if set, the
        path named by the VULTR_API_CONFIG environment variable.
        """
        paths = (
            os.path.join(os.path.expanduser('~'), '.vultr.ini'),
            os.path.join(os.getcwd(), 'vultr.ini'),
        )
        if 'VULTR_API_CONFIG' in os.environ:
            paths += (os.path.expanduser(os.environ['VULTR_API_CONFIG']),)

        conf = configparser.ConfigParser()
        conf.read(paths)

        if not conf._sections.get(ini_group):
            return dict()

        return dict(conf.items(ini_group))

    def fail_json(self, **kwargs):
        """Fail the module, merging *kwargs* into the common result dict."""
        self.result.update(kwargs)
        self.module.fail_json(**self.result)

    def get_yes_or_no(self, key):
        """Return 'yes'/'no' for boolean module param *key*, or None if unset."""
        if self.module.params.get(key) is not None:
            return 'yes' if self.module.params.get(key) is True else 'no'

    def switch_enable_disable(self, resource, param_key, resource_key=None):
        """Return 'enable'/'disable' if the resource flag must change, else None.

        The API represents some flags as 'yes'/'no' strings and others as
        plain truthy values; both representations are handled.
        """
        if resource_key is None:
            resource_key = param_key

        param = self.module.params.get(param_key)
        if param is None:
            return

        r_value = resource.get(resource_key)
        if r_value in ['yes', 'no']:
            if param and r_value != 'yes':
                return "enable"
            elif not param and r_value != 'no':
                return "disable"
        else:
            if param and not r_value:
                return "enable"
            elif not param and r_value:
                return "disable"

    def api_query(self, path="/", method="GET", data=None):
        """Perform an API request and return the decoded JSON body.

        Retries with exponential backoff (capped at api_retry_max_delay)
        until HTTP 200 or api_retries is exhausted; fails the module on a
        persistent non-200 answer or undecodable JSON.
        """
        url = self.api_config['api_endpoint'] + path

        if data:
            # List values are encoded as repeated key[]=value pairs. The
            # try/except keeps compatibility with both the Python 2 and
            # Python 3 urllib layouts.
            data_encoded = dict()
            data_list = ""
            for k, v in data.items():
                if isinstance(v, list):
                    for s in v:
                        try:
                            data_list += '&%s[]=%s' % (k, urllib.quote(s))
                        except AttributeError:
                            data_list += '&%s[]=%s' % (k, urllib.parse.quote(s))
                elif v is not None:
                    data_encoded[k] = v
            try:
                data = urllib.urlencode(data_encoded) + data_list
            except AttributeError:
                data = urllib.parse.urlencode(data_encoded) + data_list

        retry_max_delay = self.api_config['api_retry_max_delay']
        randomness = random.randint(0, 1000) / 1000.0

        for retry in range(0, self.api_config['api_retries']):
            response, info = fetch_url(
                module=self.module,
                url=url,
                data=data,
                method=method,
                headers=self.headers,
                timeout=self.api_config['api_timeout'],
            )

            if info.get('status') == 200:
                break

            # Vultr has a rate limiting requests per second, try to be polite
            # Use exponential backoff plus a little bit of randomness
            delay = 2 ** retry + randomness
            if delay > retry_max_delay:
                delay = retry_max_delay + randomness
            time.sleep(delay)

        else:
            # for/else: the loop finished without break, i.e. every retry
            # came back non-200.
            self.fail_json(msg="Reached API retries limit %s for URL %s, method %s with data %s. Returned %s, with body: %s %s" % (
                self.api_config['api_retries'],
                url,
                method,
                data,
                info['status'],
                info['msg'],
                info.get('body')
            ))

        if info.get('status') != 200:
            self.fail_json(msg="URL %s, method %s with data %s. Returned %s, with body: %s %s" % (
                url,
                method,
                data,
                info['status'],
                info['msg'],
                info.get('body')
            ))

        res = response.read()
        if not res:
            return {}

        try:
            return self.module.from_json(to_native(res)) or {}
        except ValueError as e:
            self.module.fail_json(msg="Could not process response into json: %s" % e)

    def query_resource_by_key(self, key, value, resource='regions', query_by='list', params=None, use_cache=False, id_key=None, optional=False):
        """Find an entry of *resource* whose *key* (or *id_key*) equals *value*.

        Returns the matching entry as a dict, an empty dict when *value* is
        falsy or when nothing matches and optional=True; otherwise fails
        the module.
        """
        if not value:
            return {}

        r_list = None
        if use_cache:
            r_list = self.api_cache.get(resource)

        if not r_list:
            r_list = self.api_query(path="/v1/%s/%s" % (resource, query_by), data=params)
            if use_cache:
                self.api_cache.update({
                    resource: r_list
                })

        if not r_list:
            return {}

        # The API returns either a plain list or a dict keyed by resource id.
        elif isinstance(r_list, list):
            for r_data in r_list:
                if str(r_data[key]) == str(value):
                    return r_data
                if id_key is not None and to_text(r_data[id_key]) == to_text(value):
                    return r_data
        elif isinstance(r_list, dict):
            for r_id, r_data in r_list.items():
                if str(r_data[key]) == str(value):
                    return r_data
                if id_key is not None and to_text(r_data[id_key]) == to_text(value):
                    return r_data
        if not optional:
            if id_key:
                msg = "Could not find %s with ID or %s: %s" % (resource, key, value)
            else:
                msg = "Could not find %s with %s: %s" % (resource, key, value)
            self.module.fail_json(msg=msg)
        return {}

    @staticmethod
    def normalize_result(resource, schema, remove_missing_keys=True):
        """Normalize *resource* in place according to *schema* and return it.

        Schema entries may request a type conversion ('convert_to'), a
        transform callable ('transform') and a rename ('key'). Keys not in
        the schema are dropped unless remove_missing_keys is False.
        """
        if remove_missing_keys:
            fields_to_remove = set(resource.keys()) - set(schema.keys())
            for field in fields_to_remove:
                resource.pop(field)

        for search_key, config in schema.items():
            if search_key in resource:
                if 'convert_to' in config:
                    if config['convert_to'] == 'int':
                        resource[search_key] = int(resource[search_key])
                    elif config['convert_to'] == 'float':
                        resource[search_key] = float(resource[search_key])
                    elif config['convert_to'] == 'bool':
                        resource[search_key] = True if resource[search_key] == 'yes' else False

                if 'transform' in config:
                    resource[search_key] = config['transform'](resource[search_key])

                if 'key' in config:
                    resource[config['key']] = resource[search_key]
                    del resource[search_key]

        return resource

    def get_result(self, resource):
        """Store the normalized *resource* under the module's namespace.

        Uses self.returns as the normalization schema; concrete module
        classes are expected to define it.
        """
        if resource:
            if isinstance(resource, list):
                self.result[self.namespace] = [Vultr.normalize_result(item, self.returns) for item in resource]
            else:
                self.result[self.namespace] = Vultr.normalize_result(resource, self.returns)

        return self.result

    def get_plan(self, plan=None, key='name', optional=False):
        """Look up a VPS plan by *key* or by its VPSPLANID."""
        value = plan or self.module.params.get('plan')

        return self.query_resource_by_key(
            key=key,
            value=value,
            resource='plans',
            use_cache=True,
            id_key='VPSPLANID',
            optional=optional,
        )

    def get_firewallgroup(self, firewallgroup=None, key='description'):
        """Look up a firewall group by *key*."""
        value = firewallgroup or self.module.params.get('firewallgroup')

        return self.query_resource_by_key(
            key=key,
            value=value,
            resource='firewall',
            query_by='group_list',
            use_cache=True
        )

    def get_application(self, application=None, key='name'):
        """Look up an application by *key*."""
        value = application or self.module.params.get('application')

        return self.query_resource_by_key(
            key=key,
            value=value,
            resource='app',
            use_cache=True
        )

    def get_region(self, region=None, key='name'):
        """Look up a region by *key*."""
        value = region or self.module.params.get('region')

        return self.query_resource_by_key(
            key=key,
            value=value,
            resource='regions',
            use_cache=True
        )
diff --git a/lib/ansible/module_utils/xenserver.py b/lib/ansible/module_utils/xenserver.py
deleted file mode 100644
index dbc6a0adbe..0000000000
--- a/lib/ansible/module_utils/xenserver.py
+++ /dev/null
@@ -1,862 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import atexit
-import time
-import re
-import traceback
-
-XENAPI_IMP_ERR = None
-try:
- import XenAPI
- HAS_XENAPI = True
-except ImportError:
- HAS_XENAPI = False
- XENAPI_IMP_ERR = traceback.format_exc()
-
-from ansible.module_utils.basic import env_fallback, missing_required_lib
-from ansible.module_utils.common.network import is_mac
-from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
-
-
def xenserver_common_argument_spec():
    """Return the argument spec shared by all XenServer modules.

    Every option may also be supplied via its XENSERVER_* environment
    variable fallback.
    """
    return {
        'hostname': {
            'type': 'str',
            'aliases': ['host', 'pool'],
            'required': False,
            'default': 'localhost',
            'fallback': (env_fallback, ['XENSERVER_HOST']),
        },
        'username': {
            'type': 'str',
            'aliases': ['user', 'admin'],
            'required': False,
            'default': 'root',
            'fallback': (env_fallback, ['XENSERVER_USER']),
        },
        'password': {
            'type': 'str',
            'aliases': ['pass', 'pwd'],
            'required': False,
            'no_log': True,
            'fallback': (env_fallback, ['XENSERVER_PASSWORD']),
        },
        'validate_certs': {
            'type': 'bool',
            'required': False,
            'default': True,
            'fallback': (env_fallback, ['XENSERVER_VALIDATE_CERTS']),
        },
    }
-
-
def xapi_to_module_vm_power_state(power_state):
    """Map a XAPI VM power state to the module's equivalent (None if unknown)."""
    return {
        "running": "poweredon",
        "halted": "poweredoff",
        "suspended": "suspended",
        "paused": "paused",
    }.get(power_state)
-
-
def module_to_xapi_vm_power_state(power_state):
    """Map a module VM power state to the resulting XAPI state (None if unknown)."""
    return {
        "poweredon": "running",
        "poweredoff": "halted",
        "restarted": "running",
        "suspended": "suspended",
        "shutdownguest": "halted",
        "rebootguest": "running",
    }.get(power_state)
-
-
def is_valid_ip_addr(ip_addr):
    """Validates given string as IPv4 address.

    Args:
        ip_addr (str): string to validate as IPv4 address.

    Returns:
        bool: True if string is valid IPv4 address, else False.
    """
    octets = ip_addr.split('.')
    if len(octets) != 4:
        return False
    # Each dotted part must be a decimal number in 0..255.
    return all(octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets)
-
-
def is_valid_ip_netmask(ip_netmask):
    """Validates given string as IPv4 netmask.

    Args:
        ip_netmask (str): string to validate as IPv4 netmask.

    Returns:
        bool: True if string is valid IPv4 netmask, else False.
    """
    octets = ip_netmask.split('.')
    if len(octets) != 4:
        return False

    # Only octets representing a contiguous run of leading one-bits can
    # appear in a netmask.
    allowed = ('0', '128', '192', '224', '240', '248', '252', '254', '255')
    if any(octet not in allowed for octet in octets):
        return False

    # Once an octet is not 255, everything to its right must be 0.
    for i in range(3):
        if octets[i] != '255' and any(octets[j] != '0' for j in range(i + 1, 4)):
            return False

    return True
-
-
def is_valid_ip_prefix(ip_prefix):
    """Validates given string as IPv4 prefix.

    Args:
        ip_prefix (str): string to validate as IPv4 prefix.

    Returns:
        bool: True if string is valid IPv4 prefix (0-32), else False.
    """
    # isdigit() already excludes signs, so only the upper bound remains.
    return ip_prefix.isdigit() and int(ip_prefix) <= 32
-
-
def ip_prefix_to_netmask(ip_prefix, skip_check=False):
    """Converts IPv4 prefix to netmask.

    Args:
        ip_prefix (str): IPv4 prefix to convert.
        skip_check (bool): Skip validation of IPv4 prefix
            (default: False). Use if you are sure IPv4 prefix is valid.

    Returns:
        str: IPv4 netmask equivalent to given IPv4 prefix if
        IPv4 prefix is valid, else an empty string.
    """
    if not skip_check and not is_valid_ip_prefix(ip_prefix):
        return ""
    # Build the 32-bit mask and slice it into dotted-quad bytes.
    mask = 0xffffffff << (32 - int(ip_prefix))
    return '.'.join(str((mask >> shift) & 0xff) for shift in (24, 16, 8, 0))
-
-
def ip_netmask_to_prefix(ip_netmask, skip_check=False):
    """Converts IPv4 netmask to prefix.

    Args:
        ip_netmask (str): IPv4 netmask to convert.
        skip_check (bool): Skip validation of IPv4 netmask
            (default: False). Use if you are sure IPv4 netmask is valid.

    Returns:
        str: IPv4 prefix equivalent to given IPv4 netmask if
        IPv4 netmask is valid, else an empty string.
    """
    if not skip_check and not is_valid_ip_netmask(ip_netmask):
        return ""
    # The prefix length equals the total count of one-bits in the mask.
    prefix = sum(bin(int(octet)).count("1") for octet in ip_netmask.split("."))
    return str(prefix)
-
-
def is_valid_ip6_addr(ip6_addr):
    """Validates given string as IPv6 address.

    Args:
        ip6_addr (str): string to validate as IPv6 address.

    Returns:
        bool: True if string is valid IPv6 address, else False.
    """
    # Guard: an empty string previously raised IndexError below (the list
    # became empty after popping the leading '' produced by split).
    if not ip6_addr:
        return False

    ip6_addr = ip6_addr.lower()
    ip6_addr_split = ip6_addr.split(':')

    # A leading/trailing '::' yields empty strings at the ends; drop them
    # so at most one '' remains to represent the compressed run.
    if ip6_addr_split[0] == "":
        ip6_addr_split.pop(0)

    if ip6_addr_split and ip6_addr_split[-1] == "":
        ip6_addr_split.pop(-1)

    if len(ip6_addr_split) > 8:
        return False

    if ip6_addr_split.count("") > 1:
        return False
    elif ip6_addr_split.count("") == 1:
        ip6_addr_split.remove("")
    else:
        # Without '::' compression all 8 hextets must be present.
        if len(ip6_addr_split) != 8:
            return False

    ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$')

    for ip6_addr_hextet in ip6_addr_split:
        if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)):
            return False

    return True
-
-
def is_valid_ip6_prefix(ip6_prefix):
    """Validates given string as IPv6 prefix.

    Args:
        ip6_prefix (str): string to validate as IPv6 prefix.

    Returns:
        bool: True if string is valid IPv6 prefix (0-128), else False.
    """
    # isdigit() already excludes signs, so only the upper bound remains.
    return ip6_prefix.isdigit() and int(ip6_prefix) <= 128
-
-
def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""):
    """Finds and returns a reference to arbitrary XAPI object.

    An object is searched by using either name (name_label) or UUID
    with UUID taken precedence over name.

    Args:
        module: Reference to Ansible module object.
        name (str): Name (name_label) of an object to search for.
        uuid (str): UUID of an object to search for.
        obj_type (str): Any valid XAPI object type. See XAPI docs.
        fail (bool): Should function fail with error message if object
        is not found or exit silently (default: True). The function
        always fails if multiple objects with same name are found.
        msg_prefix (str): A string error messages should be prefixed
        with (default: "").

    Returns:
        XAPI reference to found object or None if object is not found
        and fail=False.
    """
    xapi_session = XAPI.connect(module)

    # Friendly obj_type names used in error messages map onto the real
    # XAPI classes used in the xenapi_request calls below.
    if obj_type in ["template", "snapshot"]:
        real_obj_type = "VM"
    elif obj_type == "home server":
        real_obj_type = "host"
    elif obj_type == "ISO image":
        real_obj_type = "VDI"
    else:
        real_obj_type = obj_type

    obj_ref = None

    # UUID has precedence over name.
    if uuid:
        try:
            # Find object by UUID. If no object is found using given UUID,
            # an exception will be generated.
            obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,))
        except XenAPI.Failure as f:
            # The XAPI failure detail is discarded here in favour of a
            # friendlier "not found" message; with fail=False the lookup
            # silently returns None.
            if fail:
                module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid))
    elif name:
        try:
            # Find object by name (name_label).
            obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,))
        except XenAPI.Failure as f:
            module.fail_json(msg="XAPI ERROR: %s" % f.details)

        # If obj_ref_list is empty.
        if not obj_ref_list:
            if fail:
                module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name))
        # If obj_ref_list contains multiple object references.
        elif len(obj_ref_list) > 1:
            module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name))
        # The obj_ref_list contains only one object reference.
        else:
            obj_ref = obj_ref_list[0]
    else:
        module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type))

    return obj_ref
-
-
def gather_vm_params(module, vm_ref):
    """Gathers all VM parameters available in XAPI database.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.

    Returns:
        dict: VM parameters, with affinity, VBD/VDI, VIF/network and
        guest-metrics references replaced by their full records, plus a
        synthesized 'customization_agent' key ("native" or "custom").
    """
    # We silently return empty vm_params if bad vm_ref was supplied.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        return {}

    xapi_session = XAPI.connect(module)

    try:
        vm_params = xapi_session.xenapi.VM.get_record(vm_ref)

        # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced.

        # Affinity.
        if vm_params['affinity'] != "OpaqueRef:NULL":
            vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity'])
            vm_params['affinity'] = vm_affinity
        else:
            vm_params['affinity'] = {}

        # VBDs.
        vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']]

        # List of VBDs is usually sorted by userdevice but we sort just
        # in case. We need this list sorted by userdevice so that we can
        # make positional pairing with module.params['disks'].
        vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice']))
        vm_params['VBDs'] = vm_vbd_params_list

        # VDIs.
        for vm_vbd_params in vm_params['VBDs']:
            if vm_vbd_params['VDI'] != "OpaqueRef:NULL":
                vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI'])
            else:
                vm_vdi_params = {}

            vm_vbd_params['VDI'] = vm_vdi_params

        # VIFs.
        vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']]

        # List of VIFs is usually sorted by device but we sort just
        # in case. We need this list sorted by device so that we can
        # make positional pairing with module.params['networks'].
        vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device']))
        vm_params['VIFs'] = vm_vif_params_list

        # Networks.
        for vm_vif_params in vm_params['VIFs']:
            if vm_vif_params['network'] != "OpaqueRef:NULL":
                vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network'])
            else:
                vm_network_params = {}

            vm_vif_params['network'] = vm_network_params

        # Guest metrics.
        if vm_params['guest_metrics'] != "OpaqueRef:NULL":
            vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics'])
            vm_params['guest_metrics'] = vm_guest_metrics
        else:
            vm_params['guest_metrics'] = {}

        # Detect customization agent.
        xenserver_version = get_xenserver_version(module)

        # NOTE(review): the minor-version comparison ">= 0" is always true,
        # so this effectively just tests for a major version >= 7.
        if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and
                "feature-static-ip-setting" in vm_params['guest_metrics']['other']):
            vm_params['customization_agent'] = "native"
        else:
            vm_params['customization_agent'] = "custom"

    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return vm_params
-
-
def gather_vm_facts(module, vm_params):
    """Gathers VM facts.

    Args:
        module: Reference to Ansible module object.
        vm_params (dict): A dictionary with VM parameters as returned
        by gather_vm_params() function.

    Returns:
        dict: VM facts (state, name, hardware, disks, cdrom, networks,
        and assorted XAPI metadata).
    """
    # We silently return empty vm_facts if no vm_params are available.
    if not vm_params:
        return {}

    xapi_session = XAPI.connect(module)

    # Gather facts.
    vm_facts = {
        "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()),
        "name": vm_params['name_label'],
        "name_desc": vm_params['name_description'],
        "uuid": vm_params['uuid'],
        "is_template": vm_params['is_a_template'],
        "folder": vm_params['other_config'].get('folder', ''),
        "hardware": {
            "num_cpus": int(vm_params['VCPUs_max']),
            "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')),
            # memory_dynamic_max is in bytes; converted here to MB.
            "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576),
        },
        "disks": [],
        "cdrom": {},
        "networks": [],
        "home_server": vm_params['affinity'].get('name_label', ''),
        "domid": vm_params['domid'],
        "platform": vm_params['platform'],
        "other_config": vm_params['other_config'],
        "xenstore_data": vm_params['xenstore_data'],
        "customization_agent": vm_params['customization_agent'],
    }

    for vm_vbd_params in vm_params['VBDs']:
        if vm_vbd_params['type'] == "Disk":
            vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR'])

            vm_disk_params = {
                "size": int(vm_vbd_params['VDI']['virtual_size']),
                "name": vm_vbd_params['VDI']['name_label'],
                "name_desc": vm_vbd_params['VDI']['name_description'],
                "sr": vm_disk_sr_params['name_label'],
                "sr_uuid": vm_disk_sr_params['uuid'],
                "os_device": vm_vbd_params['device'],
                "vbd_userdevice": vm_vbd_params['userdevice'],
            }

            vm_facts['disks'].append(vm_disk_params)
        elif vm_vbd_params['type'] == "CD":
            # An empty CD drive is reported as type "none"; otherwise the
            # inserted ISO's name is returned.
            if vm_vbd_params['empty']:
                vm_facts['cdrom'].update(type="none")
            else:
                vm_facts['cdrom'].update(type="iso")
                vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label'])

    for vm_vif_params in vm_params['VIFs']:
        vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {})

        vm_network_params = {
            "name": vm_vif_params['network']['name_label'],
            "mac": vm_vif_params['MAC'],
            "vif_device": vm_vif_params['device'],
            "mtu": vm_vif_params['MTU'],
            "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''),
            "prefix": "",
            "netmask": "",
            "gateway": "",
            # IPv6 addresses are collected from guest metrics keys of the
            # form "<device>/ipv6/<n>", sorted for a stable order.
            "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" %
                                                                                                                           vm_vif_params['device'])],
            "prefix6": "",
            "gateway6": "",
        }

        if vm_params['customization_agent'] == "native":
            if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
                vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1]
                vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix'])

            vm_network_params['gateway'] = vm_vif_params['ipv4_gateway']

            if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
                vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1]

            vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway']

        elif vm_params['customization_agent'] == "custom":
            # The custom agent publishes network config through xenstore.
            vm_xenstore_data = vm_params['xenstore_data']

            for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']:
                vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "")

        vm_facts['networks'].append(vm_network_params)

    return vm_facts
-
-
def set_vm_power_state(module, vm_ref, power_state, timeout=300):
    """Controls VM power state.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.
        power_state (str): Power state to put VM into. Accepted values:

            - poweredon
            - poweredoff
            - restarted
            - suspended
            - shutdownguest
            - rebootguest

        timeout (int): timeout in seconds (default: 300).

    Returns:
        tuple (bool, str): Bool element is True if VM power state has
        changed by calling this function, else False. Str element carries
        a value of resulting power state as defined by XAPI - 'running',
        'halted' or 'suspended'.
    """
    # Fail if we don't have a valid VM reference.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!")

    xapi_session = XAPI.connect(module)

    # Normalize user input such as "powered-on" or "powered_on".
    power_state = power_state.replace('_', '').replace('-', '').lower()
    vm_power_state_resulting = module_to_xapi_vm_power_state(power_state)

    state_changed = False

    try:
        # Get current state of the VM.
        vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())

        if vm_power_state_current != power_state:
            if power_state == "poweredon":
                if not module.check_mode:
                    # VM can be in either halted, suspended, paused or running state.
                    # For VM to be in running state, start has to be called on halted,
                    # resume on suspended and unpause on paused VM.
                    if vm_power_state_current == "poweredoff":
                        xapi_session.xenapi.VM.start(vm_ref, False, False)
                    elif vm_power_state_current == "suspended":
                        xapi_session.xenapi.VM.resume(vm_ref, False, False)
                    elif vm_power_state_current == "paused":
                        xapi_session.xenapi.VM.unpause(vm_ref)
            elif power_state == "poweredoff":
                if not module.check_mode:
                    # hard_shutdown will halt VM regardless of current state.
                    xapi_session.xenapi.VM.hard_shutdown(vm_ref)
            elif power_state == "restarted":
                # hard_reboot will restart VM only if VM is in paused or running state.
                if vm_power_state_current in ["paused", "poweredon"]:
                    if not module.check_mode:
                        xapi_session.xenapi.VM.hard_reboot(vm_ref)
                else:
                    module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current)
            elif power_state == "suspended":
                # running state is required for suspend.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        xapi_session.xenapi.VM.suspend(vm_ref)
                else:
                    module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current)
            elif power_state == "shutdownguest":
                # running state is required for guest shutdown.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        # timeout 0 means wait indefinitely via the
                        # synchronous call; otherwise run async and poll.
                        if timeout == 0:
                            xapi_session.xenapi.VM.clean_shutdown(vm_ref)
                        else:
                            task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref)
                            task_result = wait_for_task(module, task_ref, timeout)

                            if task_result:
                                module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result)
                else:
                    module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current)
            elif power_state == "rebootguest":
                # running state is required for guest reboot.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        if timeout == 0:
                            xapi_session.xenapi.VM.clean_reboot(vm_ref)
                        else:
                            task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref)
                            task_result = wait_for_task(module, task_ref, timeout)

                            if task_result:
                                module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result)
                else:
                    module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current)
            else:
                module.fail_json(msg="Requested VM power state '%s' is unsupported!" % power_state)

            # NOTE: reached also in check mode (the API calls above are
            # skipped), signalling the change that would be made.
            state_changed = True
    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return (state_changed, vm_power_state_resulting)
-
-
def wait_for_task(module, task_ref, timeout=300):
    """Waits for async XAPI task to finish.

    Args:
        module: Reference to Ansible module object.
        task_ref (str): XAPI reference to task.
        timeout (int): timeout in seconds (default: 300). A value of 0
            waits indefinitely.

    Returns:
        str: failure message on failure, else an empty string.
    """
    # Fail if we don't have a valid task reference.
    if not task_ref or task_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!")

    xapi_session = XAPI.connect(module)

    # Polling interval in seconds.
    interval = 2

    result = ""

    # If we have to wait indefinitely, make time_left larger than 0 so we can
    # enter while loop.
    if timeout == 0:
        time_left = 1
    else:
        time_left = timeout

    try:
        while time_left > 0:
            task_status = xapi_session.xenapi.task.get_status(task_ref).lower()

            if task_status == "pending":
                # Task is still running.
                time.sleep(interval)

                # We decrease time_left only if we don't wait indefinitely.
                if timeout != 0:
                    time_left -= interval

                continue
            elif task_status == "success":
                # Task is done.
                break
            else:
                # Task failed.
                result = task_status
                break
        else:
            # while/else: loop condition went false without a break.
            # We timed out.
            result = "timeout"

        # The task record is always destroyed, whatever the outcome.
        xapi_session.xenapi.task.destroy(task_ref)
    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return result
-
-
def wait_for_vm_ip_address(module, vm_ref, timeout=300):
    """Waits for VM to acquire an IP address.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.
        timeout (int): timeout in seconds (default: 300). A value of 0
            waits indefinitely.

    Returns:
        dict: VM guest metrics as retrieved by
        VM_guest_metrics.get_record() XAPI method with info
        on IP address acquired.
    """
    # Fail if we don't have a valid VM reference.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!")

    xapi_session = XAPI.connect(module)

    vm_guest_metrics = {}

    try:
        # We translate VM power state string so that error message can be
        # consistent with module VM power states.
        vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())

        if vm_power_state != 'poweredon':
            module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state)

        # Polling interval in seconds.
        interval = 2

        # If we have to wait indefinitely, make time_left larger than 0 so we can
        # enter while loop.
        if timeout == 0:
            time_left = 1
        else:
            time_left = timeout

        while time_left > 0:
            vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref)

            if vm_guest_metrics_ref != "OpaqueRef:NULL":
                vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref)
                vm_ips = vm_guest_metrics['networks']

                # "0/ip" is the guest-metrics key for device 0's IPv4
                # address (see gather_vm_facts' "%s/ip" key format) —
                # presumably the first NIC; only that one is awaited.
                if "0/ip" in vm_ips:
                    break

            time.sleep(interval)

            # We decrease time_left only if we don't wait indefinitely.
            if timeout != 0:
                time_left -= interval
        else:
            # We timed out.
            module.fail_json(msg="Timed out waiting for VM IP address!")

    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return vm_guest_metrics
-
-
def get_xenserver_version(module):
    """Returns XenServer version.

    Args:
        module: Reference to Ansible module object.

    Returns:
        list: Element [0] is major version. Element [1] is minor version.
        Element [2] is update number.
    """
    xapi_session = XAPI.connect(module)

    host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session)

    try:
        product_version = xapi_session.xenapi.host.get_software_version(host_ref)['product_version']
        return [int(part) for part in product_version.split('.')]
    except ValueError:
        # Version string did not parse into integers; report an
        # all-zero version instead of failing.
        return [0, 0, 0]
-
-
class XAPI(object):
    """Class for XAPI session management.

    A single session is cached on the class so all callers within one
    module run share the same connection.
    """
    _xapi_session = None

    @classmethod
    def connect(cls, module, disconnect_atexit=True):
        """Establishes XAPI connection and returns session reference.

        If no existing session is available, establishes a new one
        and returns it, else returns existing one.

        Args:
            module: Reference to Ansible module object.
            disconnect_atexit (bool): Controls if method should
                register atexit handler to disconnect from XenServer
                on module exit (default: True).

        Returns:
            XAPI session reference.
        """
        # Fast path: reuse the session established by an earlier call.
        if cls._xapi_session is not None:
            return cls._xapi_session

        hostname = module.params['hostname']
        username = module.params['username']
        password = module.params['password']
        ignore_ssl = not module.params['validate_certs']

        if hostname == 'localhost':
            # Local connections go through the local socket, so any
            # supplied credentials are ignored.
            cls._xapi_session = XenAPI.xapi_local()
            username = ''
            password = ''
        else:
            # Default to http:// when no scheme was given because
            # https:// is problematic in most setups.
            if not hostname.startswith("http://") and not hostname.startswith("https://"):
                hostname = "http://%s" % hostname

            try:
                # ignore_ssl is only understood by the XenAPI library
                # shipped with the XenServer 7.2 SDK and later (and needs
                # Python 2.7.9+). There is no way to probe the library
                # version, so an older library signals itself with a
                # TypeError on the unknown keyword argument.
                cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl)
            except TypeError:
                # Retry without ignore_ssl for older libraries.
                cls._xapi_session = XenAPI.Session(hostname)

            if not password:
                password = ''

        try:
            cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible')
        except XenAPI.Failure as failure:
            module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, failure.details))

        # Disabling atexit should be used in special cases only.
        if disconnect_atexit:
            atexit.register(cls._xapi_session.logout)

        return cls._xapi_session
-
-
class XenServerObject(object):
    """Base class for all XenServer objects.

    This class contains active XAPI session reference and common
    attributes with useful info about XenServer host/pool.

    Attributes:
        module: Reference to Ansible module object.
        xapi_session: Reference to XAPI session.
        pool_ref (str): XAPI reference to a pool currently connected to.
        default_sr_ref (str): XAPI reference to a pool default
            Storage Repository.
        xenserver_version (list of int): XenServer major version, minor
            version and update number, as returned by
            get_xenserver_version().
    """

    def __init__(self, module):
        """Inits XenServerObject using common module parameters.

        Fails the module when the XenAPI library is not available or
        when any XAPI call raises XenAPI.Failure.

        Args:
            module: Reference to Ansible module object.
        """
        # XenAPI is an optional dependency; fail with the standard
        # Ansible missing-library message if the import failed.
        if not HAS_XENAPI:
            module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR)

        self.module = module
        self.xapi_session = XAPI.connect(module)

        try:
            # A session is scoped to a single pool, so the first pool
            # record is the pool we are connected to.
            self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0]
            self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref)
            self.xenserver_version = get_xenserver_version(module)
        except XenAPI.Failure as f:
            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)