summaryrefslogtreecommitdiff
path: root/lib/ansible
diff options
context:
space:
mode:
authorJohn R Barker <john@johnrbarker.com>2018-01-30 12:23:52 +0000
committerGitHub <noreply@github.com>2018-01-30 12:23:52 +0000
commita23c95023b3e4d79b971b75cc8c5878c62ebf501 (patch)
tree9b88fd6be81a2bf2cc3bf613802c83be069bb6ef /lib/ansible
parent7c83f006c0bd77e9288388d7de62cee6cf10d5ee (diff)
downloadansible-a23c95023b3e4d79b971b75cc8c5878c62ebf501.tar.gz
Module deprecation: docs, scheme and tests (#34100)
Enforce module deprecation. After module has reached the end of its deprecation cycle we will replace it with a docs stub. * Replace deprecated modules with docs-only stub * Use of deprecated past deprecation cycle gives meaningful message (see examples below) * Enforce documentation.deprecation dict via `schema.py` * Update `ansible-doc` and web docs to display documentation.deprecation * Document that structure in `dev_guide` * Ensure that all modules starting with `_` have a `deprecation:` block * Ensure `deprecation:` block is only used on modules that start with `_` * `removed_in` A string which represents when this module needs **deleting** * CHANGELOG.md and porting_guide_2.5.rst list removed modules as well as alternatives * CHANGELOG.md links to porting guide index To ensure that meaningful messages are given to the user if they try to use a module at the end of its deprecation cycle we enforce the module to contain: ```python if __name__ == '__main__': removed_module() ```
Diffstat (limited to 'lib/ansible')
-rw-r--r--lib/ansible/cli/doc.py2
-rw-r--r--lib/ansible/modules/cloud/amazon/_ec2_ami_find.py5
-rw-r--r--lib/ansible/modules/cloud/amazon/_ec2_ami_search.py126
-rw-r--r--lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py5
-rw-r--r--lib/ansible/modules/cloud/amazon/_ec2_vpc.py611
-rw-r--r--lib/ansible/modules/cloud/azure/_azure.py5
-rw-r--r--lib/ansible/modules/cloud/cloudstack/_cs_nic.py5
-rw-r--r--lib/ansible/modules/cloud/docker/_docker.py1465
-rwxr-xr-xlib/ansible/modules/clustering/k8s/_kubernetes.py5
-rw-r--r--lib/ansible/modules/clustering/openshift/_oc.py5
-rw-r--r--lib/ansible/modules/network/citrix/_netscaler.py5
-rw-r--r--lib/ansible/modules/network/cumulus/_cl_bond.py283
-rw-r--r--lib/ansible/modules/network/cumulus/_cl_bridge.py260
-rw-r--r--lib/ansible/modules/network/cumulus/_cl_img_install.py215
-rw-r--r--lib/ansible/modules/network/cumulus/_cl_interface.py251
-rw-r--r--lib/ansible/modules/network/cumulus/_cl_interface_policy.py83
-rw-r--r--lib/ansible/modules/network/cumulus/_cl_license.py47
-rw-r--r--lib/ansible/modules/network/cumulus/_cl_ports.py140
-rw-r--r--lib/ansible/modules/network/nxos/_nxos_ip_interface.py5
-rw-r--r--lib/ansible/modules/network/nxos/_nxos_mtu.py265
-rw-r--r--lib/ansible/modules/network/nxos/_nxos_portchannel.py5
-rw-r--r--lib/ansible/modules/network/nxos/_nxos_switchport.py5
-rw-r--r--lib/ansible/modules/network/panos/_panos_nat_policy.py (renamed from lib/ansible/modules/network/panos/panos_nat_policy.py)7
-rw-r--r--lib/ansible/modules/network/panos/_panos_security_policy.py (renamed from lib/ansible/modules/network/panos/panos_security_policy.py)7
-rw-r--r--lib/ansible/modules/utilities/helper/_accelerate.py14
-rw-r--r--lib/ansible/modules/utilities/logic/_include.py5
-rw-r--r--lib/ansible/modules/windows/_win_msi.py5
27 files changed, 137 insertions, 3699 deletions
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
index 99cf388721..dc001ee05f 100644
--- a/lib/ansible/cli/doc.py
+++ b/lib/ansible/cli/doc.py
@@ -468,7 +468,7 @@ class DocCLI(CLI):
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n")
if isinstance(doc['deprecated'], dict):
- text.append("\tReason: %(why)s\n\tScheduled removal: Ansible %(version)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
+ text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
else:
text.append("%s" % doc.pop('deprecated'))
text.append("\n")
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_ami_find.py b/lib/ansible/modules/cloud/amazon/_ec2_ami_find.py
index 1ae5f081f1..6bd6e3436a 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_ami_find.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_ami_find.py
@@ -16,7 +16,10 @@ DOCUMENTATION = r'''
module: ec2_ami_find
version_added: '2.0'
short_description: Searches for AMIs to obtain the AMI ID and other information
-deprecated: Deprecated in 2.5. Use M(ec2_ami_facts) instead.
+deprecated:
+ removed_in: "2.9"
+ why: Various AWS modules have been combined and replaced with M(ec2_ami_facts).
+ alternative: Use M(ec2_ami_facts) instead.
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_ami_search.py b/lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
index d270e159c3..c17a3727bb 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_ami_search.py
@@ -16,7 +16,10 @@ DOCUMENTATION = '''
---
module: ec2_ami_search
short_description: Retrieve AWS AMI information for a given operating system.
-deprecated: "Use M(ec2_ami_find) instead."
+deprecated:
+ removed_in: "2.2"
+ why: Various AWS modules have been combined and replaced with M(ec2_ami_facts).
+ alternative: Use M(ec2_ami_find) instead.
version_added: "1.6"
description:
- Look up the most recent AMI on AWS for a given operating system.
@@ -84,124 +87,7 @@ EXAMPLES = '''
key_name: mykey
'''
-import csv
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.urls import fetch_url
-
-
-SUPPORTED_DISTROS = ['ubuntu']
-
-AWS_REGIONS = ['ap-northeast-1',
- 'ap-southeast-1',
- 'ap-northeast-2',
- 'ap-southeast-2',
- 'ap-south-1',
- 'ca-central-1',
- 'eu-central-1',
- 'eu-west-1',
- 'eu-west-2',
- 'sa-east-1',
- 'us-east-1',
- 'us-east-2',
- 'us-west-1',
- 'us-west-2',
- "us-gov-west-1"]
-
-
-def get_url(module, url):
- """ Get url and return response """
-
- r, info = fetch_url(module, url)
- if info['status'] != 200:
- # Backwards compat
- info['status_code'] = info['status']
- module.fail_json(**info)
- return r
-
-
-def ubuntu(module):
- """ Get the ami for ubuntu """
-
- release = module.params['release']
- stream = module.params['stream']
- store = module.params['store']
- arch = module.params['arch']
- region = module.params['region']
- virt = module.params['virt']
-
- url = get_ubuntu_url(release, stream)
-
- req = get_url(module, url)
- reader = csv.reader(req, delimiter='\t')
- try:
- ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream,
- store, arch, region, virt)
- module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag,
- serial=serial)
- except KeyError:
- module.fail_json(msg="No matching AMI found")
-
-
-def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt):
- """ Look up the Ubuntu AMI that matches query given a table of AMIs
-
- table: an iterable that returns a row of
- (release, stream, tag, serial, region, ami, aki, ari, virt)
- release: ubuntu release name
- stream: 'server' or 'desktop'
- store: 'ebs', 'ebs-io1', 'ebs-ssd' or 'instance-store'
- arch: 'i386' or 'amd64'
- region: EC2 region
- virt: 'paravirtual' or 'hvm'
-
- Returns (ami, aki, ari, tag, serial)"""
- expected = (release, stream, store, arch, region, virt)
-
- for row in table:
- (actual_release, actual_stream, tag, serial,
- actual_store, actual_arch, actual_region, ami, aki, ari,
- actual_virt) = row
- actual = (actual_release, actual_stream, actual_store, actual_arch,
- actual_region, actual_virt)
- if actual == expected:
- # aki and ari are sometimes blank
- if aki == '':
- aki = None
- if ari == '':
- ari = None
- return (ami, aki, ari, tag, serial)
-
- raise KeyError()
-
-
-def get_ubuntu_url(release, stream):
- url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt"
- return url % (release, stream)
-
-
-def main():
- arg_spec = dict(
- distro=dict(required=True, choices=SUPPORTED_DISTROS),
- release=dict(required=True),
- stream=dict(required=False, default='server',
- choices=['desktop', 'server']),
- store=dict(required=False, default='ebs',
- choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']),
- arch=dict(required=False, default='amd64',
- choices=['i386', 'amd64']),
- region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
- virt=dict(required=False, default='paravirtual',
- choices=['paravirtual', 'hvm']),
- )
- module = AnsibleModule(argument_spec=arg_spec)
- distro = module.params['distro']
-
- if distro == 'ubuntu':
- ubuntu(module)
- else:
- module.fail_json(msg="Unsupported distro: %s" % distro)
-
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py b/lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py
index 57e342e3d7..a3aece7a15 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_remote_facts.py
@@ -15,7 +15,10 @@ DOCUMENTATION = '''
---
module: ec2_remote_facts
short_description: Gather facts about ec2 instances in AWS
-deprecated: Deprecated in 2.4. Use M(ec2_instance_facts) instead.
+deprecated:
+ removed_in: "2.8"
+ why: Replaced with boto3 version.
+ alternative: Use M(ec2_instance_facts) instead.
description:
- Gather facts about ec2 instances in AWS
version_added: "2.0"
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
index 2b1b88862e..f6765f7ef1 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
@@ -18,10 +18,11 @@ short_description: configure AWS virtual private clouds
description:
- Create or terminates AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "1.4"
-deprecated: >-
- Deprecated in 2.3. Use M(ec2_vpc_net) along with supporting modules including
- M(ec2_vpc_igw), M(ec2_vpc_route_table), M(ec2_vpc_subnet), M(ec2_vpc_dhcp_options),
- M(ec2_vpc_nat_gateway), M(ec2_vpc_nacl).
+deprecated:
+ removed_in: "2.5"
+ why: Replaced by dedicated modules.
+ alternative: Use M(ec2_vpc_net) along with supporting modules including M(ec2_vpc_igw), M(ec2_vpc_route_table), M(ec2_vpc_subnet),
+ M(ec2_vpc_dhcp_options), M(ec2_vpc_nat_gateway), M(ec2_vpc_nacl).
options:
cidr_block:
description:
@@ -159,605 +160,7 @@ EXAMPLES = '''
# the delete will fail until those dependencies are removed.
'''
-import time
-
-try:
- import boto
- import boto.ec2
- import boto.vpc
- from boto.exception import EC2ResponseError
-
- HAS_BOTO = True
-except ImportError:
- HAS_BOTO = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
-
-
-def get_vpc_info(vpc):
- """
- Retrieves vpc information from an instance
- ID and returns it as a dictionary
- """
-
- return({
- 'id': vpc.id,
- 'cidr_block': vpc.cidr_block,
- 'dhcp_options_id': vpc.dhcp_options_id,
- 'region': vpc.region.name,
- 'state': vpc.state,
- })
-
-
-def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
- """
- Finds a VPC that matches a specific id or cidr + tags
-
- module : AnsibleModule object
- vpc_conn: authenticated VPCConnection connection object
-
- Returns:
- A VPC object that matches either an ID or CIDR and one or more tag values
- """
-
- if vpc_id is None and cidr is None:
- module.fail_json(
- msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting'
- )
-
- found_vpcs = []
-
- resource_tags = module.params.get('resource_tags')
-
- # Check for existing VPC by cidr_block or id
- if vpc_id is not None:
- found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available', })
-
- else:
- previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
-
- for vpc in previous_vpcs:
- # Get all tags for each of the found VPCs
- vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
-
- # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC
- if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())):
- found_vpcs.append(vpc)
-
- found_vpc = None
-
- if len(found_vpcs) == 1:
- found_vpc = found_vpcs[0]
-
- if len(found_vpcs) > 1:
- module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')
-
- return (found_vpc)
-
-
-def routes_match(rt_list=None, rt=None, igw=None):
- """
- Check if the route table has all routes as in given list
-
- rt_list : A list if routes provided in the module
- rt : The Remote route table object
- igw : The internet gateway object for this vpc
-
- Returns:
- True when there provided routes and remote routes are the same.
- False when provided routes and remote routes are different.
- """
-
- local_routes = []
- remote_routes = []
- for route in rt_list:
- route_kwargs = {
- 'gateway_id': None,
- 'instance_id': None,
- 'interface_id': None,
- 'vpc_peering_connection_id': None,
- 'state': 'active'
- }
- if route['gw'] == 'igw':
- route_kwargs['gateway_id'] = igw.id
- elif route['gw'].startswith('i-'):
- route_kwargs['instance_id'] = route['gw']
- elif route['gw'].startswith('eni-'):
- route_kwargs['interface_id'] = route['gw']
- elif route['gw'].startswith('pcx-'):
- route_kwargs['vpc_peering_connection_id'] = route['gw']
- else:
- route_kwargs['gateway_id'] = route['gw']
- route_kwargs['destination_cidr_block'] = route['dest']
- local_routes.append(route_kwargs)
- for j in rt.routes:
- remote_routes.append(j.__dict__)
- match = []
- for i in local_routes:
- change = "false"
- for j in remote_routes:
- if set(i.items()).issubset(set(j.items())):
- change = "true"
- match.append(change)
- if 'false' in match:
- return False
- else:
- return True
-
-
-def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
- """
- Checks if the remote routes match the local routes.
-
- route_tables : Route_tables parameter in the module
- vpc_conn : The VPC connection object
- module : The module object
- vpc : The vpc object for this route table
- igw : The internet gateway object for this vpc
-
- Returns:
- True when there is difference between the provided routes and remote routes and if subnet associations are different.
- False when both routes and subnet associations matched.
-
- """
- # We add a one for the main table
- rtb_len = len(route_tables) + 1
- remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
- if remote_rtb_len != rtb_len:
- return True
- for rt in route_tables:
- rt_id = None
- for sn in rt['subnets']:
- rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
- if len(rsn) != 1:
- module.fail_json(
- msg='The subnet {0} to associate with route_table {1} '
- 'does not exist, aborting'.format(sn, rt)
- )
- nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
- if not nrt:
- return True
- else:
- nrt = nrt[0]
- if not rt_id:
- rt_id = nrt.id
- if not routes_match(rt['routes'], nrt, igw):
- return True
- continue
- else:
- if rt_id == nrt.id:
- continue
- else:
- return True
- return True
- return False
-
-
-def create_vpc(module, vpc_conn):
- """
- Creates a new or modifies an existing VPC.
-
- module : AnsibleModule object
- vpc_conn: authenticated VPCConnection connection object
-
- Returns:
- A dictionary with information
- about the VPC and subnets that were launched
- """
-
- id = module.params.get('vpc_id')
- cidr_block = module.params.get('cidr_block')
- instance_tenancy = module.params.get('instance_tenancy')
- dns_support = module.params.get('dns_support')
- dns_hostnames = module.params.get('dns_hostnames')
- subnets = module.params.get('subnets')
- internet_gateway = module.params.get('internet_gateway')
- route_tables = module.params.get('route_tables')
- vpc_spec_tags = module.params.get('resource_tags')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
- changed = False
-
- # Check for existing VPC by cidr_block + tags or id
- previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
-
- if previous_vpc is not None:
- changed = False
- vpc = previous_vpc
- else:
- changed = True
- try:
- vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy)
- # wait here until the vpc is available
- pending = True
- wait_timeout = time.time() + wait_timeout
- while wait and wait_timeout > time.time() and pending:
- try:
- pvpc = vpc_conn.get_all_vpcs(vpc.id)
- if hasattr(pvpc, 'state'):
- if pvpc.state == "available":
- pending = False
- elif hasattr(pvpc[0], 'state'):
- if pvpc[0].state == "available":
- pending = False
- # sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
- # when that happens, just wait a bit longer and try again
- except boto.exception.BotoServerError as e:
- if e.error_code != 'InvalidVpcID.NotFound':
- raise
- if pending:
- time.sleep(5)
- if wait and wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="wait for vpc availability timeout on %s" % time.asctime())
-
- except boto.exception.BotoServerError as e:
- module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
-
- # Done with base VPC, now change to attributes and features.
-
- # Add resource tags
- vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
-
- if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
- new_tags = {}
-
- for (key, value) in set(vpc_spec_tags.items()):
- if (key, value) not in set(vpc_tags.items()):
- new_tags[key] = value
-
- if new_tags:
- vpc_conn.create_tags(vpc.id, new_tags)
-
- # boto doesn't appear to have a way to determine the existing
- # value of the dns attributes, so we just set them.
- # It also must be done one at a time.
- vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support)
- vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames)
-
- # Process all subnet properties
- if subnets is not None:
- if not isinstance(subnets, list):
- module.fail_json(msg='subnets needs to be a list of cidr blocks')
-
- current_subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
-
- # First add all new subnets
- for subnet in subnets:
- add_subnet = True
- subnet_tags_current = True
- new_subnet_tags = subnet.get('resource_tags', {})
- subnet_tags_delete = []
-
- for csn in current_subnets:
- if subnet['cidr'] == csn.cidr_block:
- add_subnet = False
-
- # Check if AWS subnet tags are in playbook subnet tags
- existing_tags_subset_of_new_tags = (set(csn.tags.items()).issubset(set(new_subnet_tags.items())))
- # Check if subnet tags in playbook are in AWS subnet tags
- new_tags_subset_of_existing_tags = (set(new_subnet_tags.items()).issubset(set(csn.tags.items())))
-
- if existing_tags_subset_of_new_tags is False:
- try:
- for item in csn.tags.items():
- if item not in new_subnet_tags.items():
- subnet_tags_delete.append(item)
-
- subnet_tags_delete = [key[0] for key in subnet_tags_delete]
- delete_subnet_tag = vpc_conn.delete_tags(csn.id, subnet_tags_delete)
- changed = True
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to delete resource tag, error {0}'.format(e))
- # Add new subnet tags if not current
-
- if new_tags_subset_of_existing_tags is False:
- try:
- changed = True
- create_subnet_tag = vpc_conn.create_tags(csn.id, new_subnet_tags)
-
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to create resource tag, error: {0}'.format(e))
-
- if add_subnet:
- try:
- new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
- new_subnet_tags = subnet.get('resource_tags', {})
- if new_subnet_tags:
- # Sometimes AWS takes its time to create a subnet and so using new subnets's id
- # to create tags results in exception.
- # boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
- # so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
- while len(vpc_conn.get_all_subnets(filters={'subnet-id': new_subnet.id})) == 0:
- time.sleep(0.1)
-
- vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
-
- changed = True
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
-
- # Now delete all absent subnets
- for csubnet in current_subnets:
- delete_subnet = True
- for subnet in subnets:
- if csubnet.cidr_block == subnet['cidr']:
- delete_subnet = False
- if delete_subnet:
- try:
- vpc_conn.delete_subnet(csubnet.id)
- changed = True
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))
-
- # Handle Internet gateway (create/delete igw)
- igw = None
- igw_id = None
- igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
- if len(igws) > 1:
- module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
- if internet_gateway:
- if len(igws) != 1:
- try:
- igw = vpc_conn.create_internet_gateway()
- vpc_conn.attach_internet_gateway(igw.id, vpc.id)
- changed = True
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
- else:
- # Set igw variable to the current igw instance for use in route tables.
- igw = igws[0]
- else:
- if len(igws) > 0:
- try:
- vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
- vpc_conn.delete_internet_gateway(igws[0].id)
- changed = True
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
-
- if igw is not None:
- igw_id = igw.id
-
- # Handle route tables - this may be worth splitting into a
- # different module but should work fine here. The strategy to stay
- # idempotent is to basically build all the route tables as
- # defined, track the route table ids, and then run through the
- # remote list of route tables and delete any that we didn't
- # create. This shouldn't interrupt traffic in theory, but is the
- # only way to really work with route tables over time that I can
- # think of without using painful aws ids. Hopefully boto will add
- # the replace-route-table API to make this smoother and
- # allow control of the 'main' routing table.
- if route_tables is not None:
- rtb_needs_change = rtb_changed(route_tables, vpc_conn, module, vpc, igw)
- if route_tables is not None and rtb_needs_change:
- if not isinstance(route_tables, list):
- module.fail_json(msg='route tables need to be a list of dictionaries')
-
- # Work through each route table and update/create to match dictionary array
- all_route_tables = []
- for rt in route_tables:
- try:
- new_rt = vpc_conn.create_route_table(vpc.id)
- new_rt_tags = rt.get('resource_tags', None)
- if new_rt_tags:
- vpc_conn.create_tags(new_rt.id, new_rt_tags)
- for route in rt['routes']:
- route_kwargs = {}
- if route['gw'] == 'igw':
- if not internet_gateway:
- module.fail_json(
- msg='You asked for an Internet Gateway '
- '(igw) route, but you have no Internet Gateway'
- )
- route_kwargs['gateway_id'] = igw.id
- elif route['gw'].startswith('i-'):
- route_kwargs['instance_id'] = route['gw']
- elif route['gw'].startswith('eni-'):
- route_kwargs['interface_id'] = route['gw']
- elif route['gw'].startswith('pcx-'):
- route_kwargs['vpc_peering_connection_id'] = route['gw']
- else:
- route_kwargs['gateway_id'] = route['gw']
- vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs)
-
- # Associate with subnets
- for sn in rt['subnets']:
- rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id})
- if len(rsn) != 1:
- module.fail_json(
- msg='The subnet {0} to associate with route_table {1} '
- 'does not exist, aborting'.format(sn, rt)
- )
- rsn = rsn[0]
-
- # Disassociate then associate since we don't have replace
- old_rt = vpc_conn.get_all_route_tables(
- filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
- )
- old_rt = [x for x in old_rt if x.id is not None]
- if len(old_rt) == 1:
- old_rt = old_rt[0]
- association_id = None
- for a in old_rt.associations:
- if a.subnet_id == rsn.id:
- association_id = a.id
- vpc_conn.disassociate_route_table(association_id)
-
- vpc_conn.associate_route_table(new_rt.id, rsn.id)
-
- all_route_tables.append(new_rt)
- changed = True
- except EC2ResponseError as e:
- module.fail_json(
- msg='Unable to create and associate route table {0}, error: '
- '{1}'.format(rt, e)
- )
-
- # Now that we are good to go on our new route tables, delete the
- # old ones except the 'main' route table as boto can't set the main
- # table yet.
- all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id})
- for rt in all_rts:
- if rt.id is None:
- continue
- delete_rt = True
- for newrt in all_route_tables:
- if newrt.id == rt.id:
- delete_rt = False
- break
- if delete_rt:
- rta = rt.associations
- is_main = False
- for a in rta:
- if a.main:
- is_main = True
- break
- try:
- if not is_main:
- vpc_conn.delete_route_table(rt.id)
- changed = True
- except EC2ResponseError as e:
- module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))
-
- vpc_dict = get_vpc_info(vpc)
-
- created_vpc_id = vpc.id
- returned_subnets = []
- current_subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
-
- for sn in current_subnets:
- returned_subnets.append({
- 'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
- 'cidr': sn.cidr_block,
- 'az': sn.availability_zone,
- 'id': sn.id,
- })
-
- if subnets is not None:
- # Sort subnets by the order they were listed in the play
- order = {}
- for idx, val in enumerate(subnets):
- order[val['cidr']] = idx
-
- # Number of subnets in the play
- subnets_in_play = len(subnets)
- returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play))
-
- return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
-
-
-def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
- """
- Terminates a VPC
-
- module: Ansible module object
- vpc_conn: authenticated VPCConnection connection object
- vpc_id: a vpc id to terminate
- cidr: The cidr block of the VPC - can be used in lieu of an ID
-
- Returns a dictionary of VPC information
- about the VPC terminated.
-
- If the VPC to be terminated is available
- "changed" will be set to True.
-
- """
- vpc_dict = {}
- terminated_vpc_id = ''
- changed = False
-
- vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
-
- if vpc is not None:
- if vpc.state == 'available':
- terminated_vpc_id = vpc.id
- vpc_dict = get_vpc_info(vpc)
- try:
- subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
- for sn in subnets:
- vpc_conn.delete_subnet(sn.id)
-
- igws = vpc_conn.get_all_internet_gateways(
- filters={'attachment.vpc-id': vpc.id}
- )
- for igw in igws:
- vpc_conn.detach_internet_gateway(igw.id, vpc.id)
- vpc_conn.delete_internet_gateway(igw.id)
-
- rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id})
- for rt in rts:
- rta = rt.associations
- is_main = False
- for a in rta:
- if a.main:
- is_main = True
- if not is_main:
- vpc_conn.delete_route_table(rt.id)
-
- vpc_conn.delete_vpc(vpc.id)
- except EC2ResponseError as e:
- module.fail_json(
- msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e)
- )
- changed = True
- vpc_dict['state'] = "terminated"
-
- return (changed, vpc_dict, terminated_vpc_id)
-
-
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- cidr_block=dict(),
- instance_tenancy=dict(choices=['default', 'dedicated'], default='default'),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(default=300),
- dns_support=dict(type='bool', default=True),
- dns_hostnames=dict(type='bool', default=True),
- subnets=dict(type='list'),
- vpc_id=dict(),
- internet_gateway=dict(type='bool', default=False),
- resource_tags=dict(type='dict', required=True),
- route_tables=dict(type='list'),
- state=dict(choices=['present', 'absent'], default='present'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- )
-
- if not HAS_BOTO:
- module.fail_json(msg='boto required for this module')
-
- state = module.params.get('state')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
-
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
- except boto.exception.NoAuthHandlerFound as e:
- module.fail_json(msg=str(e))
- else:
- module.fail_json(msg="region must be specified")
-
- igw_id = None
- if module.params.get('state') == 'absent':
- vpc_id = module.params.get('vpc_id')
- cidr = module.params.get('cidr_block')
- (changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
- subnets_changed = None
- elif module.params.get('state') == 'present':
- # Changed is always set to true when provisioning a new VPC
- (vpc_dict, new_vpc_id, subnets_changed, igw_id, changed) = create_vpc(module, vpc_conn)
-
- module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, igw_id=igw_id, subnets=subnets_changed)
-
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/cloud/azure/_azure.py b/lib/ansible/modules/cloud/azure/_azure.py
index bc40cb5d26..1b380ef9c7 100644
--- a/lib/ansible/modules/cloud/azure/_azure.py
+++ b/lib/ansible/modules/cloud/azure/_azure.py
@@ -19,7 +19,10 @@ short_description: create or terminate a virtual machine in azure
description:
- Creates or terminates azure instances. When created optionally waits for it to be 'running'.
version_added: "1.7"
-deprecated: "Use M(azure_rm_virtualmachine) instead."
+deprecated:
+ removed_in: "2.8"
+ why: Replaced with various dedicated Azure modules.
+ alternative: M(azure_rm_virtualmachine)
options:
name:
description:
diff --git a/lib/ansible/modules/cloud/cloudstack/_cs_nic.py b/lib/ansible/modules/cloud/cloudstack/_cs_nic.py
index 5a457ea8a0..9bc433de46 100644
--- a/lib/ansible/modules/cloud/cloudstack/_cs_nic.py
+++ b/lib/ansible/modules/cloud/cloudstack/_cs_nic.py
@@ -20,7 +20,10 @@ description:
version_added: "2.3"
author:
- René Moser (@resmo)
-deprecated: Deprecated in 2.4. Use M(cs_instance_nic_secondaryip) instead.
+deprecated:
+ removed_in: "2.8"
+ why: New module created.
+ alternative: Use M(cs_instance_nic_secondaryip) instead.
options:
vm:
description:
diff --git a/lib/ansible/modules/cloud/docker/_docker.py b/lib/ansible/modules/cloud/docker/_docker.py
index 152475a54d..98bf6e1bbc 100644
--- a/lib/ansible/modules/cloud/docker/_docker.py
+++ b/lib/ansible/modules/cloud/docker/_docker.py
@@ -18,7 +18,10 @@ DOCUMENTATION = '''
module: docker
version_added: "1.4"
short_description: manage docker containers
-deprecated: In 2.2 use M(docker_container) and M(docker_image) instead.
+deprecated:
+ removed_in: "2.4"
+ why: Replaced by dedicated modules.
+ alternative: Use M(docker_container) and M(docker_image) instead.
description:
- This is the original Ansible module for managing the Docker container life cycle.
- NOTE - Additional and newer modules are available. For the latest on orchestrating containers with Ansible
@@ -478,1463 +481,7 @@ EXAMPLES = '''
syslog-tag: myservice
'''
-import json
-import os
-import shlex
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-try:
- import docker.client
- import docker.utils
- import docker.errors
- from requests.exceptions import RequestException
- HAS_DOCKER_PY = True
-except ImportError:
- HAS_DOCKER_PY = False
-
-DEFAULT_DOCKER_API_VERSION = None
-DEFAULT_TIMEOUT_SECONDS = 60
-if HAS_DOCKER_PY:
- try:
- from docker.errors import APIError as DockerAPIError
- except ImportError:
- from docker.client import APIError as DockerAPIError
- try:
- # docker-py 1.2+
- import docker.constants
- DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION
- DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
- except (ImportError, AttributeError):
- # docker-py less than 1.2
- DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION
- DEFAULT_TIMEOUT_SECONDS = docker.client.DEFAULT_TIMEOUT_SECONDS
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def _human_to_bytes(number):
- suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
-
- if isinstance(number, int):
- return number
- if number.isdigit():
- return int(number)
- if number[-1] == suffixes[0] and number[-2].isdigit():
- return number[:-1]
-
- i = 1
- for each in suffixes[1:]:
- if number[-len(each):] == suffixes[i]:
- return int(number[:-len(each)]) * (1024 ** i)
- i = i + 1
-
- raise ValueError('Could not convert %s to integer' % (number,))
-
-
-def _ansible_facts(container_list):
- return {"docker_containers": container_list}
-
-
-def _docker_id_quirk(inspect):
- # XXX: some quirk in docker
- if 'ID' in inspect:
- inspect['Id'] = inspect['ID']
- del inspect['ID']
- return inspect
-
-
-def get_split_image_tag(image):
- # If image contains a host or org name, omit that from our check
- if '/' in image:
- registry, resource = image.rsplit('/', 1)
- else:
- registry, resource = None, image
-
- # now we can determine if image has a tag or a digest
- for s in ['@', ':']:
- if s in resource:
- resource, tag = resource.split(s, 1)
- if registry:
- resource = '/'.join((registry, resource))
- break
- else:
- tag = "latest"
- resource = image
-
- return resource, tag
-
-
-def normalize_image(image):
- """
- Normalize a Docker image name to include the implied :latest tag.
- """
-
- return ":".join(get_split_image_tag(image))
-
-
-def is_running(container):
- '''Return True if an inspected container is in a state we consider "running."'''
-
- return container['State']['Running'] is True and not container['State'].get('Ghost', False)
-
-
-def get_docker_py_versioninfo():
- if hasattr(docker, '__version__'):
- # a '__version__' attribute was added to the module but not until
- # after 0.3.0 was pushed to pypi. If it's there, use it.
- version = []
- for part in docker.__version__.split('.'):
- try:
- version.append(int(part))
- except ValueError:
- for idx, char in enumerate(part):
- if not char.isdigit():
- nondigit = part[idx:]
- digit = part[:idx]
- break
- if digit:
- version.append(int(digit))
- if nondigit:
- version.append(nondigit)
- elif hasattr(docker.Client, '_get_raw_response_socket'):
- # HACK: if '__version__' isn't there, we check for the existence of
- # `_get_raw_response_socket` in the docker.Client class, which was
- # added in 0.3.0
- version = (0, 3, 0)
- else:
- # This is untrue but this module does not function with a version less
- # than 0.3.0 so it's okay to lie here.
- version = (0,)
-
- return tuple(version)
-
-
-def check_dependencies(module):
- """
- Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
- helpful error message if it isn't.
- """
- if not HAS_DOCKER_PY:
- module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
- else:
- versioninfo = get_docker_py_versioninfo()
- if versioninfo < (0, 3, 0):
- module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
-
-
-class DockerManager(object):
-
- counters = dict(
- created=0, started=0, stopped=0, killed=0, removed=0, restarted=0, pulled=0
- )
- reload_reasons = []
- _capabilities = set()
-
- # Map optional parameters to minimum (docker-py version, server APIVersion)
- # docker-py version is a tuple of ints because we have to compare them
- # server APIVersion is passed to a docker-py function that takes strings
- _cap_ver_req = dict(
- devices=((0, 7, 0), '1.2'),
- dns=((0, 3, 0), '1.10'),
- volumes_from=((0, 3, 0), '1.10'),
- restart_policy=((0, 5, 0), '1.14'),
- extra_hosts=((0, 7, 0), '1.3.1'),
- pid=((1, 0, 0), '1.17'),
- log_driver=((1, 2, 0), '1.18'),
- log_opt=((1, 2, 0), '1.18'),
- host_config=((0, 7, 0), '1.15'),
- cpu_set=((0, 6, 0), '1.14'),
- cap_add=((0, 5, 0), '1.14'),
- cap_drop=((0, 5, 0), '1.14'),
- read_only=((1, 0, 0), '1.17'),
- labels=((1, 2, 0), '1.18'),
- stop_timeout=((0, 5, 0), '1.0'),
- ulimits=((1, 2, 0), '1.18'),
- # Clientside only
- insecure_registry=((0, 5, 0), '0.0'),
- env_file=((1, 4, 0), '0.0'),
- )
-
- def __init__(self, module):
- self.module = module
-
- self.binds = None
- self.volumes = None
- if self.module.params.get('volumes'):
- self.binds = []
- self.volumes = []
- vols = self.module.params.get('volumes')
- for vol in vols:
- parts = vol.split(":")
- # regular volume
- if len(parts) == 1:
- self.volumes.append(parts[0])
- # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
- elif 2 <= len(parts) <= 3:
- # default to read-write
- mode = 'rw'
- # with supplied bind mode
- if len(parts) == 3:
- if parts[2] not in ["rw", "rw,Z", "rw,z", "z,rw", "Z,rw", "Z", "z", "ro", "ro,Z", "ro,z", "z,ro", "Z,ro"]:
- self.module.fail_json(msg='invalid bind mode ' + parts[2])
- else:
- mode = parts[2]
- self.binds.append("%s:%s:%s" % (parts[0], parts[1], mode))
- else:
- self.module.fail_json(msg='volumes support 1 to 3 arguments')
-
- self.lxc_conf = None
- if self.module.params.get('lxc_conf'):
- self.lxc_conf = []
- options = self.module.params.get('lxc_conf')
- for option in options:
- parts = option.split(':', 1)
- self.lxc_conf.append({"Key": parts[0], "Value": parts[1]})
-
- self.exposed_ports = None
- if self.module.params.get('expose'):
- self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose'))
-
- self.port_bindings = None
- if self.module.params.get('ports'):
- self.port_bindings = self.get_port_bindings(self.module.params.get('ports'))
-
- self.links = None
- if self.module.params.get('links'):
- self.links = self.get_links(self.module.params.get('links'))
-
- self.ulimits = None
- if self.module.params.get('ulimits'):
- self.ulimits = []
- ulimits = self.module.params.get('ulimits')
- for ulimit in ulimits:
- parts = ulimit.split(":")
- if len(parts) == 2:
- self.ulimits.append({'name': parts[0], 'soft': int(parts[1]), 'hard': int(parts[1])})
- elif len(parts) == 3:
- self.ulimits.append({'name': parts[0], 'soft': int(parts[1]), 'hard': int(parts[2])})
- else:
- self.module.fail_json(msg='ulimits support 2 to 3 arguments')
-
- # Connect to the docker server using any configured host and TLS settings.
-
- env_host = os.getenv('DOCKER_HOST')
- env_docker_verify = os.getenv('DOCKER_TLS_VERIFY')
- env_cert_path = os.getenv('DOCKER_CERT_PATH')
- env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME')
-
- docker_url = module.params.get('docker_url')
- if not docker_url:
- if env_host:
- docker_url = env_host
- else:
- docker_url = 'unix://var/run/docker.sock'
-
- docker_api_version = module.params.get('docker_api_version')
- timeout = module.params.get('timeout')
-
- tls_client_cert = module.params.get('tls_client_cert', None)
- if not tls_client_cert and env_cert_path:
- tls_client_cert = os.path.join(env_cert_path, 'cert.pem')
-
- tls_client_key = module.params.get('tls_client_key', None)
- if not tls_client_key and env_cert_path:
- tls_client_key = os.path.join(env_cert_path, 'key.pem')
-
- tls_ca_cert = module.params.get('tls_ca_cert')
- if not tls_ca_cert and env_cert_path:
- tls_ca_cert = os.path.join(env_cert_path, 'ca.pem')
-
- tls_hostname = module.params.get('tls_hostname')
- if tls_hostname is None:
- if env_docker_hostname:
- tls_hostname = env_docker_hostname
- else:
- parsed_url = urlparse(docker_url)
- if ':' in parsed_url.netloc:
- tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
- else:
- tls_hostname = parsed_url
- if not tls_hostname:
- tls_hostname = True
-
- # use_tls can be one of four values:
- # no: Do not use tls
- # encrypt: Use tls. We may do client auth. We will not verify the server
- # verify: Use tls. We may do client auth. We will verify the server
- # None: Only use tls if the parameters for client auth were specified
- # or tls_ca_cert (which requests verifying the server with
- # a specific ca certificate)
- use_tls = module.params.get('use_tls')
- if use_tls is None and env_docker_verify is not None:
- use_tls = 'verify'
-
- tls_config = None
- if use_tls != 'no':
- params = {}
-
- # Setup client auth
- if tls_client_cert and tls_client_key:
- params['client_cert'] = (tls_client_cert, tls_client_key)
-
- # We're allowed to verify the connection to the server
- if use_tls == 'verify' or (use_tls is None and tls_ca_cert):
- if tls_ca_cert:
- params['ca_cert'] = tls_ca_cert
- params['verify'] = True
- params['assert_hostname'] = tls_hostname
- else:
- params['verify'] = True
- params['assert_hostname'] = tls_hostname
- elif use_tls == 'encrypt':
- params['verify'] = False
-
- if params:
- # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296
- docker_url = docker_url.replace('tcp://', 'https://')
- tls_config = docker.tls.TLSConfig(**params)
-
- self.client = docker.Client(base_url=docker_url,
- version=docker_api_version,
- tls=tls_config,
- timeout=timeout)
-
- self.docker_py_versioninfo = get_docker_py_versioninfo()
-
- env = self.module.params.get('env', None)
- env_file = self.module.params.get('env_file', None)
- self.environment = self.get_environment(env, env_file)
-
- def _check_capabilities(self):
- """
- Create a list of available capabilities
- """
- api_version = self.client.version()['ApiVersion']
- for cap, req_vers in self._cap_ver_req.items():
- if (self.docker_py_versioninfo >= req_vers[0] and
- docker.utils.compare_version(req_vers[1], api_version) >= 0):
- self._capabilities.add(cap)
-
- def ensure_capability(self, capability, fail=True):
- """
- Some of the functionality this ansible module implements are only
- available in newer versions of docker. Ensure that the capability
- is available here.
-
- If fail is set to False then return True or False depending on whether
- we have the capability. Otherwise, simply fail and exit the module if
- we lack the capability.
- """
- if not self._capabilities:
- self._check_capabilities()
-
- if capability in self._capabilities:
- return True
-
- if not fail:
- return False
-
- api_version = self.client.version()['ApiVersion']
- self.module.fail_json(msg='Specifying the `%s` parameter requires'
- ' docker-py: %s, docker server apiversion %s; found'
- ' docker-py: %s, server: %s' % (capability,
- '.'.join(map(str, self._cap_ver_req[capability][0])),
- self._cap_ver_req[capability][1],
- '.'.join(map(str, self.docker_py_versioninfo)),
- api_version))
-
- def get_environment(self, env, env_file):
- """
- If environment files are combined with explicit environment variables, the explicit environment variables will override the key from the env file.
- """
- final_env = {}
-
- if env_file:
- self.ensure_capability('env_file')
- parsed_env_file = docker.utils.parse_env_file(env_file)
-
- for name, value in parsed_env_file.items():
- final_env[name] = str(value)
-
- if env:
- for name, value in env.items():
- final_env[name] = str(value)
-
- return final_env
-
- def get_links(self, links):
- """
- Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link
- """
- processed_links = {}
-
- for link in links:
- parsed_link = link.split(':', 1)
- if(len(parsed_link) == 2):
- processed_links[parsed_link[0]] = parsed_link[1]
- else:
- processed_links[parsed_link[0]] = parsed_link[0]
-
- return processed_links
-
- def get_exposed_ports(self, expose_list):
- """
- Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax.
- """
- if expose_list:
- exposed = []
- for port in expose_list:
- port = str(port).strip()
- if port.endswith('/tcp') or port.endswith('/udp'):
- port_with_proto = tuple(port.split('/'))
- else:
- # assume tcp protocol if not specified
- port_with_proto = (port, 'tcp')
- exposed.append(port_with_proto)
- return exposed
- else:
- return None
-
- def get_start_params(self):
- """
- Create start params
- """
- params = {
- 'lxc_conf': self.lxc_conf,
- 'binds': self.binds,
- 'port_bindings': self.port_bindings,
- 'publish_all_ports': self.module.params.get('publish_all_ports'),
- 'privileged': self.module.params.get('privileged'),
- 'links': self.links,
- 'network_mode': self.module.params.get('net'),
- }
-
- optionals = {}
- for optional_param in ('devices', 'dns', 'volumes_from', 'restart_policy', 'restart_policy_retry',
- 'pid', 'extra_hosts', 'log_driver', 'cap_add', 'cap_drop', 'read_only', 'log_opt'):
- optionals[optional_param] = self.module.params.get(optional_param)
-
- if optionals['devices'] is not None:
- self.ensure_capability('devices')
- params['devices'] = optionals['devices']
-
- if optionals['dns'] is not None:
- self.ensure_capability('dns')
- params['dns'] = optionals['dns']
-
- if optionals['volumes_from'] is not None:
- self.ensure_capability('volumes_from')
- params['volumes_from'] = optionals['volumes_from']
-
- if optionals['restart_policy'] is not None:
- self.ensure_capability('restart_policy')
- params['restart_policy'] = dict(Name=optionals['restart_policy'])
- if params['restart_policy']['Name'] == 'on-failure':
- params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
-
- # docker_py only accepts 'host' or None
- if 'pid' in optionals and not optionals['pid']:
- optionals['pid'] = None
-
- if optionals['pid'] is not None:
- self.ensure_capability('pid')
- params['pid_mode'] = optionals['pid']
-
- if optionals['extra_hosts'] is not None:
- self.ensure_capability('extra_hosts')
- params['extra_hosts'] = optionals['extra_hosts']
-
- if optionals['log_driver'] is not None:
- self.ensure_capability('log_driver')
- log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON)
- if optionals['log_opt'] is not None:
- for k, v in optionals['log_opt'].items():
- log_config.set_config_value(k, v)
- log_config.type = optionals['log_driver']
- params['log_config'] = log_config
-
- if optionals['cap_add'] is not None:
- self.ensure_capability('cap_add')
- params['cap_add'] = optionals['cap_add']
-
- if optionals['cap_drop'] is not None:
- self.ensure_capability('cap_drop')
- params['cap_drop'] = optionals['cap_drop']
-
- if optionals['read_only'] is not None:
- self.ensure_capability('read_only')
- params['read_only'] = optionals['read_only']
-
- return params
-
- def create_host_config(self):
- """
- Create HostConfig object
- """
- params = self.get_start_params()
- return docker.utils.create_host_config(**params)
-
- def get_port_bindings(self, ports):
- """
- Parse the `ports` string into a port bindings dict for the `start_container` call.
- """
- binds = {}
- for port in ports:
- # ports could potentially be an array like [80, 443], so we make sure they're strings
- # before splitting
- parts = str(port).split(':')
- container_port = parts[-1]
- if '/' not in container_port:
- container_port = int(parts[-1])
-
- p_len = len(parts)
- if p_len == 1:
- # Bind `container_port` of the container to a dynamically
- # allocated TCP port on all available interfaces of the host
- # machine.
- bind = ('0.0.0.0',)
- elif p_len == 2:
- # Bind `container_port` of the container to port `parts[0]` on
- # all available interfaces of the host machine.
- bind = ('0.0.0.0', int(parts[0]))
- elif p_len == 3:
- # Bind `container_port` of the container to port `parts[1]` on
- # IP `parts[0]` of the host machine. If `parts[1]` empty bind
- # to a dynamically allocated port of IP `parts[0]`.
- bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
-
- if container_port in binds:
- old_bind = binds[container_port]
- if isinstance(old_bind, list):
- # append to list if it already exists
- old_bind.append(bind)
- else:
- # otherwise create list that contains the old and new binds
- binds[container_port] = [binds[container_port], bind]
- else:
- binds[container_port] = bind
-
- return binds
-
- def get_summary_message(self):
- '''
- Generate a message that briefly describes the actions taken by this
- task, in English.
- '''
-
- parts = []
- for k, v in self.counters.items():
- if v == 0:
- continue
-
- if v == 1:
- plural = ""
- else:
- plural = "s"
- parts.append("%s %d container%s" % (k, v, plural))
-
- if parts:
- return ", ".join(parts) + "."
- else:
- return "No action taken."
-
- def get_reload_reason_message(self):
- '''
- Generate a message describing why any reloaded containers were reloaded.
- '''
-
- if self.reload_reasons:
- return ", ".join(self.reload_reasons)
- else:
- return None
-
- def get_summary_counters_msg(self):
- msg = ""
- for k, v in self.counters.items():
- msg = msg + "%s %d " % (k, v)
-
- return msg
-
- def increment_counter(self, name):
- self.counters[name] = self.counters[name] + 1
-
- def has_changed(self):
- for k, v in self.counters.items():
- if v > 0:
- return True
-
- return False
-
- def get_inspect_image(self):
- try:
- return self.client.inspect_image(self.module.params.get('image'))
- except DockerAPIError as e:
- if e.response.status_code == 404:
- return None
- else:
- raise e
-
- def get_image_repo_tags(self):
- image, tag = get_split_image_tag(self.module.params.get('image'))
- if tag is None:
- tag = 'latest'
- resource = '%s:%s' % (image, tag)
-
- for image in self.client.images(name=image):
- if resource in image.get('RepoTags', []):
- return image['RepoTags']
- return []
-
- def get_inspect_containers(self, containers):
- inspect = []
- for i in containers:
- details = self.client.inspect_container(i['Id'])
- details = _docker_id_quirk(details)
- inspect.append(details)
-
- return inspect
-
- def get_differing_containers(self):
- """
- Inspect all matching, running containers, and return those that were
- started with parameters that differ from the ones that are provided
- during this module run. A list containing the differing
- containers will be returned, and a short string describing the specific
- difference encountered in each container will be appended to
- reload_reasons.
-
- This generates the set of containers that need to be stopped and
- started with new parameters with state=reloaded.
- """
-
- running = self.get_running_containers()
- current = self.get_inspect_containers(running)
- defaults = self.client.info()
-
- # Get API version
- api_version = self.client.version()['ApiVersion']
-
- image = self.get_inspect_image()
- if image is None:
- # The image isn't present. Assume that we're about to pull a new
- # tag and *everything* will be restarted.
- #
- # This will give false positives if you untag an image on the host
- # and there's nothing more to pull.
- return current
-
- differing = []
-
- for container in current:
-
- # IMAGE
- # Compare the image by ID rather than name, so that containers
- # will be restarted when new versions of an existing image are
- # pulled.
- if container['Image'] != image['Id']:
- self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id']))
- differing.append(container)
- continue
-
- # ENTRYPOINT
-
- expected_entrypoint = self.module.params.get('entrypoint')
- if expected_entrypoint:
- expected_entrypoint = shlex.split(expected_entrypoint)
- actual_entrypoint = container["Config"]["Entrypoint"]
-
- if actual_entrypoint != expected_entrypoint:
- self.reload_reasons.append(
- 'entrypoint ({0} => {1})'
- .format(actual_entrypoint, expected_entrypoint)
- )
- differing.append(container)
- continue
-
- # COMMAND
-
- expected_command = self.module.params.get('command')
- if expected_command:
- expected_command = shlex.split(expected_command)
- actual_command = container["Config"]["Cmd"]
-
- if actual_command != expected_command:
- self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command))
- differing.append(container)
- continue
-
- # EXPOSED PORTS
- expected_exposed_ports = set((image['ContainerConfig'].get('ExposedPorts') or {}).keys())
- for p in (self.exposed_ports or []):
- expected_exposed_ports.add("/".join(p))
-
- actually_exposed_ports = set((container["Config"].get("ExposedPorts") or {}).keys())
-
- if actually_exposed_ports != expected_exposed_ports:
- self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports))
- differing.append(container)
- continue
-
- # VOLUMES
-
- expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys())
- if self.volumes:
- expected_volume_keys.update(self.volumes)
-
- actual_volume_keys = set((container['Config']['Volumes'] or {}).keys())
-
- if actual_volume_keys != expected_volume_keys:
- self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys))
- differing.append(container)
- continue
-
- # ULIMITS
-
- expected_ulimit_keys = set(map(lambda x: '%s:%s:%s' % (x['name'], x['soft'], x['hard']), self.ulimits or []))
- actual_ulimit_keys = set(map(lambda x: '%s:%s:%s' % (x['Name'], x['Soft'], x['Hard']), (container['HostConfig']['Ulimits'] or [])))
-
- if actual_ulimit_keys != expected_ulimit_keys:
- self.reload_reasons.append('ulimits ({0} => {1})'.format(actual_ulimit_keys, expected_ulimit_keys))
- differing.append(container)
- continue
-
- # CPU_SHARES
-
- expected_cpu_shares = self.module.params.get('cpu_shares')
- actual_cpu_shares = container['HostConfig']['CpuShares']
-
- if expected_cpu_shares and actual_cpu_shares != expected_cpu_shares:
- self.reload_reasons.append('cpu_shares ({0} => {1})'.format(actual_cpu_shares, expected_cpu_shares))
- differing.append(container)
- continue
-
- # MEM_LIMIT
-
- try:
- expected_mem = _human_to_bytes(self.module.params.get('memory_limit'))
- except ValueError as e:
- self.module.fail_json(msg=str(e))
-
- # For v1.19 API and above use HostConfig, otherwise use Config
- if docker.utils.compare_version('1.19', api_version) >= 0:
- actual_mem = container['HostConfig']['Memory']
- else:
- actual_mem = container['Config']['Memory']
-
- if expected_mem and actual_mem != expected_mem:
- self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem))
- differing.append(container)
- continue
-
- # ENVIRONMENT
- # actual_env is likely to include environment variables injected by
- # the Dockerfile.
-
- expected_env = {}
-
- for image_env in image['ContainerConfig']['Env'] or []:
- name, value = image_env.split('=', 1)
- expected_env[name] = value
-
- if self.environment:
- for name, value in self.environment.items():
- expected_env[name] = str(value)
-
- actual_env = {}
- for container_env in container['Config']['Env'] or []:
- name, value = container_env.split('=', 1)
- actual_env[name] = value
-
- if actual_env != expected_env:
- # Don't include the environment difference in the output.
- self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env))
- differing.append(container)
- continue
-
- # LABELS
-
- expected_labels = {}
- for name, value in self.module.params.get('labels').items():
- expected_labels[name] = str(value)
-
- if isinstance(container['Config']['Labels'], dict):
- actual_labels = container['Config']['Labels']
- else:
- for container_label in container['Config']['Labels'] or []:
- name, value = container_label.split('=', 1)
- actual_labels[name] = value
-
- if actual_labels != expected_labels:
- self.reload_reasons.append('labels {0} => {1}'.format(actual_labels, expected_labels))
- differing.append(container)
- continue
-
- # HOSTNAME
-
- expected_hostname = self.module.params.get('hostname')
- actual_hostname = container['Config']['Hostname']
- if expected_hostname and actual_hostname != expected_hostname:
- self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname))
- differing.append(container)
- continue
-
- # DOMAINNAME
-
- expected_domainname = self.module.params.get('domainname')
- actual_domainname = container['Config']['Domainname']
- if expected_domainname and actual_domainname != expected_domainname:
- self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname))
- differing.append(container)
- continue
-
- # DETACH
-
- # We don't have to check for undetached containers. If it wasn't
- # detached, it would have stopped before the playbook continued!
-
- # NAME
-
- # We also don't have to check name, because this is one of the
- # criteria that's used to determine which container(s) match in
- # the first place.
-
- # STDIN_OPEN
-
- expected_stdin_open = self.module.params.get('stdin_open')
- actual_stdin_open = container['Config']['OpenStdin']
- if actual_stdin_open != expected_stdin_open:
- self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open))
- differing.append(container)
- continue
-
- # TTY
-
- expected_tty = self.module.params.get('tty')
- actual_tty = container['Config']['Tty']
- if actual_tty != expected_tty:
- self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty))
- differing.append(container)
- continue
-
- # -- "start" call differences --
-
- # LXC_CONF
-
- if self.lxc_conf:
- expected_lxc = set(self.lxc_conf)
- actual_lxc = set(container['HostConfig']['LxcConf'] or [])
- if actual_lxc != expected_lxc:
- self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc))
- differing.append(container)
- continue
-
- # BINDS
-
- expected_binds = set()
- if self.binds:
- for bind in self.binds:
- expected_binds.add(bind)
-
- actual_binds = set()
- for bind in (container['HostConfig']['Binds'] or []):
- if len(bind.split(':')) == 2:
- actual_binds.add(bind + ":rw")
- else:
- actual_binds.add(bind)
-
- if actual_binds != expected_binds:
- self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds))
- differing.append(container)
- continue
-
- # PORT BINDINGS
-
- expected_bound_ports = {}
- if self.port_bindings:
- for container_port, config in self.port_bindings.items():
- if isinstance(container_port, int):
- container_port = "{0}/tcp".format(container_port)
- if len(config) == 1:
- expected_bound_ports[container_port] = [dict(HostIp="0.0.0.0", HostPort="")]
- elif isinstance(config[0], tuple):
- expected_bound_ports[container_port] = []
- for hostip, hostport in config:
- expected_bound_ports[container_port].append(dict(HostIp=hostip, HostPort=str(hostport)))
- else:
- expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
-
- actual_bound_ports = container['HostConfig']['PortBindings'] or {}
-
- if actual_bound_ports != expected_bound_ports:
- self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports))
- differing.append(container)
- continue
-
- # PUBLISHING ALL PORTS
-
- # What we really care about is the set of ports that is actually
- # published. That should be caught above.
-
- # PRIVILEGED
-
- expected_privileged = self.module.params.get('privileged')
- actual_privileged = container['HostConfig']['Privileged']
- if actual_privileged != expected_privileged:
- self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged))
- differing.append(container)
- continue
-
- # LINKS
-
- expected_links = set()
- for link, alias in (self.links or {}).items():
- expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias))
-
- actual_links = set()
- for link in (container['HostConfig']['Links'] or []):
- actual_links.add(link)
-
- if actual_links != expected_links:
- self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links))
- differing.append(container)
- continue
-
- # NETWORK MODE
-
- expected_netmode = self.module.params.get('net') or 'bridge'
- actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge'
- if actual_netmode != expected_netmode:
- self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode))
- differing.append(container)
- continue
-
- # DEVICES
-
- expected_devices = set()
- for device in (self.module.params.get('devices') or []):
- if len(device.split(':')) == 2:
- expected_devices.add(device + ":rwm")
- else:
- expected_devices.add(device)
-
- actual_devices = set()
- for device in (container['HostConfig']['Devices'] or []):
- actual_devices.add("{PathOnHost}:{PathInContainer}:{CgroupPermissions}".format(**device))
-
- if actual_devices != expected_devices:
- self.reload_reasons.append('devices ({0} => {1})'.format(actual_devices, expected_devices))
- differing.append(container)
- continue
-
- # DNS
-
- expected_dns = set(self.module.params.get('dns') or [])
- actual_dns = set(container['HostConfig']['Dns'] or [])
- if actual_dns != expected_dns:
- self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns))
- differing.append(container)
- continue
-
- # VOLUMES_FROM
-
- expected_volumes_from = set(self.module.params.get('volumes_from') or [])
- actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or [])
- if actual_volumes_from != expected_volumes_from:
- self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from))
- differing.append(container)
-
- # LOG_DRIVER
-
- if self.ensure_capability('log_driver', False):
- expected_log_driver = self.module.params.get('log_driver') or defaults['LoggingDriver']
- actual_log_driver = container['HostConfig']['LogConfig']['Type']
- if actual_log_driver != expected_log_driver:
- self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver))
- differing.append(container)
- continue
-
- if self.ensure_capability('log_opt', False):
- expected_logging_opts = self.module.params.get('log_opt') or {}
- actual_log_opts = container['HostConfig']['LogConfig']['Config']
- if len(set(expected_logging_opts.items()) - set(actual_log_opts.items())) != 0:
- log_opt_reasons = {
- 'added': dict(set(expected_logging_opts.items()) - set(actual_log_opts.items())),
- 'removed': dict(set(actual_log_opts.items()) - set(expected_logging_opts.items()))
- }
- self.reload_reasons.append('log_opt ({0})'.format(log_opt_reasons))
- differing.append(container)
-
- return differing
-
- def get_deployed_containers(self):
- """
- Return any matching containers that are already present.
- """
-
- entrypoint = self.module.params.get('entrypoint')
- if entrypoint is not None:
- entrypoint = shlex.split(entrypoint)
- command = self.module.params.get('command')
- if command is not None:
- command = shlex.split(command)
- name = self.module.params.get('name')
- if name and not name.startswith('/'):
- name = '/' + name
- deployed = []
-
- # "images" will be a collection of equivalent "name:tag" image names
- # that map to the same Docker image.
- inspected = self.get_inspect_image()
- if inspected:
- repo_tags = self.get_image_repo_tags()
- else:
- repo_tags = [normalize_image(self.module.params.get('image'))]
-
- for container in self.client.containers(all=True):
- details = None
-
- if name:
- name_list = container.get('Names')
- if name_list is None:
- name_list = []
- matches = name in name_list
- else:
- details = self.client.inspect_container(container['Id'])
- details = _docker_id_quirk(details)
-
- running_image = normalize_image(details['Config']['Image'])
-
- image_matches = running_image in repo_tags
-
- if command is None:
- command_matches = True
- else:
- command_matches = (command == details['Config']['Cmd'])
-
- if entrypoint is None:
- entrypoint_matches = True
- else:
- entrypoint_matches = (
- entrypoint == details['Config']['Entrypoint']
- )
-
- matches = (image_matches and command_matches and
- entrypoint_matches)
-
- if matches:
- if not details:
- details = self.client.inspect_container(container['Id'])
- details = _docker_id_quirk(details)
-
- deployed.append(details)
-
- return deployed
-
- def get_running_containers(self):
- return [c for c in self.get_deployed_containers() if is_running(c)]
-
- def pull_image(self):
- extra_params = {}
- if self.module.params.get('insecure_registry'):
- if self.ensure_capability('insecure_registry', fail=False):
- extra_params['insecure_registry'] = self.module.params.get('insecure_registry')
-
- resource = self.module.params.get('image')
- image, tag = get_split_image_tag(resource)
- if self.module.params.get('username'):
- try:
- self.client.login(
- self.module.params.get('username'),
- password=self.module.params.get('password'),
- email=self.module.params.get('email'),
- registry=self.module.params.get('registry')
- )
- except Exception as e:
- self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", error=repr(e))
- try:
- changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params))
- pull_success = False
- for change in changes:
- status = json.loads(change).get('status', '')
- if status.startswith('Status: Image is up to date for'):
- # Image is already up to date. Don't increment the counter.
- pull_success = True
- break
- elif (status.startswith('Status: Downloaded newer image for') or
- status.startswith('Download complete')):
- # Image was updated. Increment the pull counter.
- self.increment_counter('pulled')
- pull_success = True
- break
- if not pull_success:
- # Unrecognized status string.
- self.module.fail_json(msg="Unrecognized status from pull.", status=status, changes=changes)
- except Exception as e:
- self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e))
-
- def create_containers(self, count=1):
- try:
- mem_limit = _human_to_bytes(self.module.params.get('memory_limit'))
- except ValueError as e:
- self.module.fail_json(msg=str(e))
- api_version = self.client.version()['ApiVersion']
-
- params = dict(
- image=self.module.params.get('image'),
- entrypoint=self.module.params.get('entrypoint'),
- command=self.module.params.get('command'),
- ports=self.exposed_ports,
- volumes=self.volumes,
- environment=self.environment,
- labels=self.module.params.get('labels'),
- hostname=self.module.params.get('hostname'),
- domainname=self.module.params.get('domainname'),
- detach=self.module.params.get('detach'),
- name=self.module.params.get('name'),
- stdin_open=self.module.params.get('stdin_open'),
- tty=self.module.params.get('tty'),
- cpuset=self.module.params.get('cpu_set'),
- cpu_shares=self.module.params.get('cpu_shares'),
- user=self.module.params.get('docker_user'),
- )
- if self.ensure_capability('host_config', fail=False):
- params['host_config'] = self.create_host_config()
-
- # For v1.19 API and above use HostConfig, otherwise use Config
- if docker.utils.compare_version('1.19', api_version) < 0:
- params['mem_limit'] = mem_limit
- else:
- params['host_config']['Memory'] = mem_limit
-
- if self.ulimits is not None:
- self.ensure_capability('ulimits')
- params['host_config']['ulimits'] = self.ulimits
-
- def do_create(count, params):
- results = []
- for _ in range(count):
- result = self.client.create_container(**params)
- self.increment_counter('created')
- results.append(result)
-
- return results
-
- try:
- containers = do_create(count, params)
- except docker.errors.APIError as e:
- if e.response.status_code != 404:
- raise
-
- self.pull_image()
- containers = do_create(count, params)
-
- return containers
-
- def start_containers(self, containers):
- params = {}
-
- if not self.ensure_capability('host_config', fail=False):
- params = self.get_start_params()
-
- for i in containers:
- self.client.start(i)
- self.increment_counter('started')
-
- if not self.module.params.get('detach'):
- status = self.client.wait(i['Id'])
- if status != 0:
- output = self.client.logs(i['Id'], stdout=True, stderr=True,
- stream=False, timestamps=False)
- self.module.fail_json(status=status, msg=output)
-
- def stop_containers(self, containers):
- for i in containers:
- self.client.stop(i['Id'], self.module.params.get('stop_timeout'))
- self.increment_counter('stopped')
-
- return [self.client.wait(i['Id']) for i in containers]
-
- def remove_containers(self, containers):
- for i in containers:
- self.client.remove_container(i['Id'])
- self.increment_counter('removed')
-
- def kill_containers(self, containers):
- for i in containers:
- self.client.kill(i['Id'], self.module.params.get('signal'))
- self.increment_counter('killed')
-
- def restart_containers(self, containers):
- for i in containers:
- self.client.restart(i['Id'])
- self.increment_counter('restarted')
-
-
-class ContainerSet:
-
- def __init__(self, manager):
- self.manager = manager
- self.running = []
- self.deployed = []
- self.changed = []
-
- def refresh(self):
- '''
- Update our view of the matching containers from the Docker daemon.
- '''
-
- self.deployed = self.manager.get_deployed_containers()
- self.running = [c for c in self.deployed if is_running(c)]
-
- def notice_changed(self, containers):
- '''
- Record a collection of containers as "changed".
- '''
-
- self.changed.extend(containers)
-
-
-def present(manager, containers, count, name):
- '''Ensure that exactly `count` matching containers exist in any state.'''
-
- containers.refresh()
- delta = count - len(containers.deployed)
-
- if delta > 0:
- created = manager.create_containers(delta)
- containers.notice_changed(manager.get_inspect_containers(created))
-
- if delta < 0:
- # If both running and stopped containers exist, remove
- # stopped containers first.
- # Use key param for python 2/3 compatibility.
- containers.deployed.sort(key=is_running)
-
- to_stop = []
- to_remove = []
- for c in containers.deployed[0:-delta]:
- if is_running(c):
- to_stop.append(c)
- to_remove.append(c)
-
- manager.stop_containers(to_stop)
- containers.notice_changed(manager.get_inspect_containers(to_remove))
- manager.remove_containers(to_remove)
-
-
-def started(manager, containers, count, name):
- '''Ensure that exactly `count` matching containers exist and are running.'''
-
- containers.refresh()
- delta = count - len(containers.running)
-
- if delta > 0:
- if name and containers.deployed:
- # A stopped container exists with the requested name.
- # Clean it up before attempting to start a new one.
- manager.remove_containers(containers.deployed)
-
- created = manager.create_containers(delta)
- manager.start_containers(created)
- containers.notice_changed(manager.get_inspect_containers(created))
-
- if delta < 0:
- excess = containers.running[0:-delta]
- containers.notice_changed(manager.get_inspect_containers(excess))
- manager.stop_containers(excess)
- manager.remove_containers(excess)
-
-
-def reloaded(manager, containers, count, name):
- '''
- Ensure that exactly `count` matching containers exist and are
- running. If any associated settings have been changed (volumes,
- ports or so on), restart those containers.
- '''
-
- containers.refresh()
-
- for container in manager.get_differing_containers():
- manager.stop_containers([container])
- manager.remove_containers([container])
-
- started(manager, containers, count, name)
-
-
-def restarted(manager, containers, count, name):
- '''
- Ensure that exactly `count` matching containers exist and are
- running. Unconditionally restart any that were already running.
- '''
-
- containers.refresh()
-
- for container in manager.get_differing_containers():
- manager.stop_containers([container])
- manager.remove_containers([container])
-
- containers.refresh()
-
- manager.restart_containers(containers.running)
- started(manager, containers, count, name)
-
-
-def stopped(manager, containers, count, name):
- '''Stop any matching containers that are running.'''
-
- containers.refresh()
-
- manager.stop_containers(containers.running)
- containers.notice_changed(manager.get_inspect_containers(containers.running))
-
-
-def killed(manager, containers, count, name):
- '''Kill any matching containers that are running.'''
-
- containers.refresh()
-
- manager.kill_containers(containers.running)
- containers.notice_changed(manager.get_inspect_containers(containers.running))
-
-
-def absent(manager, containers, count, name):
- '''Stop and remove any matching containers.'''
-
- containers.refresh()
-
- manager.stop_containers(containers.running)
- containers.notice_changed(manager.get_inspect_containers(containers.deployed))
- manager.remove_containers(containers.deployed)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- count=dict(type='int', default=1),
- image=dict(type='str', required=True),
- pull=dict(ttpe='str', default='missing', choices=['always', 'missing']),
- entrypoint=dict(type='str'),
- command=dict(type='str'),
- expose=dict(type='list'),
- ports=dict(type='list'),
- publish_all_ports=dict(type='bool', default=False),
- volumes=dict(type='list'),
- volumes_from=dict(type='list'),
- links=dict(type='list'),
- devices=dict(type='list'),
- memory_limit=dict(type='str', default=0),
- memory_swap=dict(type='int', default=0),
- cpu_shares=dict(type='int', default=0),
- docker_url=dict(type='str'),
- use_tls=dict(type='str', choices=['encrypt', 'no', 'verify']),
- tls_client_cert=dict(type='path'),
- tls_client_key=dict(type='path'),
- tls_ca_cert=dict(type='path'),
- tls_hostname=dict(type='str'),
- docker_api_version=dict(type='str', default=DEFAULT_DOCKER_API_VERSION),
- docker_user=dict(type='str'),
- username=dict(type='str',),
- password=dict(type='str', no_log=True),
- email=dict(type='str'),
- registry=dict(type='str'),
- hostname=dict(type='str'),
- domainname=dict(type='str'),
- env=dict(type='dict'),
- env_file=dict(type='str'),
- dns=dict(type='list'),
- detach=dict(type='bool', default=True),
- state=dict(type='str', default='started', choices=['absent', 'killed', 'present', 'reloaded', 'restarted', 'running', 'started', 'stopped']),
- signal=dict(type='str'),
- restart_policy=dict(type='str', choices=['always', 'no', 'on-failure', 'unless-stopped']),
- restart_policy_retry=dict(type='int', default=0),
- extra_hosts=dict(type='dict'),
- debug=dict(type='bool', default=False),
- privileged=dict(type='bool', default=False),
- stdin_open=dict(type='bool', default=False),
- tty=dict(type='bool', default=False),
- lxc_conf=dict(type='list'),
- name=dict(type='str'),
- net=dict(type='str'),
- pid=dict(type='str'),
- insecure_registry=dict(type='bool', default=False),
- log_driver=dict(type='str', choices=['awslogs', 'fluentd', 'gelf', 'journald', 'json-file', 'none', 'syslog']),
- log_opt=dict(type='dict'),
- cpu_set=dict(type='str'),
- cap_add=dict(type='list'),
- cap_drop=dict(type='list'),
- read_only=dict(type='bool'),
- labels=dict(type='dict', default={}),
- stop_timeout=dict(type='int', default=10),
- timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS),
- ulimits=dict(type='list'),
- ),
- required_together=(
- ['tls_client_cert', 'tls_client_key'],
- ),
- )
-
- check_dependencies(module)
-
- try:
- manager = DockerManager(module)
- count = module.params.get('count')
- name = module.params.get('name')
- pull = module.params.get('pull')
-
- state = module.params.get('state')
- if state == 'running':
- # Renamed running to started in 1.9
- state = 'started'
-
- if count < 0:
- module.fail_json(msg="Count must be greater than zero")
-
- if count > 1 and name:
- module.fail_json(msg="Count and name must not be used together")
-
- # Explicitly pull new container images, if requested. Do this before
- # noticing running and deployed containers so that the image names
- # will differ if a newer image has been pulled.
- # Missing images should be pulled first to avoid downtime when old
- # container is stopped, but image for new one is now downloaded yet.
- # It also prevents removal of running container before realizing
- # that requested image cannot be retrieved.
- if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None):
- manager.pull_image()
-
- containers = ContainerSet(manager)
-
- if state == 'present':
- present(manager, containers, count, name)
- elif state == 'started':
- started(manager, containers, count, name)
- elif state == 'reloaded':
- reloaded(manager, containers, count, name)
- elif state == 'restarted':
- restarted(manager, containers, count, name)
- elif state == 'stopped':
- stopped(manager, containers, count, name)
- elif state == 'killed':
- killed(manager, containers, count, name)
- elif state == 'absent':
- absent(manager, containers, count, name)
- else:
- module.fail_json(msg='Unrecognized state %s. Must be one of: '
- 'present; started; reloaded; restarted; '
- 'stopped; killed; absent.' % state)
-
- module.exit_json(changed=manager.has_changed(),
- msg=manager.get_summary_message(),
- summary=manager.counters,
- reload_reasons=manager.get_reload_reason_message(),
- ansible_facts=_ansible_facts(containers.changed))
-
- except DockerAPIError as e:
- module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)
-
- except RequestException as e:
- module.fail_json(changed=manager.has_changed(), msg=repr(e))
-
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/clustering/k8s/_kubernetes.py b/lib/ansible/modules/clustering/k8s/_kubernetes.py
index 4106399482..5e57e69e71 100755
--- a/lib/ansible/modules/clustering/k8s/_kubernetes.py
+++ b/lib/ansible/modules/clustering/k8s/_kubernetes.py
@@ -14,7 +14,10 @@ DOCUMENTATION = '''
---
module: kubernetes
version_added: "2.1"
-deprecated: In 2.5 use M(k8s_raw) instead.
+deprecated:
+ removed_in: "2.9"
+ why: This module used the oc command line tool, whereas M(k8s_raw) goes over the REST API.
+ alternative: Use M(k8s_raw) instead.
short_description: Manage Kubernetes resources
description:
- This module can manage Kubernetes resources on an existing cluster using
diff --git a/lib/ansible/modules/clustering/openshift/_oc.py b/lib/ansible/modules/clustering/openshift/_oc.py
index c76ae8ac5b..3dd8d5e1ee 100644
--- a/lib/ansible/modules/clustering/openshift/_oc.py
+++ b/lib/ansible/modules/clustering/openshift/_oc.py
@@ -17,7 +17,10 @@ ANSIBLE_METADATA = {
DOCUMENTATION = """
author:
- "Kenneth D. Evensen (@kevensen)"
-deprecated: In 2.5 use M(openshift_raw) instead.
+deprecated:
+ removed_in: "2.9"
+ why: This module used the oc command line tool, whereas M(openshift_raw) goes over the REST API.
+ alternative: Use M(openshift_raw) instead.
description:
- This module allows management of resources in an OpenShift cluster. The
inventory host can be any host with network connectivity to the OpenShift
diff --git a/lib/ansible/modules/network/citrix/_netscaler.py b/lib/ansible/modules/network/citrix/_netscaler.py
index c069d6cd24..b8bf36bf34 100644
--- a/lib/ansible/modules/network/citrix/_netscaler.py
+++ b/lib/ansible/modules/network/citrix/_netscaler.py
@@ -18,7 +18,10 @@ version_added: "1.1"
short_description: Manages Citrix NetScaler entities
description:
- Manages Citrix NetScaler server and service entities.
-deprecated: In 2.4 use M(netscaler_service) and M(netscaler_server) instead.
+deprecated:
+ removed_in: "2.8"
+ why: Replaced with the Citrix-maintained version.
+ alternative: Use M(netscaler_service) and M(netscaler_server) instead.
options:
nsc_host:
description:
diff --git a/lib/ansible/modules/network/cumulus/_cl_bond.py b/lib/ansible/modules/network/cumulus/_cl_bond.py
index d2db8306af..fed468d30d 100644
--- a/lib/ansible/modules/network/cumulus/_cl_bond.py
+++ b/lib/ansible/modules/network/cumulus/_cl_bond.py
@@ -19,7 +19,10 @@ module: cl_bond
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bond port on Cumulus Linux
-deprecated: Deprecated in 2.3. Use M(nclu) instead.
+deprecated:
+ removed_in: "2.5"
+ why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
+ alternative: Use M(nclu) instead.
description:
- Configures a bond interface on Cumulus Linux To configure a bridge port
use the cl_bridge module. To configure any other type of interface use the
@@ -209,281 +212,7 @@ msg:
sample: "interface bond0 config updated"
'''
-import os
-import re
-import tempfile
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-# handy helper for calling system calls.
-# calls AnsibleModule.run_command and prints a more appropriate message
-# exec_path - path to file to execute, with all its arguments.
-# E.g "/sbin/ip -o link show"
-# failure_msg - what message to print on failure
-def run_cmd(module, exec_path):
- (_rc, out, _err) = module.run_command(exec_path)
- if _rc > 0:
- if re.search('cannot find interface', _err):
- return '[{}]'
- failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
- module.fail_json(msg=failure_msg)
- else:
- return out
-
-
-def current_iface_config(module):
- # due to a bug in ifquery, have to check for presence of interface file
- # and not rely solely on ifquery. when bug is fixed, this check can be
- # removed
- _ifacename = module.params.get('name')
- _int_dir = module.params.get('location')
- module.custom_current_config = {}
- if os.path.exists(_int_dir + '/' + _ifacename):
- _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
- module.custom_current_config = module.from_json(
- run_cmd(module, _cmd))[0]
-
-
-def build_address(module):
- # if addr_method == 'dhcp', don't add IP address
- if module.params.get('addr_method') == 'dhcp':
- return
- _ipv4 = module.params.get('ipv4')
- _ipv6 = module.params.get('ipv6')
- _addresslist = []
- if _ipv4 and len(_ipv4) > 0:
- _addresslist += _ipv4
-
- if _ipv6 and len(_ipv6) > 0:
- _addresslist += _ipv6
- if len(_addresslist) > 0:
- module.custom_desired_config['config']['address'] = ' '.join(
- _addresslist)
-
-
-def build_vids(module):
- _vids = module.params.get('vids')
- if _vids and len(_vids) > 0:
- module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids)
-
-
-def build_pvid(module):
- _pvid = module.params.get('pvid')
- if _pvid:
- module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)
-
-
-def conv_bool_to_str(_value):
- if isinstance(_value, bool):
- if _value is True:
- return 'yes'
- else:
- return 'no'
- return _value
-
-
-def conv_array_to_str(_value):
- if isinstance(_value, list):
- return ' '.join(_value)
- return _value
-
-
-def build_generic_attr(module, _attr):
- _value = module.params.get(_attr)
- _value = conv_bool_to_str(_value)
- _value = conv_array_to_str(_value)
- if _value:
- module.custom_desired_config['config'][
- re.sub('_', '-', _attr)] = str(_value)
-
-
-def build_alias_name(module):
- alias_name = module.params.get('alias_name')
- if alias_name:
- module.custom_desired_config['config']['alias'] = alias_name
-
-
-def build_addr_method(module):
- _addr_method = module.params.get('addr_method')
- if _addr_method:
- module.custom_desired_config['addr_family'] = 'inet'
- module.custom_desired_config['addr_method'] = _addr_method
-
-
-def build_vrr(module):
- _virtual_ip = module.params.get('virtual_ip')
- _virtual_mac = module.params.get('virtual_mac')
- vrr_config = []
- if _virtual_ip:
- vrr_config.append(_virtual_mac)
- vrr_config.append(_virtual_ip)
- module.custom_desired_config.get('config')['address-virtual'] = \
- ' '.join(vrr_config)
-
-
-def add_glob_to_array(_bondmems):
- """
- goes through each bond member if it sees a dash add glob
- before it
- """
- result = []
- if isinstance(_bondmems, list):
- for _entry in _bondmems:
- if re.search('-', _entry):
- _entry = 'glob ' + _entry
- result.append(_entry)
- return ' '.join(result)
- return _bondmems
-
-
-def build_bond_attr(module, _attr):
- _value = module.params.get(_attr)
- _value = conv_bool_to_str(_value)
- _value = add_glob_to_array(_value)
- if _value:
- module.custom_desired_config['config'][
- 'bond-' + re.sub('_', '-', _attr)] = str(_value)
-
-
-def build_desired_iface_config(module):
- """
- take parameters defined and build ifupdown2 compatible hash
- """
- module.custom_desired_config = {
- 'addr_family': None,
- 'auto': True,
- 'config': {},
- 'name': module.params.get('name')
- }
-
- for _attr in ['slaves', 'mode', 'xmit_hash_policy',
- 'miimon', 'lacp_rate', 'lacp_bypass_allow',
- 'lacp_bypass_period', 'lacp_bypass_all_active',
- 'min_links']:
- build_bond_attr(module, _attr)
-
- build_addr_method(module)
- build_address(module)
- build_vids(module)
- build_pvid(module)
- build_alias_name(module)
- build_vrr(module)
-
- for _attr in ['mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge'
- 'mstpctl_bpduguard', 'clag_id',
- 'lacp_bypass_priority']:
- build_generic_attr(module, _attr)
-
-
-def config_dict_changed(module):
- """
- return true if 'config' dict in hash is different
- between desired and current config
- """
- current_config = module.custom_current_config.get('config')
- desired_config = module.custom_desired_config.get('config')
- return current_config != desired_config
-
-
-def config_changed(module):
- """
- returns true if config has changed
- """
- if config_dict_changed(module):
- return True
- # check if addr_method is changed
- return module.custom_desired_config.get('addr_method') != \
- module.custom_current_config.get('addr_method')
-
-
-def replace_config(module):
- temp = tempfile.NamedTemporaryFile()
- desired_config = module.custom_desired_config
- # by default it will be something like /etc/network/interfaces.d/swp1
- final_location = module.params.get('location') + '/' + \
- module.params.get('name')
- final_text = ''
- _fh = open(final_location, 'w')
- # make sure to put hash in array or else ifquery will fail
- # write to temp file
- try:
- temp.write(module.jsonify([desired_config]))
- # need to seek to 0 so that data is written to tempfile.
- temp.seek(0)
- _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
- final_text = run_cmd(module, _cmd)
- finally:
- temp.close()
-
- try:
- _fh.write(final_text)
- finally:
- _fh.close()
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- slaves=dict(required=True, type='list'),
- name=dict(required=True, type='str'),
- ipv4=dict(type='list'),
- ipv6=dict(type='list'),
- alias_name=dict(type='str'),
- addr_method=dict(type='str',
- choices=['', 'dhcp']),
- mtu=dict(type='str'),
- virtual_ip=dict(type='str'),
- virtual_mac=dict(type='str'),
- vids=dict(type='list'),
- pvid=dict(type='str'),
- mstpctl_portnetwork=dict(type='bool'),
- mstpctl_portadminedge=dict(type='bool'),
- mstpctl_bpduguard=dict(type='bool'),
- clag_id=dict(type='str'),
- min_links=dict(type='int', default=1),
- mode=dict(type='str', default='802.3ad'),
- miimon=dict(type='int', default=100),
- xmit_hash_policy=dict(type='str', default='layer3+4'),
- lacp_rate=dict(type='int', default=1),
- lacp_bypass_allow=dict(type='int', choices=[0, 1]),
- lacp_bypass_all_active=dict(type='int', choices=[0, 1]),
- lacp_bypass_priority=dict(type='list'),
- lacp_bypass_period=dict(type='int'),
- location=dict(type='str',
- default='/etc/network/interfaces.d')
- ),
- mutually_exclusive=[['lacp_bypass_priority', 'lacp_bypass_all_active']],
- required_together=[['virtual_ip', 'virtual_mac']]
- )
-
- # if using the jinja default filter, this resolves to
- # create an list with an empty string ['']. The following
- # checks all lists and removes it, so that functions expecting
- # an empty list, get this result. May upstream this fix into
- # the AnsibleModule code to have it check for this.
- for k, _param in module.params.items():
- if isinstance(_param, list):
- module.params[k] = [x for x in _param if x]
-
- _location = module.params.get('location')
- if not os.path.exists(_location):
- _msg = "%s does not exist." % (_location)
- module.fail_json(msg=_msg)
- return # for testing purposes only
-
- ifacename = module.params.get('name')
- _changed = False
- _msg = "interface %s config not changed" % (ifacename)
- current_iface_config(module)
- build_desired_iface_config(module)
- if config_changed(module):
- replace_config(module)
- _msg = "interface %s config updated" % (ifacename)
- _changed = True
-
- module.exit_json(changed=_changed, msg=_msg)
-
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/network/cumulus/_cl_bridge.py b/lib/ansible/modules/network/cumulus/_cl_bridge.py
index 5c9af56b2e..00b498d5e9 100644
--- a/lib/ansible/modules/network/cumulus/_cl_bridge.py
+++ b/lib/ansible/modules/network/cumulus/_cl_bridge.py
@@ -19,7 +19,10 @@ module: cl_bridge
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a bridge port on Cumulus Linux
-deprecated: Deprecated in 2.3. Use M(nclu) instead.
+deprecated:
+ removed_in: "2.5"
+ why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
+ alternative: Use M(nclu) instead.
description:
- Configures a bridge interface on Cumulus Linux To configure a bond port
use the cl_bond module. To configure any other type of interface use the
@@ -157,258 +160,7 @@ msg:
sample: "interface bond0 config updated"
'''
-import os
-import re
-import tempfile
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-# handy helper for calling system calls.
-# calls AnsibleModule.run_command and prints a more appropriate message
-# exec_path - path to file to execute, with all its arguments.
-# E.g "/sbin/ip -o link show"
-# failure_msg - what message to print on failure
-def run_cmd(module, exec_path):
- (_rc, out, _err) = module.run_command(exec_path)
- if _rc > 0:
- if re.search('cannot find interface', _err):
- return '[{}]'
- failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
- module.fail_json(msg=failure_msg)
- else:
- return out
-
-
-def current_iface_config(module):
- # due to a bug in ifquery, have to check for presence of interface file
- # and not rely solely on ifquery. when bug is fixed, this check can be
- # removed
- _ifacename = module.params.get('name')
- _int_dir = module.params.get('location')
- module.custom_current_config = {}
- if os.path.exists(_int_dir + '/' + _ifacename):
- _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
- module.custom_current_config = module.from_json(
- run_cmd(module, _cmd))[0]
-
-
-def build_address(module):
- # if addr_method == 'dhcp', don't add IP address
- if module.params.get('addr_method') == 'dhcp':
- return
- _ipv4 = module.params.get('ipv4')
- _ipv6 = module.params.get('ipv6')
- _addresslist = []
- if _ipv4 and len(_ipv4) > 0:
- _addresslist += _ipv4
-
- if _ipv6 and len(_ipv6) > 0:
- _addresslist += _ipv6
- if len(_addresslist) > 0:
- module.custom_desired_config['config']['address'] = ' '.join(
- _addresslist)
-
-
-def build_vids(module):
- _vids = module.params.get('vids')
- if _vids and len(_vids) > 0:
- module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids)
-
-
-def build_pvid(module):
- _pvid = module.params.get('pvid')
- if _pvid:
- module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)
-
-
-def conv_bool_to_str(_value):
- if isinstance(_value, bool):
- if _value is True:
- return 'yes'
- else:
- return 'no'
- return _value
-
-
-def build_generic_attr(module, _attr):
- _value = module.params.get(_attr)
- _value = conv_bool_to_str(_value)
- if _value:
- module.custom_desired_config['config'][
- re.sub('_', '-', _attr)] = str(_value)
-
-
-def build_alias_name(module):
- alias_name = module.params.get('alias_name')
- if alias_name:
- module.custom_desired_config['config']['alias'] = alias_name
-
-
-def build_addr_method(module):
- _addr_method = module.params.get('addr_method')
- if _addr_method:
- module.custom_desired_config['addr_family'] = 'inet'
- module.custom_desired_config['addr_method'] = _addr_method
-
-
-def build_vrr(module):
- _virtual_ip = module.params.get('virtual_ip')
- _virtual_mac = module.params.get('virtual_mac')
- vrr_config = []
- if _virtual_ip:
- vrr_config.append(_virtual_mac)
- vrr_config.append(_virtual_ip)
- module.custom_desired_config.get('config')['address-virtual'] = \
- ' '.join(vrr_config)
-
-
-def add_glob_to_array(_bridgemems):
- """
- goes through each bridge member if it sees a dash add glob
- before it
- """
- result = []
- if isinstance(_bridgemems, list):
- for _entry in _bridgemems:
- if re.search('-', _entry):
- _entry = 'glob ' + _entry
- result.append(_entry)
- return ' '.join(result)
- return _bridgemems
-
-
-def build_bridge_attr(module, _attr):
- _value = module.params.get(_attr)
- _value = conv_bool_to_str(_value)
- _value = add_glob_to_array(_value)
- if _value:
- module.custom_desired_config['config'][
- 'bridge-' + re.sub('_', '-', _attr)] = str(_value)
-
-
-def build_desired_iface_config(module):
- """
- take parameters defined and build ifupdown2 compatible hash
- """
- module.custom_desired_config = {
- 'addr_family': None,
- 'auto': True,
- 'config': {},
- 'name': module.params.get('name')
- }
-
- for _attr in ['vlan_aware', 'pvid', 'ports', 'stp']:
- build_bridge_attr(module, _attr)
-
- build_addr_method(module)
- build_address(module)
- build_vids(module)
- build_alias_name(module)
- build_vrr(module)
- for _attr in ['mtu', 'mstpctl_treeprio']:
- build_generic_attr(module, _attr)
-
-
-def config_dict_changed(module):
- """
- return true if 'config' dict in hash is different
- between desired and current config
- """
- current_config = module.custom_current_config.get('config')
- desired_config = module.custom_desired_config.get('config')
- return current_config != desired_config
-
-
-def config_changed(module):
- """
- returns true if config has changed
- """
- if config_dict_changed(module):
- return True
- # check if addr_method is changed
- return module.custom_desired_config.get('addr_method') != \
- module.custom_current_config.get('addr_method')
-
-
-def replace_config(module):
- temp = tempfile.NamedTemporaryFile()
- desired_config = module.custom_desired_config
- # by default it will be something like /etc/network/interfaces.d/swp1
- final_location = module.params.get('location') + '/' + \
- module.params.get('name')
- final_text = ''
- _fh = open(final_location, 'w')
- # make sure to put hash in array or else ifquery will fail
- # write to temp file
- try:
- temp.write(module.jsonify([desired_config]))
- # need to seek to 0 so that data is written to tempfile.
- temp.seek(0)
- _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
- final_text = run_cmd(module, _cmd)
- finally:
- temp.close()
-
- try:
- _fh.write(final_text)
- finally:
- _fh.close()
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- ports=dict(required=True, type='list'),
- name=dict(required=True, type='str'),
- ipv4=dict(type='list'),
- ipv6=dict(type='list'),
- alias_name=dict(type='str'),
- addr_method=dict(type='str',
- choices=['', 'dhcp']),
- mtu=dict(type='str'),
- virtual_ip=dict(type='str'),
- virtual_mac=dict(type='str'),
- vids=dict(type='list'),
- pvid=dict(type='str'),
- mstpctl_treeprio=dict(type='str'),
- vlan_aware=dict(type='bool'),
- stp=dict(type='bool', default='yes'),
- location=dict(type='str',
- default='/etc/network/interfaces.d')
- ),
- required_together=[
- ['virtual_ip', 'virtual_mac']
- ]
- )
-
- # if using the jinja default filter, this resolves to
- # create an list with an empty string ['']. The following
- # checks all lists and removes it, so that functions expecting
- # an empty list, get this result. May upstream this fix into
- # the AnsibleModule code to have it check for this.
- for k, _param in module.params.items():
- if isinstance(_param, list):
- module.params[k] = [x for x in _param if x]
-
- _location = module.params.get('location')
- if not os.path.exists(_location):
- _msg = "%s does not exist." % (_location)
- module.fail_json(msg=_msg)
- return # for testing purposes only
-
- ifacename = module.params.get('name')
- _changed = False
- _msg = "interface %s config not changed" % (ifacename)
- current_iface_config(module)
- build_desired_iface_config(module)
- if config_changed(module):
- replace_config(module)
- _msg = "interface %s config updated" % (ifacename)
- _changed = True
-
- module.exit_json(changed=_changed, msg=_msg)
-
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/network/cumulus/_cl_img_install.py b/lib/ansible/modules/network/cumulus/_cl_img_install.py
index 2265847537..7342c70abb 100644
--- a/lib/ansible/modules/network/cumulus/_cl_img_install.py
+++ b/lib/ansible/modules/network/cumulus/_cl_img_install.py
@@ -19,7 +19,10 @@ module: cl_img_install
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Install a different Cumulus Linux version.
-deprecated: Deprecated in 2.3. The image slot system no longer exists in Cumulus Linux.
+deprecated:
+ removed_in: "2.5"
+ why: The image slot system no longer exists in Cumulus Linux.
+ alternative: n/a
description:
- install a different version of Cumulus Linux in the inactive slot. For
more details go the Image Management User Guide at
@@ -103,213 +106,7 @@ msg:
sample: "interface bond0 config updated"
'''
-import re
-
-from ansible.module_utils.basic import AnsibleModule, platform
-from ansible.module_utils.six.moves.urllib import parse as urlparse
-
-
-def check_url(module, url):
- parsed_url = urlparse(url)
- if len(parsed_url.path) > 0:
- sch = parsed_url.scheme
- if (sch == 'http' or sch == 'https' or len(parsed_url.scheme) == 0):
- return True
- module.fail_json(msg="Image Path URL. Wrong Format %s" % (url))
- return False
-
-
-def run_cl_cmd(module, cmd, check_rc=True):
- try:
- (rc, out, err) = module.run_command(cmd, check_rc=check_rc)
- except Exception as e:
- module.fail_json(msg=e.strerror)
- # trim last line as it is always empty
- ret = out.splitlines()
- return ret
-
-
-def get_slot_info(module):
- slots = {}
- slots['1'] = {}
- slots['2'] = {}
- active_slotnum = get_active_slot(module)
- primary_slotnum = get_primary_slot_num(module)
- for _num in range(1, 3):
- slot = slots[str(_num)]
- slot['version'] = get_slot_version(module, str(_num))
- if _num == int(active_slotnum):
- slot['active'] = True
- if _num == int(primary_slotnum):
- slot['primary'] = True
- return slots
-
-
-def get_slot_version(module, slot_num):
- lsb_release = check_mnt_root_lsb_release(slot_num)
- switch_firm_ver = check_fw_print_env(module, slot_num)
- _version = module.sw_version
- if lsb_release == _version or switch_firm_ver == _version:
- return _version
- elif lsb_release:
- return lsb_release
- else:
- return switch_firm_ver
-
-
-def check_mnt_root_lsb_release(slot_num):
- _path = '/mnt/root-rw/config%s/etc/lsb-release' % (slot_num)
- try:
- lsb_release = open(_path)
- lines = lsb_release.readlines()
- for line in lines:
- _match = re.search('DISTRIB_RELEASE=([0-9a-zA-Z.]+)', line)
- if _match:
- return _match.group(1).split('-')[0]
- except:
- pass
- return None
-
-
-def check_fw_print_env(module, slot_num):
- cmd = None
- if platform.machine() == 'ppc':
- cmd = "/usr/sbin/fw_printenv -n cl.ver%s" % (slot_num)
- fw_output = run_cl_cmd(module, cmd)
- return fw_output[0].split('-')[0]
- elif platform.machine() == 'x86_64':
- cmd = "/usr/bin/grub-editenv list"
- grub_output = run_cl_cmd(module, cmd)
- for _line in grub_output:
- _regex_str = re.compile('cl.ver' + slot_num + r'=([\w.]+)-')
- m0 = re.match(_regex_str, _line)
- if m0:
- return m0.group(1)
-
-
-def get_primary_slot_num(module):
- cmd = None
- if platform.machine() == 'ppc':
- cmd = "/usr/sbin/fw_printenv -n cl.active"
- return ''.join(run_cl_cmd(module, cmd))
- elif platform.machine() == 'x86_64':
- cmd = "/usr/bin/grub-editenv list"
- grub_output = run_cl_cmd(module, cmd)
- for _line in grub_output:
- _regex_str = re.compile(r'cl.active=(\d)')
- m0 = re.match(_regex_str, _line)
- if m0:
- return m0.group(1)
-
-
-def get_active_slot(module):
- try:
- cmdline = open('/proc/cmdline').readline()
- except:
- module.fail_json(msg='Failed to open /proc/cmdline. ' +
- 'Unable to determine active slot')
-
- _match = re.search(r'active=(\d+)', cmdline)
- if _match:
- return _match.group(1)
- return None
-
-
-def install_img(module):
- src = module.params.get('src')
- _version = module.sw_version
- app_path = '/usr/cumulus/bin/cl-img-install -f %s' % (src)
- run_cl_cmd(module, app_path)
- perform_switch_slot = module.params.get('switch_slot')
- if perform_switch_slot is True:
- check_sw_version(module)
- else:
- _changed = True
- _msg = "Cumulus Linux Version " + _version + " successfully" + \
- " installed in alternate slot"
- module.exit_json(changed=_changed, msg=_msg)
-
-
-def switch_slot(module, slotnum):
- _switch_slot = module.params.get('switch_slot')
- if _switch_slot is True:
- app_path = '/usr/cumulus/bin/cl-img-select %s' % (slotnum)
- run_cl_cmd(module, app_path)
-
-
-def determine_sw_version(module):
- _version = module.params.get('version')
- _filename = ''
- # Use _version if user defines it
- if _version:
- module.sw_version = _version
- return
- else:
- _filename = module.params.get('src').split('/')[-1]
- _match = re.search(r'\d+\W\d+\W\w+', _filename)
- if _match:
- module.sw_version = re.sub(r'\W', '.', _match.group())
- return
- _msg = 'Unable to determine version from file %s' % (_filename)
- module.exit_json(changed=False, msg=_msg)
-
-
-def check_sw_version(module):
- slots = get_slot_info(module)
- _version = module.sw_version
- perform_switch_slot = module.params.get('switch_slot')
- for _num, slot in slots.items():
- if slot['version'] == _version:
- if 'active' in slot:
- _msg = "Version %s is installed in the active slot" \
- % (_version)
- module.exit_json(changed=False, msg=_msg)
- else:
- _msg = "Version " + _version + \
- " is installed in the alternate slot. "
- if 'primary' not in slot:
- if perform_switch_slot is True:
- switch_slot(module, _num)
- _msg = _msg + \
- "cl-img-select has made the alternate " + \
- "slot the primary slot. " +\
- "Next reboot, switch will load " + _version + "."
- module.exit_json(changed=True, msg=_msg)
- else:
- _msg = _msg + \
- "Next reboot will not load " + _version + ". " + \
- "switch_slot keyword set to 'no'."
- module.exit_json(changed=False, msg=_msg)
- else:
- if perform_switch_slot is True:
- _msg = _msg + \
- "Next reboot, switch will load " + _version + "."
- module.exit_json(changed=False, msg=_msg)
- else:
- _msg = _msg + \
- 'switch_slot set to "no". ' + \
- 'No further action to take'
- module.exit_json(changed=False, msg=_msg)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- src=dict(required=True, type='str'),
- version=dict(type='str'),
- switch_slot=dict(type='bool', default=False),
- ),
- )
-
- determine_sw_version(module)
- _url = module.params.get('src')
-
- check_sw_version(module)
-
- check_url(module, _url)
-
- install_img(module)
-
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/network/cumulus/_cl_interface.py b/lib/ansible/modules/network/cumulus/_cl_interface.py
index e6499685c4..153529716e 100644
--- a/lib/ansible/modules/network/cumulus/_cl_interface.py
+++ b/lib/ansible/modules/network/cumulus/_cl_interface.py
@@ -20,7 +20,10 @@ version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configures a front panel port, loopback or
management port on Cumulus Linux.
-deprecated: Deprecated in 2.3. Use M(nclu) instead.
+deprecated:
+ removed_in: "2.5"
+ why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
+ alternative: Use M(nclu) instead.
description:
- Configures a front panel, sub-interface, SVI, management or loopback port
on a Cumulus Linux switch. For bridge ports use the cl_bridge module. For
@@ -202,249 +205,7 @@ msg:
sample: "interface bond0 config updated"
'''
-import os
-import re
-import tempfile
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-# handy helper for calling system calls.
-# calls AnsibleModule.run_command and prints a more appropriate message
-# exec_path - path to file to execute, with all its arguments.
-# E.g "/sbin/ip -o link show"
-# failure_msg - what message to print on failure
-def run_cmd(module, exec_path):
- (_rc, out, _err) = module.run_command(exec_path)
- if _rc > 0:
- if re.search('cannot find interface', _err):
- return '[{}]'
- failure_msg = "Failed; %s Error: %s" % (exec_path, _err)
- module.fail_json(msg=failure_msg)
- else:
- return out
-
-
-def current_iface_config(module):
- # due to a bug in ifquery, have to check for presence of interface file
- # and not rely solely on ifquery. when bug is fixed, this check can be
- # removed
- _ifacename = module.params.get('name')
- _int_dir = module.params.get('location')
- module.custom_current_config = {}
- if os.path.exists(_int_dir + '/' + _ifacename):
- _cmd = "/sbin/ifquery -o json %s" % (module.params.get('name'))
- module.custom_current_config = module.from_json(
- run_cmd(module, _cmd))[0]
-
-
-def build_address(module):
- # if addr_method == 'dhcp', don't add IP address
- if module.params.get('addr_method') == 'dhcp':
- return
- _ipv4 = module.params.get('ipv4')
- _ipv6 = module.params.get('ipv6')
- _addresslist = []
- if _ipv4 and len(_ipv4) > 0:
- _addresslist += _ipv4
- if _ipv6 and len(_ipv6) > 0:
- _addresslist += _ipv6
- if len(_addresslist) > 0:
- module.custom_desired_config['config']['address'] = ' '.join(
- _addresslist)
-
-
-def build_vids(module):
- _vids = module.params.get('vids')
- if _vids and len(_vids) > 0:
- module.custom_desired_config['config']['bridge-vids'] = ' '.join(_vids)
-
-
-def build_pvid(module):
- _pvid = module.params.get('pvid')
- if _pvid:
- module.custom_desired_config['config']['bridge-pvid'] = str(_pvid)
-
-
-def build_speed(module):
- _speed = module.params.get('speed')
- if _speed:
- module.custom_desired_config['config']['link-speed'] = str(_speed)
- module.custom_desired_config['config']['link-duplex'] = 'full'
-
-
-def conv_bool_to_str(_value):
- if isinstance(_value, bool):
- if _value is True:
- return 'yes'
- else:
- return 'no'
- return _value
-
-
-def build_generic_attr(module, _attr):
- _value = module.params.get(_attr)
- _value = conv_bool_to_str(_value)
- if _value:
- module.custom_desired_config['config'][
- re.sub('_', '-', _attr)] = str(_value)
-
-
-def build_alias_name(module):
- alias_name = module.params.get('alias_name')
- if alias_name:
- module.custom_desired_config['config']['alias'] = alias_name
-
-
-def build_addr_method(module):
- _addr_method = module.params.get('addr_method')
- if _addr_method:
- module.custom_desired_config['addr_family'] = 'inet'
- module.custom_desired_config['addr_method'] = _addr_method
-
-
-def build_vrr(module):
- _virtual_ip = module.params.get('virtual_ip')
- _virtual_mac = module.params.get('virtual_mac')
- vrr_config = []
- if _virtual_ip:
- vrr_config.append(_virtual_mac)
- vrr_config.append(_virtual_ip)
- module.custom_desired_config.get('config')['address-virtual'] = \
- ' '.join(vrr_config)
-
-
-def build_desired_iface_config(module):
- """
- take parameters defined and build ifupdown2 compatible hash
- """
- module.custom_desired_config = {
- 'addr_family': None,
- 'auto': True,
- 'config': {},
- 'name': module.params.get('name')
- }
-
- build_addr_method(module)
- build_address(module)
- build_vids(module)
- build_pvid(module)
- build_speed(module)
- build_alias_name(module)
- build_vrr(module)
- for _attr in ['mtu', 'mstpctl_portnetwork', 'mstpctl_portadminedge',
- 'mstpctl_bpduguard', 'clagd_enable',
- 'clagd_priority', 'clagd_peer_ip',
- 'clagd_sys_mac', 'clagd_args']:
- build_generic_attr(module, _attr)
-
-
-def config_dict_changed(module):
- """
- return true if 'config' dict in hash is different
- between desired and current config
- """
- current_config = module.custom_current_config.get('config')
- desired_config = module.custom_desired_config.get('config')
- return current_config != desired_config
-
-
-def config_changed(module):
- """
- returns true if config has changed
- """
- if config_dict_changed(module):
- return True
- # check if addr_method is changed
- return module.custom_desired_config.get('addr_method') != \
- module.custom_current_config.get('addr_method')
-
-
-def replace_config(module):
- temp = tempfile.NamedTemporaryFile()
- desired_config = module.custom_desired_config
- # by default it will be something like /etc/network/interfaces.d/swp1
- final_location = module.params.get('location') + '/' + \
- module.params.get('name')
- final_text = ''
- _fh = open(final_location, 'w')
- # make sure to put hash in array or else ifquery will fail
- # write to temp file
- try:
- temp.write(module.jsonify([desired_config]))
- # need to seek to 0 so that data is written to tempfile.
- temp.seek(0)
- _cmd = "/sbin/ifquery -a -i %s -t json" % (temp.name)
- final_text = run_cmd(module, _cmd)
- finally:
- temp.close()
-
- try:
- _fh.write(final_text)
- finally:
- _fh.close()
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True, type='str'),
- ipv4=dict(type='list'),
- ipv6=dict(type='list'),
- alias_name=dict(type='str'),
- addr_method=dict(type='str',
- choices=['', 'loopback', 'dhcp']),
- speed=dict(type='str'),
- mtu=dict(type='str'),
- virtual_ip=dict(type='str'),
- virtual_mac=dict(type='str'),
- vids=dict(type='list'),
- pvid=dict(type='str'),
- mstpctl_portnetwork=dict(type='bool'),
- mstpctl_portadminedge=dict(type='bool'),
- mstpctl_bpduguard=dict(type='bool'),
- clagd_enable=dict(type='bool'),
- clagd_priority=dict(type='str'),
- clagd_peer_ip=dict(type='str'),
- clagd_sys_mac=dict(type='str'),
- clagd_args=dict(type='str'),
- location=dict(type='str',
- default='/etc/network/interfaces.d')
- ),
- required_together=[
- ['virtual_ip', 'virtual_mac'],
- ['clagd_enable', 'clagd_priority',
- 'clagd_peer_ip', 'clagd_sys_mac']
- ]
- )
-
- # if using the jinja default filter, this resolves to
- # create an list with an empty string ['']. The following
- # checks all lists and removes it, so that functions expecting
- # an empty list, get this result. May upstream this fix into
- # the AnsibleModule code to have it check for this.
- for k, _param in module.params.items():
- if isinstance(_param, list):
- module.params[k] = [x for x in _param if x]
-
- _location = module.params.get('location')
- if not os.path.exists(_location):
- _msg = "%s does not exist." % (_location)
- module.fail_json(msg=_msg)
- return # for testing purposes only
-
- ifacename = module.params.get('name')
- _changed = False
- _msg = "interface %s config not changed" % (ifacename)
- current_iface_config(module)
- build_desired_iface_config(module)
- if config_changed(module):
- replace_config(module)
- _msg = "interface %s config updated" % (ifacename)
- _changed = True
-
- module.exit_json(changed=_changed, msg=_msg)
-
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/network/cumulus/_cl_interface_policy.py b/lib/ansible/modules/network/cumulus/_cl_interface_policy.py
index bb2e32bc5a..e1049cfc01 100644
--- a/lib/ansible/modules/network/cumulus/_cl_interface_policy.py
+++ b/lib/ansible/modules/network/cumulus/_cl_interface_policy.py
@@ -19,7 +19,10 @@ module: cl_interface_policy
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure interface enforcement policy on Cumulus Linux
-deprecated: Deprecated in 2.3. Use M(nclu) instead.
+deprecated:
+ removed_in: "2.5"
+ why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
+ alternative: Use M(nclu) instead.
description:
- This module affects the configuration files located in the interfaces
folder defined by ifupdown2. Interfaces port and port ranges listed in the
@@ -64,82 +67,8 @@ msg:
type: string
sample: "interface bond0 config updated"
'''
-import os
-import re
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-# get list of interface files that are currently "configured".
-# doesn't mean actually applied to the system, but most likely are
-def read_current_int_dir(module):
- module.custom_currentportlist = os.listdir(module.params.get('location'))
-
-
-# take the allowed list and convert it to into a list
-# of ports.
-def convert_allowed_list_to_port_range(module):
- allowedlist = module.params.get('allowed')
- for portrange in allowedlist:
- module.custom_allowedportlist += breakout_portrange(portrange)
-
-
-def breakout_portrange(prange):
- _m0 = re.match(r'(\w+[a-z.])(\d+)?-?(\d+)?(\w+)?', prange.strip())
- # no range defined
- if _m0.group(3) is None:
- return [_m0.group(0)]
- else:
- portarray = []
- intrange = range(int(_m0.group(2)), int(_m0.group(3)) + 1)
- for _int in intrange:
- portarray.append(''.join([_m0.group(1),
- str(_int),
- str(_m0.group(4) or '')
- ]
- )
- )
- return portarray
-
-
-# deletes the interface files
-def unconfigure_interfaces(module):
- currentportset = set(module.custom_currentportlist)
- allowedportset = set(module.custom_allowedportlist)
- remove_list = currentportset.difference(allowedportset)
- fileprefix = module.params.get('location')
- module.msg = "remove config for interfaces %s" % (', '.join(remove_list))
- for _file in remove_list:
- os.unlink(fileprefix + _file)
-
-
-# check to see if policy should be enforced
-# returns true if policy needs to be enforced
-# that is delete interface files
-def int_policy_enforce(module):
- currentportset = set(module.custom_currentportlist)
- allowedportset = set(module.custom_allowedportlist)
- return not currentportset.issubset(allowedportset)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- allowed=dict(type='list', required=True),
- location=dict(type='str', default='/etc/network/interfaces.d/')
- ),
- )
- module.custom_currentportlist = []
- module.custom_allowedportlist = []
- module.changed = False
- module.msg = 'configured port list is part of allowed port list'
- read_current_int_dir(module)
- convert_allowed_list_to_port_range(module)
- if int_policy_enforce(module):
- module.changed = True
- unconfigure_interfaces(module)
- module.exit_json(changed=module.changed, msg=module.msg)
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/network/cumulus/_cl_license.py b/lib/ansible/modules/network/cumulus/_cl_license.py
index 175e79fb68..d88d4ad9e1 100644
--- a/lib/ansible/modules/network/cumulus/_cl_license.py
+++ b/lib/ansible/modules/network/cumulus/_cl_license.py
@@ -18,8 +18,11 @@ DOCUMENTATION = '''
module: cl_license
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
-short_description: Install licenses fo Cumulus Linux
-deprecated: Deprecated in 2.3.
+short_description: Install licenses for Cumulus Linux
+deprecated:
+ why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
+ removed_in: "2.5"
+ alternative: Use M(nclu) instead.
description:
- Installs a Cumulus Linux license. The module reports no change of status
when a license is installed.
@@ -100,43 +103,7 @@ msg:
sample: "interface bond0 config updated"
'''
-from ansible.module_utils.basic import AnsibleModule
-
-
-CL_LICENSE_PATH = '/usr/cumulus/bin/cl-license'
-
-
-def install_license(module):
- # license is not installed, install it
- _url = module.params.get('src')
- (_rc, out, _err) = module.run_command("%s -i %s" % (CL_LICENSE_PATH, _url))
- if _rc > 0:
- module.fail_json(msg=_err)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- src=dict(required=True, type='str'),
- force=dict(type='bool', default=False)
- ),
- )
-
- # check if license is installed
- # if force is enabled then set return code to nonzero
- if module.params.get('force') is True:
- _rc = 10
- else:
- (_rc, out, _err) = module.run_command(CL_LICENSE_PATH)
- if _rc == 0:
- module.msg = "No change. License already installed"
- module.changed = False
- else:
- install_license(module)
- module.msg = "License installation completed"
- module.changed = True
- module.exit_json(changed=module.changed, msg=module.msg)
-
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/network/cumulus/_cl_ports.py b/lib/ansible/modules/network/cumulus/_cl_ports.py
index 41e329ae29..e1bf209343 100644
--- a/lib/ansible/modules/network/cumulus/_cl_ports.py
+++ b/lib/ansible/modules/network/cumulus/_cl_ports.py
@@ -19,7 +19,10 @@ module: cl_ports
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure Cumulus Switch port attributes (ports.conf)
-deprecated: Deprecated in 2.3. Use M(nclu) instead.
+deprecated:
+ removed_in: "2.5"
+ why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
+ alternative: Use M(nclu) instead.
description:
- Set the initial port attribute defined in the Cumulus Linux ports.conf,
file. This module does not do any error checking at the moment. Be careful
@@ -77,139 +80,8 @@ msg:
type: string
sample: "interface bond0 config updated"
'''
-import os
-import re
-import tempfile
-import shutil
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-
-
-PORTS_CONF = '/etc/cumulus/ports.conf'
-
-
-def hash_existing_ports_conf(module):
- module.ports_conf_hash = {}
- if not os.path.exists(PORTS_CONF):
- return False
-
- try:
- existing_ports_conf = open(PORTS_CONF).readlines()
- except IOError as e:
- _msg = "Failed to open %s: %s" % (PORTS_CONF, to_native(e))
- module.fail_json(msg=_msg)
- return # for testing only should return on module.fail_json
-
- for _line in existing_ports_conf:
- _m0 = re.match(r'^(\d+)=(\w+)', _line)
- if _m0:
- _portnum = int(_m0.group(1))
- _speed = _m0.group(2)
- module.ports_conf_hash[_portnum] = _speed
-
-
-def generate_new_ports_conf_hash(module):
- new_ports_conf_hash = {}
- convert_hash = {
- 'speed_40g_div_4': '40G/4',
- 'speed_4_by_10g': '4x10G',
- 'speed_10g': '10G',
- 'speed_40g': '40G'
- }
- for k in module.params.keys():
- port_range = module.params[k]
- port_setting = convert_hash[k]
- if port_range:
- port_range = [x for x in port_range if x]
- for port_str in port_range:
- port_range_str = port_str.replace('swp', '').split('-')
- if len(port_range_str) == 1:
- new_ports_conf_hash[int(port_range_str[0])] = \
- port_setting
- else:
- int_range = map(int, port_range_str)
- portnum_range = range(int_range[0], int_range[1] + 1)
- for i in portnum_range:
- new_ports_conf_hash[i] = port_setting
- module.new_ports_hash = new_ports_conf_hash
-
-
-def compare_new_and_old_port_conf_hash(module):
- ports_conf_hash_copy = module.ports_conf_hash.copy()
- module.ports_conf_hash.update(module.new_ports_hash)
- port_num_length = len(module.ports_conf_hash.keys())
- orig_port_num_length = len(ports_conf_hash_copy.keys())
- if port_num_length != orig_port_num_length:
- module.fail_json(msg="Port numbering is wrong. \
-Too many or two few ports configured")
- return False
- elif ports_conf_hash_copy == module.ports_conf_hash:
- return False
- return True
-
-
-def make_copy_of_orig_ports_conf(module):
- if os.path.exists(PORTS_CONF + '.orig'):
- return
-
- try:
- shutil.copyfile(PORTS_CONF, PORTS_CONF + '.orig')
- except IOError as e:
- _msg = "Failed to save the original %s: %s" % (PORTS_CONF, to_native(e))
- module.fail_json(msg=_msg)
- return # for testing only
-
-
-def write_to_ports_conf(module):
- """
- use tempfile to first write out config in temp file
- then write to actual location. may help prevent file
- corruption. Ports.conf is a critical file for Cumulus.
- Don't want to corrupt this file under any circumstance.
- """
- temp = tempfile.NamedTemporaryFile()
- try:
- try:
- temp.write('# Managed By Ansible\n')
- for k in sorted(module.ports_conf_hash.keys()):
- port_setting = module.ports_conf_hash[k]
- _str = "%s=%s\n" % (k, port_setting)
- temp.write(_str)
- temp.seek(0)
- shutil.copyfile(temp.name, PORTS_CONF)
- except IOError as e:
- module.fail_json(msg="Failed to write to %s: %s" % (PORTS_CONF, to_native(e)))
- finally:
- temp.close()
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- speed_40g_div_4=dict(type='list'),
- speed_4_by_10g=dict(type='list'),
- speed_10g=dict(type='list'),
- speed_40g=dict(type='list')
- ),
- required_one_of=[['speed_40g_div_4',
- 'speed_4_by_10g',
- 'speed_10g',
- 'speed_40g']]
- )
-
- _changed = False
- hash_existing_ports_conf(module)
- generate_new_ports_conf_hash(module)
- if compare_new_and_old_port_conf_hash(module):
- make_copy_of_orig_ports_conf(module)
- write_to_ports_conf(module)
- _changed = True
- _msg = "/etc/cumulus/ports.conf changed"
- else:
- _msg = 'No change in /etc/ports.conf'
- module.exit_json(changed=_changed, msg=_msg)
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/network/nxos/_nxos_ip_interface.py b/lib/ansible/modules/network/nxos/_nxos_ip_interface.py
index 1d9d9f496a..18600b2fd6 100644
--- a/lib/ansible/modules/network/nxos/_nxos_ip_interface.py
+++ b/lib/ansible/modules/network/nxos/_nxos_ip_interface.py
@@ -24,7 +24,10 @@ DOCUMENTATION = '''
---
module: nxos_ip_interface
version_added: "2.1"
-deprecated: Deprecated in 2.5. Use M(nxos_l3_interface) instead.
+deprecated:
+ removed_in: "2.9"
+ why: Replaced with common C(*_l3_interface) network modules.
+ alternative: Use M(nxos_l3_interface) instead.
short_description: Manages L3 attributes for IPv4 and IPv6 interfaces.
description:
- Manages Layer 3 attributes for IPv4 and IPv6 interfaces.
diff --git a/lib/ansible/modules/network/nxos/_nxos_mtu.py b/lib/ansible/modules/network/nxos/_nxos_mtu.py
index f2a52127a6..6fe6707013 100644
--- a/lib/ansible/modules/network/nxos/_nxos_mtu.py
+++ b/lib/ansible/modules/network/nxos/_nxos_mtu.py
@@ -25,7 +25,10 @@ DOCUMENTATION = '''
module: nxos_mtu
extends_documentation_fragment: nxos
version_added: "2.2"
-deprecated: Deprecated in 2.3 use M(nxos_system)'s C(mtu) option.
+deprecated:
+ removed_in: "2.5"
+ why: Replaced with common C(*_system) network modules.
+ alternative: Use M(nxos_system)'s C(system_mtu) option. To specify an interfaces MTU use M(nxos_interface).
short_description: Manages MTU settings on Nexus switch.
description:
- Manages MTU settings on Nexus switch.
@@ -121,264 +124,8 @@ changed:
type: boolean
sample: true
'''
-from ansible.module_utils.network.nxos.nxos import load_config, run_commands
-from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
-from ansible.module_utils.basic import AnsibleModule
-
-
-def execute_show_command(command, module):
- if 'show run' not in command:
- output = 'json'
- else:
- output = 'text'
- cmds = [{
- 'command': command,
- 'output': output,
- }]
-
- body = run_commands(module, cmds)
- return body
-
-
-def flatten_list(command_lists):
- flat_command_list = []
- for command in command_lists:
- if isinstance(command, list):
- flat_command_list.extend(command)
- else:
- flat_command_list.append(command)
- return flat_command_list
-
-
-def get_mtu(interface, module):
- command = 'show interface {0}'.format(interface)
- mtu = {}
-
- body = execute_show_command(command, module)
-
- try:
- mtu_table = body[0]['TABLE_interface']['ROW_interface']
- mtu['mtu'] = str(
- mtu_table.get('eth_mtu',
- mtu_table.get('svi_mtu', 'unreadable_via_api')))
- mtu['sysmtu'] = get_system_mtu(module)['sysmtu']
- except KeyError:
- mtu = {}
-
- return mtu
-
-
-def get_system_mtu(module):
- command = 'show run all | inc jumbomtu'
- sysmtu = ''
-
- body = execute_show_command(command, module)
-
- if body:
- sysmtu = str(body[0].split(' ')[-1])
- try:
- sysmtu = int(sysmtu)
- except:
- sysmtu = ""
-
- return dict(sysmtu=str(sysmtu))
-
-
-def get_commands_config_mtu(delta, interface):
- CONFIG_ARGS = {
- 'mtu': 'mtu {mtu}',
- 'sysmtu': 'system jumbomtu {sysmtu}',
- }
-
- commands = []
- for param, value in delta.items():
- command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
- if command and command != 'DNE':
- commands.append(command)
- command = None
- mtu_check = delta.get('mtu', None)
- if mtu_check:
- commands.insert(0, 'interface {0}'.format(interface))
- return commands
-
-
-def get_commands_remove_mtu(delta, interface):
- CONFIG_ARGS = {
- 'mtu': 'no mtu {mtu}',
- 'sysmtu': 'no system jumbomtu {sysmtu}',
- }
- commands = []
- for param, value in delta.items():
- command = CONFIG_ARGS.get(param, 'DNE').format(**delta)
- if command and command != 'DNE':
- commands.append(command)
- command = None
- mtu_check = delta.get('mtu', None)
- if mtu_check:
- commands.insert(0, 'interface {0}'.format(interface))
- return commands
-
-
-def get_interface_type(interface):
- if interface.upper().startswith('ET'):
- return 'ethernet'
- elif interface.upper().startswith('VL'):
- return 'svi'
- elif interface.upper().startswith('LO'):
- return 'loopback'
- elif interface.upper().startswith('MG'):
- return 'management'
- elif interface.upper().startswith('MA'):
- return 'management'
- elif interface.upper().startswith('PO'):
- return 'portchannel'
- else:
- return 'unknown'
-
-
-def is_default(interface, module):
- command = 'show run interface {0}'.format(interface)
-
- try:
- body = execute_show_command(command, module)[0]
- if body == 'DNE':
- return 'DNE'
- else:
- raw_list = body.split('\n')
- if raw_list[-1].startswith('interface'):
- return True
- else:
- return False
- except (KeyError):
- return 'DNE'
-
-
-def get_interface_mode(interface, intf_type, module):
- command = 'show interface {0}'.format(interface)
- mode = 'unknown'
- interface_table = {}
- body = execute_show_command(command, module)
-
- try:
- interface_table = body[0]['TABLE_interface']['ROW_interface']
- except (KeyError, AttributeError, IndexError):
- return mode
-
- if intf_type in ['ethernet', 'portchannel']:
- mode = str(interface_table.get('eth_mode', 'layer3'))
- if mode in ['access', 'trunk']:
- mode = 'layer2'
- elif mode == 'routed':
- mode = 'layer3'
- elif intf_type in ['loopback', 'svi']:
- mode = 'layer3'
- return mode
-
-
-def main():
- argument_spec = dict(
- mtu=dict(type='str'),
- interface=dict(type='str'),
- sysmtu=dict(type='str'),
- state=dict(choices=['absent', 'present'], default='present'),
- )
-
- argument_spec.update(nxos_argument_spec)
-
- module = AnsibleModule(argument_spec=argument_spec,
- required_together=[['mtu', 'interface']],
- supports_check_mode=True)
-
- warnings = list()
- check_args(module, warnings)
-
- interface = module.params['interface']
- mtu = module.params['mtu']
- sysmtu = module.params['sysmtu']
- state = module.params['state']
-
- if sysmtu and (interface or mtu):
- module.fail_json(msg='Proper usage-- either just use the sysmtu param '
- 'or use interface AND mtu params')
-
- if interface:
- intf_type = get_interface_type(interface)
- if intf_type != 'ethernet':
- if is_default(interface, module) == 'DNE':
- module.fail_json(msg='Invalid interface. It does not exist '
- 'on the switch.')
-
- existing = get_mtu(interface, module)
- else:
- existing = get_system_mtu(module)
-
- if interface and mtu:
- if intf_type == 'loopback':
- module.fail_json(msg='Cannot set MTU for loopback interface.')
- mode = get_interface_mode(interface, intf_type, module)
- if mode == 'layer2':
- if intf_type in ['ethernet', 'portchannel']:
- if mtu not in [existing['sysmtu'], '1500']:
- module.fail_json(msg='MTU on L2 interfaces can only be set'
- ' to the system default (1500) or '
- 'existing sysmtu value which is '
- ' {0}'.format(existing['sysmtu']))
- elif mode == 'layer3':
- if intf_type in ['ethernet', 'portchannel', 'svi']:
- if ((int(mtu) < 576 or int(mtu) > 9216) or
- ((int(mtu) % 2) != 0)):
- module.fail_json(msg='Invalid MTU for Layer 3 interface'
- 'needs to be an even number between'
- '576 and 9216')
- if sysmtu:
- if ((int(sysmtu) < 576 or int(sysmtu) > 9216 or
- ((int(sysmtu) % 2) != 0))):
- module.fail_json(msg='Invalid MTU- needs to be an even '
- 'number between 576 and 9216')
-
- args = dict(mtu=mtu, sysmtu=sysmtu)
- proposed = dict((k, v) for k, v in args.items() if v is not None)
- delta = dict(set(proposed.items()).difference(existing.items()))
-
- changed = False
- end_state = existing
- commands = []
-
- if state == 'present':
- if delta:
- command = get_commands_config_mtu(delta, interface)
- commands.append(command)
-
- elif state == 'absent':
- common = set(proposed.items()).intersection(existing.items())
- if common:
- command = get_commands_remove_mtu(dict(common), interface)
- commands.append(command)
-
- cmds = flatten_list(commands)
- if cmds:
- if module.check_mode:
- module.exit_json(changed=True, commands=cmds)
- else:
- changed = True
- load_config(module, cmds)
- if interface:
- end_state = get_mtu(interface, module)
- else:
- end_state = get_system_mtu(module)
- if 'configure' in cmds:
- cmds.pop(0)
-
- results = {}
- results['proposed'] = proposed
- results['existing'] = existing
- results['end_state'] = end_state
- results['updates'] = cmds
- results['changed'] = changed
- results['warnings'] = warnings
-
- module.exit_json(**results)
+from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
- main()
+ removed_module()
diff --git a/lib/ansible/modules/network/nxos/_nxos_portchannel.py b/lib/ansible/modules/network/nxos/_nxos_portchannel.py
index 1d113f382c..575dfe8de1 100644
--- a/lib/ansible/modules/network/nxos/_nxos_portchannel.py
+++ b/lib/ansible/modules/network/nxos/_nxos_portchannel.py
@@ -25,7 +25,10 @@ DOCUMENTATION = '''
module: nxos_portchannel
extends_documentation_fragment: nxos
version_added: "2.2"
-deprecated: Deprecated in 2.5. Use M(nxos_linkagg) instead.
+deprecated:
+ removed_in: "2.9"
+ why: Replaced with common C(*_linkagg) network modules.
+ alternative: Use M(nxos_linkagg) instead.
short_description: Manages port-channel interfaces.
description:
- Manages port-channel specific configuration parameters.
diff --git a/lib/ansible/modules/network/nxos/_nxos_switchport.py b/lib/ansible/modules/network/nxos/_nxos_switchport.py
index d736d97b87..3a726b4964 100644
--- a/lib/ansible/modules/network/nxos/_nxos_switchport.py
+++ b/lib/ansible/modules/network/nxos/_nxos_switchport.py
@@ -25,7 +25,10 @@ DOCUMENTATION = '''
module: nxos_switchport
extends_documentation_fragment: nxos
version_added: "2.1"
-deprecated: Use M(nxos_l2_interface) instead.
+deprecated:
+ removed_in: "2.9"
+ why: Replaced with generic version.
+ alternative: Use M(nxos_l2_interface) instead.
short_description: Manages Layer 2 switchport interfaces.
description:
- Manages Layer 2 interfaces
diff --git a/lib/ansible/modules/network/panos/panos_nat_policy.py b/lib/ansible/modules/network/panos/_panos_nat_policy.py
index 67cdefbf0b..a476af0b96 100644
--- a/lib/ansible/modules/network/panos/panos_nat_policy.py
+++ b/lib/ansible/modules/network/panos/_panos_nat_policy.py
@@ -30,7 +30,10 @@ author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
-deprecated: In 2.4 use M(panos_nat_rule) instead.
+deprecated:
+ removed_in: "2.8"
+ why: M(panos_nat_rule) uses next generation SDK (PanDevice).
+ alternative: Use M(panos_nat_rule) instead.
options:
ip_address:
description:
@@ -143,7 +146,7 @@ RETURN = '''
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
+ 'status': ['deprecated'],
'supported_by': 'community'}
diff --git a/lib/ansible/modules/network/panos/panos_security_policy.py b/lib/ansible/modules/network/panos/_panos_security_policy.py
index 3d35964e13..34fb151cbe 100644
--- a/lib/ansible/modules/network/panos/panos_security_policy.py
+++ b/lib/ansible/modules/network/panos/_panos_security_policy.py
@@ -20,7 +20,7 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
+ 'status': ['deprecated'],
'supported_by': 'community'}
@@ -35,7 +35,10 @@ description:
traffic is applied, the more specific rules must precede the more general ones.
author: "Ivan Bojer (@ivanbojer)"
version_added: "2.3"
-deprecated: In 2.4 use M(panos_security_rule) instead.
+deprecated:
+ removed_in: "2.8"
+ why: Renamed to M(panos_security_rule) in order to align with API calls and UI object references, which also has extra support for PanDevice SDK.
+ alternative: Use M(panos_security_rule) instead.
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPi U(https://pypi.python.org/pypi/pandevice)
diff --git a/lib/ansible/modules/utilities/helper/_accelerate.py b/lib/ansible/modules/utilities/helper/_accelerate.py
index e14535810c..e0fbe8c068 100644
--- a/lib/ansible/modules/utilities/helper/_accelerate.py
+++ b/lib/ansible/modules/utilities/helper/_accelerate.py
@@ -16,11 +16,14 @@ ANSIBLE_METADATA = {'metadata_version': '1.1',
DOCUMENTATION = '''
---
module: accelerate
-removed: True
short_description: Enable accelerated mode on remote node
-deprecated: "Use SSH with ControlPersist instead."
+deprecated:
+ removed_in: "2.4"
+  why: Replaced by ControlPersist.
+ alternative: Use SSH with ControlPersist instead.
+ removed: True
description:
- - This module has been removed, this file is kept for historicaly documentation purposes
+ - This module has been removed, this file is kept for historical documentation purposes.
- This modules launches an ephemeral I(accelerate) daemon on the remote node which
Ansible can use to communicate with nodes at high speed.
- The daemon listens on a configurable port for a configurable amount of time.
@@ -77,3 +80,8 @@ EXAMPLES = '''
tasks:
- command: /usr/bin/anything
'''
+
+from ansible.module_utils.common.removed import removed_module
+
+if __name__ == '__main__':
+ removed_module()
diff --git a/lib/ansible/modules/utilities/logic/_include.py b/lib/ansible/modules/utilities/logic/_include.py
index 9afc17ad98..025b792184 100644
--- a/lib/ansible/modules/utilities/logic/_include.py
+++ b/lib/ansible/modules/utilities/logic/_include.py
@@ -20,8 +20,9 @@ author: Ansible Core Team (@ansible)
module: include
short_description: Include a play or task list
deprecated:
- The include action was too confusing, dealing with both plays and tasks, being both dynamic and static. This module
- will be removed in version 2.8. As alternatives use M(include_tasks), M(import_playbook), M(import_tasks).
+ removed_in: "2.8"
+  why: The include action was too confusing, dealing with both plays and tasks, being both dynamic and static.
+ alternative: Use M(include_tasks), M(import_playbook), M(import_tasks).
description:
- Includes a file with a list of plays or tasks to be executed in the current playbook.
- Files with a list of plays can only be included at the top level. Lists of tasks can only be included where tasks
diff --git a/lib/ansible/modules/windows/_win_msi.py b/lib/ansible/modules/windows/_win_msi.py
index aeed7556a0..d1cb689826 100644
--- a/lib/ansible/modules/windows/_win_msi.py
+++ b/lib/ansible/modules/windows/_win_msi.py
@@ -29,7 +29,10 @@ DOCUMENTATION = r'''
---
module: win_msi
version_added: '1.7'
-deprecated: In 2.4 and will be removed in 2.8, use M(win_package) instead.
+deprecated:
+ removed_in: "2.8"
+  why: The win_msi module has a number of issues; the M(win_package) module is easier to maintain and use.
+ alternative: Use M(win_package) instead.
short_description: Installs and uninstalls Windows MSI files
description:
- Installs or uninstalls a Windows MSI file that is already located on the