path: root/lib/ansible/plugins
author     Ansible Core Team <info@ansible.com>   2020-03-09 09:40:31 +0000
committer  Ansible Core Team <info@ansible.com>   2020-03-09 09:40:31 +0000
commit     42b02d1be2d0ede8e1d05c54bb415b03b162ce41 (patch)
tree       99aac704166cb859c6e366a9521b644fb5b6247c /lib/ansible/plugins
parent     ab5942a760c399a1b3a47e6afaf38cac44522be9 (diff)
download   ansible-42b02d1be2d0ede8e1d05c54bb415b03b162ce41.tar.gz
Migrated to ansible.amazon
Diffstat (limited to 'lib/ansible/plugins')
-rw-r--r--  lib/ansible/plugins/action/aws_s3.py                    71
-rw-r--r--  lib/ansible/plugins/callback/aws_resource_actions.py    72
-rw-r--r--  lib/ansible/plugins/doc_fragments/aws.py                75
-rw-r--r--  lib/ansible/plugins/doc_fragments/aws_credentials.py    42
-rw-r--r--  lib/ansible/plugins/doc_fragments/aws_region.py         18
-rw-r--r--  lib/ansible/plugins/doc_fragments/ec2.py                18
-rw-r--r--  lib/ansible/plugins/inventory/aws_ec2.py               659
-rw-r--r--  lib/ansible/plugins/inventory/aws_rds.py               326
-rw-r--r--  lib/ansible/plugins/lookup/aws_account_attribute.py    131
-rw-r--r--  lib/ansible/plugins/lookup/aws_secret.py               140
-rw-r--r--  lib/ansible/plugins/lookup/aws_service_ip_ranges.py     79
-rw-r--r--  lib/ansible/plugins/lookup/aws_ssm.py                  233
12 files changed, 0 insertions, 1864 deletions
diff --git a/lib/ansible/plugins/action/aws_s3.py b/lib/ansible/plugins/action/aws_s3.py
deleted file mode 100644
index a454922a10..0000000000
--- a/lib/ansible/plugins/action/aws_s3.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2018, Will Thames <will@thames.id.au>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-
-from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound
-from ansible.module_utils._text import to_text
-from ansible.plugins.action import ActionBase
-from ansible.utils.vars import merge_hash
-
-
-class ActionModule(ActionBase):
-
- TRANSFERS_FILES = True
-
- def run(self, tmp=None, task_vars=None):
- ''' handler for aws_s3 operations '''
- self._supports_async = True
-
- if task_vars is None:
- task_vars = dict()
-
- result = super(ActionModule, self).run(tmp, task_vars)
- del tmp # tmp no longer has any effect
-
- source = self._task.args.get('src', None)
-
- try:
- new_module_args = self._task.args.copy()
- if source:
- source = os.path.expanduser(source)
-
- # For backward compatibility check if the file exists on the remote; it should take precedence
- if not self._remote_file_exists(source):
- try:
- source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)
- new_module_args['src'] = source
- except AnsibleFileNotFound as e:
- # module handles error message for nonexistent files
- new_module_args['src'] = source
- except AnsibleError as e:
- raise AnsibleActionFail(to_text(e))
-
- wrap_async = self._task.async_val and not self._connection.has_native_async
- # execute the aws_s3 module with the updated args
- result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async))
-
- if not wrap_async:
- # remove a temporary path we created
- self._remove_tmp_path(self._connection._shell.tmpdir)
-
- except AnsibleAction as e:
- result.update(e.result)
- return result
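The action plugin above resolves a local src path before delegating to the aws_s3 module: it expands "~", leaves the path untouched if the file already exists on the remote host, and only then searches the controller-side files/ directories. A minimal standalone sketch of that precedence, assuming hypothetical remote_file_exists and find_in_files_dirs callables standing in for _remote_file_exists and _find_needle:

import os

def resolve_s3_src(src, remote_file_exists, find_in_files_dirs):
    # Sketch of the precedence used by the aws_s3 action plugin above.
    # remote_file_exists(path) -> bool: stand-in for ActionBase._remote_file_exists
    # find_in_files_dirs(path) -> str: stand-in for _find_needle('files', path);
    #   assumed to raise FileNotFoundError when no candidate matches
    src = os.path.expanduser(src)
    if remote_file_exists(src):
        # A file already present on the managed host takes precedence.
        return src
    try:
        # Otherwise fall back to the role/playbook files/ search path.
        return find_in_files_dirs(src)
    except FileNotFoundError:
        # The aws_s3 module reports missing files itself, so pass the path through.
        return src
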
diff --git a/lib/ansible/plugins/callback/aws_resource_actions.py b/lib/ansible/plugins/callback/aws_resource_actions.py
deleted file mode 100644
index f871fe5479..0000000000
--- a/lib/ansible/plugins/callback/aws_resource_actions.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# (C) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- callback: aws_resource_actions
- type: aggregate
- short_description: summarizes all "resource:actions" completed
- version_added: "2.8"
- description:
- - Ansible callback plugin for collecting the AWS actions completed by all boto3 modules using
- AnsibleAWSModule in a playbook. Botocore endpoint logs need to be enabled for those modules, which can
- be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults.
- requirements:
- - whitelisting in configuration - see examples section below for details.
-'''
-
-EXAMPLES = '''
-example: >
- To enable, add this to your ansible.cfg file in the defaults block
- [defaults]
- callback_whitelist = aws_resource_actions
-sample output: >
-#
-# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload',
-# 's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload',
-# 's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject']
-#
-sample output: >
-#
-# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags',
-# 'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc']
-#
-'''
-
-from ansible.plugins.callback import CallbackBase
-from ansible.module_utils._text import to_native
-
-
-class CallbackModule(CallbackBase):
- CALLBACK_VERSION = 2.8
- CALLBACK_TYPE = 'aggregate'
- CALLBACK_NAME = 'aws_resource_actions'
- CALLBACK_NEEDS_WHITELIST = True
-
- def __init__(self):
- self.aws_resource_actions = []
- super(CallbackModule, self).__init__()
-
- def extend_aws_resource_actions(self, result):
- if result.get('resource_actions'):
- self.aws_resource_actions.extend(result['resource_actions'])
-
- def runner_on_ok(self, host, res):
- self.extend_aws_resource_actions(res)
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- self.extend_aws_resource_actions(res)
-
- def v2_runner_item_on_ok(self, result):
- self.extend_aws_resource_actions(result._result)
-
- def v2_runner_item_on_failed(self, result):
- self.extend_aws_resource_actions(result._result)
-
- def playbook_on_stats(self, stats):
- if self.aws_resource_actions:
- self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions)))
- self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions))
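The callback above only accumulates every resource_actions list it sees and, in playbook_on_stats, prints the de-duplicated, sorted set. A sketch of that aggregation step outside Ansible, with made-up task results:

def summarize_actions(task_results):
    # De-duplicate and sort AWS "resource:action" strings, mirroring the
    # playbook_on_stats handler above (sketch; the input data is made up).
    actions = []
    for result in task_results:
        actions.extend(result.get('resource_actions', []))
    return sorted(set(actions))

print(summarize_actions([
    {'resource_actions': ['s3:PutObject', 'sts:GetCallerIdentity']},
    {'resource_actions': ['s3:PutObject', 's3:HeadBucket']},
]))
# ['s3:HeadBucket', 's3:PutObject', 'sts:GetCallerIdentity']
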
diff --git a/lib/ansible/plugins/doc_fragments/aws.py b/lib/ansible/plugins/doc_fragments/aws.py
deleted file mode 100644
index 668955196f..0000000000
--- a/lib/ansible/plugins/doc_fragments/aws.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Will Thames <will@thames.id.au>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # AWS only documentation fragment
- DOCUMENTATION = r'''
-options:
- debug_botocore_endpoint_logs:
- description:
-      - Use a botocore.endpoint logger to parse the unique (rather than total) "resource:action" API calls made during a task, outputting
-        the set to the resource_actions key in the task results. Use the aws_resource_actions callback to output the total list made during
- a playbook. The ANSIBLE_DEBUG_BOTOCORE_LOGS environment variable may also be used.
- type: bool
- default: 'no'
- version_added: "2.8"
- ec2_url:
- description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints).
- Ignored for modules where region is required. Must be specified for all other modules if region is not used.
- If not set then the value of the EC2_URL environment variable, if any, is used.
- type: str
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
- type: str
- aliases: [ ec2_secret_key, secret_key ]
- aws_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
- type: str
- aliases: [ ec2_access_key, access_key ]
- security_token:
- description:
- - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
- type: str
- aliases: [ access_token ]
- version_added: "1.6"
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- type: bool
- default: yes
- version_added: "1.5"
- profile:
- description:
- - Uses a boto profile. Only works with boto >= 2.24.0.
- type: str
- version_added: "1.6"
- aws_config:
- description:
- - A dictionary to modify the botocore configuration.
- - Parameters can be found at U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
- - Only the 'user_agent' key is used for boto modules. See U(http://boto.cloudhackers.com/en/latest/boto_config_tut.html#boto) for more boto configuration.
- type: dict
- version_added: "2.10"
-requirements:
- - python >= 2.6
- - boto
-notes:
- - If parameters are not set within the module, the following
- environment variables can be used in decreasing order of precedence
- C(AWS_URL) or C(EC2_URL),
- C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
- C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
- C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
- C(AWS_REGION) or C(EC2_REGION)
- - Ansible uses the boto configuration file (typically ~/.boto) if no
- credentials are provided. See https://boto.readthedocs.io/en/latest/boto_config_tut.html
-  - C(AWS_REGION) or C(EC2_REGION) can typically be used to specify the
- AWS region, when required, but this can also be configured in the boto config file
-'''
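The notes in this fragment describe a fixed fallback order of environment variables for each connection parameter. A small illustrative sketch of that precedence as plain Python; the resolve_credential helper and ENV_PRECEDENCE table below are not part of the fragment:

import os

ENV_PRECEDENCE = {
    # Documented order: the module argument wins, then these variables in turn.
    'aws_access_key': ('AWS_ACCESS_KEY_ID', 'AWS_ACCESS_KEY', 'EC2_ACCESS_KEY'),
    'aws_secret_key': ('AWS_SECRET_ACCESS_KEY', 'AWS_SECRET_KEY', 'EC2_SECRET_KEY'),
    'security_token': ('AWS_SECURITY_TOKEN', 'EC2_SECURITY_TOKEN'),
    'ec2_url': ('AWS_URL', 'EC2_URL'),
}

def resolve_credential(name, module_arg=None, environ=os.environ):
    # Return the explicit module argument if given, otherwise the first
    # matching environment variable in decreasing order of precedence.
    if module_arg:
        return module_arg
    for var in ENV_PRECEDENCE.get(name, ()):
        if environ.get(var):
            return environ[var]
    return None
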
diff --git a/lib/ansible/plugins/doc_fragments/aws_credentials.py b/lib/ansible/plugins/doc_fragments/aws_credentials.py
deleted file mode 100644
index ef37ca1932..0000000000
--- a/lib/ansible/plugins/doc_fragments/aws_credentials.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # Plugin options for AWS credentials
- DOCUMENTATION = r'''
-options:
- aws_profile:
- description: The AWS profile
- type: str
- aliases: [ boto_profile ]
- env:
- - name: AWS_DEFAULT_PROFILE
- - name: AWS_PROFILE
- aws_access_key:
- description: The AWS access key to use.
- type: str
- aliases: [ aws_access_key_id ]
- env:
- - name: EC2_ACCESS_KEY
- - name: AWS_ACCESS_KEY
- - name: AWS_ACCESS_KEY_ID
- aws_secret_key:
- description: The AWS secret key that corresponds to the access key.
- type: str
- aliases: [ aws_secret_access_key ]
- env:
- - name: EC2_SECRET_KEY
- - name: AWS_SECRET_KEY
- - name: AWS_SECRET_ACCESS_KEY
- aws_security_token:
- description: The AWS security token if using temporary access and secret keys.
- type: str
- env:
- - name: EC2_SECURITY_TOKEN
- - name: AWS_SESSION_TOKEN
- - name: AWS_SECURITY_TOKEN
-'''
diff --git a/lib/ansible/plugins/doc_fragments/aws_region.py b/lib/ansible/plugins/doc_fragments/aws_region.py
deleted file mode 100644
index e214d78a2e..0000000000
--- a/lib/ansible/plugins/doc_fragments/aws_region.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # Plugin option for AWS region
- DOCUMENTATION = r'''
-options:
- region:
- description: The region for which to create the connection.
- type: str
- env:
- - name: EC2_REGION
- - name: AWS_REGION
-'''
diff --git a/lib/ansible/plugins/doc_fragments/ec2.py b/lib/ansible/plugins/doc_fragments/ec2.py
deleted file mode 100644
index 0ec0cf7a8d..0000000000
--- a/lib/ansible/plugins/doc_fragments/ec2.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Ansible, Inc
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # EC2 only documentation fragment
- DOCUMENTATION = r'''
-options:
- region:
- description:
- - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
- See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
- type: str
- aliases: [ aws_region, ec2_region ]
-'''
diff --git a/lib/ansible/plugins/inventory/aws_ec2.py b/lib/ansible/plugins/inventory/aws_ec2.py
deleted file mode 100644
index 5f75795616..0000000000
--- a/lib/ansible/plugins/inventory/aws_ec2.py
+++ /dev/null
@@ -1,659 +0,0 @@
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: aws_ec2
- plugin_type: inventory
- short_description: EC2 inventory source
- requirements:
- - boto3
- - botocore
- extends_documentation_fragment:
- - inventory_cache
- - constructed
- - aws_credentials
- description:
- - Get inventory hosts from Amazon Web Services EC2.
- - Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
- notes:
- - If no credentials are provided and the control node has an associated IAM instance profile then the
- role will be used for authentication.
- author:
- - Sloane Hertel (@s-hertel)
- options:
- plugin:
- description: Token that ensures this is a source file for the plugin.
- required: True
- choices: ['aws_ec2']
- iam_role_arn:
- description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
- credentials with enough privilege to perform the AssumeRole action.
- version_added: '2.9'
- regions:
- description:
- - A list of regions in which to describe EC2 instances.
-      - If empty (the default) this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
- type: list
- default: []
- hostnames:
- description:
- - A list in order of precedence for hostname variables.
- - You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
- type: list
- default: []
- filters:
- description:
- - A dictionary of filter value pairs.
- - Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- type: dict
- default: {}
- include_extra_api_calls:
- description:
- - Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
- - Spot instances may be persistent and instances may have associated events.
- type: bool
- default: False
- version_added: '2.8'
- strict_permissions:
- description:
- - By default if a 403 (Forbidden) error code is encountered this plugin will fail.
- - You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
- type: bool
- default: True
- use_contrib_script_compatible_sanitization:
- description:
- - By default this plugin is using a general group name sanitization to create safe and usable group names for use in Ansible.
- This option allows you to override that, in efforts to allow migration from the old inventory script and
- matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
- To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
- you will need to replace hyphens with underscores via the regex_replace filter for those entries.
- - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
- otherwise the core engine will just use the standard sanitization on top.
- - This is not the default as such names break certain functionality as not all characters are valid Python identifiers
- which group names end up being used as.
- type: bool
- default: False
- version_added: '2.8'
-'''
-
-EXAMPLES = '''
-# Minimal example using environment vars or instance role credentials
-# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
-plugin: aws_ec2
-regions:
- - us-east-1
-
-# Example using filters, ignoring permission errors, and specifying the hostname precedence
-plugin: aws_ec2
-boto_profile: aws_profile
-# Populate inventory with instances in these regions
-regions:
- - us-east-1
- - us-east-2
-filters:
- # All instances with their `Environment` tag set to `dev`
- tag:Environment: dev
- # All dev and QA hosts
- tag:Environment:
- - dev
- - qa
- instance.group-id: sg-xxxxxxxx
-# Ignores 403 errors rather than failing
-strict_permissions: False
-# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
-# inventory_hostname use compose (see example below).
-hostnames:
- - tag:Name=Tag1,Name=Tag2 # Return specific hosts only
- - tag:CustomDNSName
- - dns-name
- - private-ip-address
-
-# Example using constructed features to create groups and set ansible_host
-plugin: aws_ec2
-regions:
- - us-east-1
- - us-west-1
-# keyed_groups may be used to create custom groups
-strict: False
-keyed_groups:
- # Add e.g. x86_64 hosts to an arch_x86_64 group
- - prefix: arch
- key: 'architecture'
- # Add hosts to tag_Name_Value groups for each Name/Value tag pair
- - prefix: tag
- key: tags
- # Add hosts to e.g. instance_type_z3_tiny
- - prefix: instance_type
- key: instance_type
- # Create security_groups_sg_abcd1234 group for each SG
- - key: 'security_groups|json_query("[].group_id")'
- prefix: 'security_groups'
- # Create a group for each value of the Application tag
- - key: tags.Application
- separator: ''
- # Create a group per region e.g. aws_region_us_east_2
- - key: placement.region
- prefix: aws_region
- # Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
- - key: tags['Role']
- prefix: foo
- parent_group: "project"
-# Set individual variables with compose
-compose:
- # Use the private IP address to connect to the host
- # (note: this does not modify inventory_hostname, which is set via I(hostnames))
- ansible_host: private_ip_address
-'''
-
-import re
-
-from ansible.errors import AnsibleError
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.utils.display import Display
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
-
-display = Display()
-
-# The mappings give an array of keys to get from the filter name to the value
-# returned by boto3's EC2 describe_instances method.
-
-instance_meta_filter_to_boto_attr = {
- 'group-id': ('Groups', 'GroupId'),
- 'group-name': ('Groups', 'GroupName'),
- 'network-interface.attachment.instance-owner-id': ('OwnerId',),
- 'owner-id': ('OwnerId',),
- 'requester-id': ('RequesterId',),
- 'reservation-id': ('ReservationId',),
-}
-
-instance_data_filter_to_boto_attr = {
- 'affinity': ('Placement', 'Affinity'),
- 'architecture': ('Architecture',),
- 'availability-zone': ('Placement', 'AvailabilityZone'),
- 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
- 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
- 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
- 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
- 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
- 'client-token': ('ClientToken',),
- 'dns-name': ('PublicDnsName',),
- 'host-id': ('Placement', 'HostId'),
- 'hypervisor': ('Hypervisor',),
- 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
- 'image-id': ('ImageId',),
- 'instance-id': ('InstanceId',),
- 'instance-lifecycle': ('InstanceLifecycle',),
- 'instance-state-code': ('State', 'Code'),
- 'instance-state-name': ('State', 'Name'),
- 'instance-type': ('InstanceType',),
- 'instance.group-id': ('SecurityGroups', 'GroupId'),
- 'instance.group-name': ('SecurityGroups', 'GroupName'),
- 'ip-address': ('PublicIpAddress',),
- 'kernel-id': ('KernelId',),
- 'key-name': ('KeyName',),
- 'launch-index': ('AmiLaunchIndex',),
- 'launch-time': ('LaunchTime',),
- 'monitoring-state': ('Monitoring', 'State'),
- 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
- 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
- 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
- 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
- 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
- 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
- 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
- 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
- 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
- 'network-interface.attachment.instance-id': ('InstanceId',),
- 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
- 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
- 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
- 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
- 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
- 'network-interface.description': ('NetworkInterfaces', 'Description'),
- 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
- 'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
- 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
- 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
- 'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
- 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
- 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
- # 'network-interface.requester-id': (),
- 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
- 'network-interface.status': ('NetworkInterfaces', 'Status'),
- 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
- 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
- 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
- 'placement-group-name': ('Placement', 'GroupName'),
- 'platform': ('Platform',),
- 'private-dns-name': ('PrivateDnsName',),
- 'private-ip-address': ('PrivateIpAddress',),
- 'product-code': ('ProductCodes', 'ProductCodeId'),
- 'product-code.type': ('ProductCodes', 'ProductCodeType'),
- 'ramdisk-id': ('RamdiskId',),
- 'reason': ('StateTransitionReason',),
- 'root-device-name': ('RootDeviceName',),
- 'root-device-type': ('RootDeviceType',),
- 'source-dest-check': ('SourceDestCheck',),
- 'spot-instance-request-id': ('SpotInstanceRequestId',),
- 'state-reason-code': ('StateReason', 'Code'),
- 'state-reason-message': ('StateReason', 'Message'),
- 'subnet-id': ('SubnetId',),
- 'tag': ('Tags',),
- 'tag-key': ('Tags',),
- 'tag-value': ('Tags',),
- 'tenancy': ('Placement', 'Tenancy'),
- 'virtualization-type': ('VirtualizationType',),
- 'vpc-id': ('VpcId',),
-}
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'aws_ec2'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
-
- self.group_prefix = 'aws_ec2_'
-
- # credentials
- self.boto_profile = None
- self.aws_secret_access_key = None
- self.aws_access_key_id = None
- self.aws_security_token = None
- self.iam_role_arn = None
-
- def _compile_values(self, obj, attr):
- '''
- :param obj: A list or dict of instance attributes
- :param attr: A key
- :return The value(s) found via the attr
- '''
- if obj is None:
- return
-
- temp_obj = []
-
- if isinstance(obj, list) or isinstance(obj, tuple):
- for each in obj:
- value = self._compile_values(each, attr)
- if value:
- temp_obj.append(value)
- else:
- temp_obj = obj.get(attr)
-
- has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
- if has_indexes and len(temp_obj) == 1:
- return temp_obj[0]
-
- return temp_obj
-
- def _get_boto_attr_chain(self, filter_name, instance):
- '''
- :param filter_name: The filter
- :param instance: instance dict returned by boto3 ec2 describe_instances()
- '''
- allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
- if filter_name not in allowed_filters:
- raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
- allowed_filters))
- if filter_name in instance_data_filter_to_boto_attr:
- boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
- else:
- boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
-
- instance_value = instance
- for attribute in boto_attr_list:
- instance_value = self._compile_values(instance_value, attribute)
- return instance_value
-
- def _get_credentials(self):
- '''
- :return A dictionary of boto client credentials
- '''
- boto_params = {}
- for credential in (('aws_access_key_id', self.aws_access_key_id),
- ('aws_secret_access_key', self.aws_secret_access_key),
- ('aws_session_token', self.aws_security_token)):
- if credential[1]:
- boto_params[credential[0]] = credential[1]
-
- return boto_params
-
- def _get_connection(self, credentials, region='us-east-1'):
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if self.boto_profile:
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- else:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- return connection
-
- def _boto3_assume_role(self, credentials, region):
- """
- Assume an IAM role passed by iam_role_arn parameter
-
- :return: a dict containing the credentials of the assumed role
- """
-
- iam_role_arn = self.iam_role_arn
-
- try:
- sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
- sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
- return dict(
- aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
- aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
- aws_session_token=sts_session['Credentials']['SessionToken']
- )
- except botocore.exceptions.ClientError as e:
- raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
-
- def _boto3_conn(self, regions):
- '''
- :param regions: A list of regions to create a boto3 client
-
- Generator that yields a boto3 client and the region
- '''
-
- credentials = self._get_credentials()
- iam_role_arn = self.iam_role_arn
-
- if not regions:
- try:
- # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
- client = self._get_connection(credentials)
- resp = client.describe_regions()
- regions = [x['RegionName'] for x in resp.get('Regions', [])]
- except botocore.exceptions.NoRegionError:
- # above seems to fail depending on boto3 version, ignore and lets try something else
- pass
-
- # fallback to local list hardcoded in boto3 if still no regions
- if not regions:
- session = boto3.Session()
- regions = session.get_available_regions('ec2')
-
- # I give up, now you MUST give me regions
- if not regions:
- raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
-
- for region in regions:
- connection = self._get_connection(credentials, region)
- try:
- if iam_role_arn is not None:
- assumed_credentials = self._boto3_assume_role(credentials, region)
- else:
- assumed_credentials = credentials
- connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if self.boto_profile:
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- else:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- yield connection, region
-
- def _get_instances_by_region(self, regions, filters, strict_permissions):
- '''
- :param regions: a list of regions in which to describe instances
- :param filters: a list of boto3 filter dictionaries
- :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
- :return A list of instance dictionaries
- '''
- all_instances = []
-
- for connection, region in self._boto3_conn(regions):
- try:
- # By default find non-terminated/terminating instances
- if not any([f['Name'] == 'instance-state-name' for f in filters]):
- filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
- paginator = connection.get_paginator('describe_instances')
- reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
- instances = []
- for r in reservations:
- new_instances = r['Instances']
- for instance in new_instances:
- instance.update(self._get_reservation_details(r))
- if self.get_option('include_extra_api_calls'):
- instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
- instances.extend(new_instances)
- except botocore.exceptions.ClientError as e:
- if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
- instances = []
- else:
- raise AnsibleError("Failed to describe instances: %s" % to_native(e))
- except botocore.exceptions.BotoCoreError as e:
- raise AnsibleError("Failed to describe instances: %s" % to_native(e))
-
- all_instances.extend(instances)
-
- return sorted(all_instances, key=lambda x: x['InstanceId'])
-
- def _get_reservation_details(self, reservation):
- return {
- 'OwnerId': reservation['OwnerId'],
- 'RequesterId': reservation.get('RequesterId', ''),
- 'ReservationId': reservation['ReservationId']
- }
-
- def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
- host_vars = {'Events': '', 'Persistent': False}
- try:
- kwargs = {'InstanceIds': [instance_id]}
- host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- if not self.get_option('strict_permissions'):
- pass
- else:
- raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
- if spot_instance:
- try:
- kwargs = {'SpotInstanceRequestIds': [spot_instance]}
- host_vars['Persistent'] = bool(
- connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
- )
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- if not self.get_option('strict_permissions'):
- pass
- else:
- raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
- return host_vars
-
- def _get_tag_hostname(self, preference, instance):
- tag_hostnames = preference.split('tag:', 1)[1]
- if ',' in tag_hostnames:
- tag_hostnames = tag_hostnames.split(',')
- else:
- tag_hostnames = [tag_hostnames]
- tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
- for v in tag_hostnames:
- if '=' in v:
- tag_name, tag_value = v.split('=')
- if tags.get(tag_name) == tag_value:
- return to_text(tag_name) + "_" + to_text(tag_value)
- else:
- tag_value = tags.get(v)
- if tag_value:
- return to_text(tag_value)
- return None
-
- def _get_hostname(self, instance, hostnames):
- '''
- :param instance: an instance dict returned by boto3 ec2 describe_instances()
- :param hostnames: a list of hostname destination variables in order of preference
-        :return the preferred identifier for the host
- '''
- if not hostnames:
- hostnames = ['dns-name', 'private-dns-name']
-
- hostname = None
- for preference in hostnames:
- if 'tag' in preference:
- if not preference.startswith('tag:'):
- raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
- hostname = self._get_tag_hostname(preference, instance)
- else:
- hostname = self._get_boto_attr_chain(preference, instance)
- if hostname:
- break
- if hostname:
- if ':' in to_text(hostname):
- return self._sanitize_group_name((to_text(hostname)))
- else:
- return to_text(hostname)
-
- def _query(self, regions, filters, strict_permissions):
- '''
- :param regions: a list of regions to query
- :param filters: a list of boto3 filter dictionaries
- :param hostnames: a list of hostname destination variables in order of preference
- :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
- '''
- return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
-
- def _populate(self, groups, hostnames):
- for group in groups:
- group = self.inventory.add_group(group)
- self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
- self.inventory.add_child('all', group)
-
- def _add_hosts(self, hosts, group, hostnames):
- '''
- :param hosts: a list of hosts to be added to a group
- :param group: the name of the group to which the hosts belong
- :param hostnames: a list of hostname destination variables in order of preference
- '''
- for host in hosts:
- hostname = self._get_hostname(host, hostnames)
-
- host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
- host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
-
- # Allow easier grouping by region
- host['placement']['region'] = host['placement']['availability_zone'][:-1]
-
- if not hostname:
- continue
- self.inventory.add_host(hostname, group=group)
- for hostvar, hostval in host.items():
- self.inventory.set_variable(hostname, hostvar, hostval)
-
- # Use constructed if applicable
-
- strict = self.get_option('strict')
-
- # Composed variables
- self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
-
- # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
- self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
-
- # Create groups based on variable values and add the corresponding hosts to it
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
-
- def _set_credentials(self):
- '''
- :param config_data: contents of the inventory config file
- '''
-
- self.boto_profile = self.get_option('aws_profile')
- self.aws_access_key_id = self.get_option('aws_access_key')
- self.aws_secret_access_key = self.get_option('aws_secret_key')
- self.aws_security_token = self.get_option('aws_security_token')
- self.iam_role_arn = self.get_option('iam_role_arn')
-
- if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
- session = botocore.session.get_session()
- try:
- credentials = session.get_credentials().get_frozen_credentials()
- except AttributeError:
- pass
- else:
- self.aws_access_key_id = credentials.access_key
- self.aws_secret_access_key = credentials.secret_key
- self.aws_security_token = credentials.token
-
- if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
- raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
- "inventory configuration file or set them as environment variables.")
-
- def verify_file(self, path):
- '''
- :param loader: an ansible.parsing.dataloader.DataLoader object
- :param path: the path to the inventory config file
- :return the contents of the config file
- '''
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
- return True
- display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
- return False
-
- def parse(self, inventory, loader, path, cache=True):
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- self._read_config_data(path)
-
- if self.get_option('use_contrib_script_compatible_sanitization'):
- self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
-
- self._set_credentials()
-
- # get user specifications
- regions = self.get_option('regions')
- filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
- hostnames = self.get_option('hostnames')
- strict_permissions = self.get_option('strict_permissions')
-
- cache_key = self.get_cache_key(path)
- # false when refresh_cache or --flush-cache is used
- if cache:
- # get the user-specified directive
- cache = self.get_option('cache')
-
- # Generate inventory
- cache_needs_update = False
- if cache:
- try:
- results = self._cache[cache_key]
- except KeyError:
- # if cache expires or cache file doesn't exist
- cache_needs_update = True
-
- if not cache or cache_needs_update:
- results = self._query(regions, filters, strict_permissions)
-
- self._populate(results, hostnames)
-
- # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
- # when the user is using caching, update the cached inventory
- if cache_needs_update or (not cache and self.get_option('cache')):
- self._cache[cache_key] = results
-
- @staticmethod
- def _legacy_script_compatible_group_sanitization(name):
-
- # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
- regex = re.compile(r"[^A-Za-z0-9\_\-]")
-
- return regex.sub('_', name)
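The hostnames option above accepts entries of the form tag:Name=Value (yielding the hostname Name_Value) or tag:Name (yielding the tag's value), with comma-separated alternatives tried in order. A standalone sketch of that parsing, mirroring _get_tag_hostname but using a plain tag dict instead of the boto3 tag list:

def tag_hostname(preference, tags):
    # Resolve a 'tag:...' hostname preference against a dict of instance tags
    # (sketch of the logic in _get_tag_hostname above).
    candidates = preference.split('tag:', 1)[1].split(',')
    for candidate in candidates:
        if '=' in candidate:
            name, value = candidate.split('=', 1)
            if tags.get(name) == value:
                return '%s_%s' % (name, value)
        elif tags.get(candidate):
            return tags[candidate]
    return None

print(tag_hostname('tag:Name=web,Environment', {'Environment': 'dev'}))  # dev
print(tag_hostname('tag:Name=web', {'Name': 'web'}))                     # Name_web
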
diff --git a/lib/ansible/plugins/inventory/aws_rds.py b/lib/ansible/plugins/inventory/aws_rds.py
deleted file mode 100644
index 6b89032473..0000000000
--- a/lib/ansible/plugins/inventory/aws_rds.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: aws_rds
- plugin_type: inventory
- short_description: rds instance source
- description:
- - Get instances and clusters from Amazon Web Services RDS.
- - Uses a YAML configuration file that ends with aws_rds.(yml|yaml).
- options:
- regions:
- description: A list of regions in which to describe RDS instances and clusters. Available regions are listed here
- U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)
- default: []
- filters:
- description: A dictionary of filter value pairs. Available filters are listed here
- U(https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html#options). If you filter by
- db-cluster-id and I(include_clusters) is True it will apply to clusters as well.
- default: {}
- strict_permissions:
- description: By default if an AccessDenied exception is encountered this plugin will fail. You can set strict_permissions to
- False in the inventory config file which will allow the restrictions to be gracefully skipped.
- type: bool
- default: True
- include_clusters:
- description: Whether or not to query for Aurora clusters as well as instances
- type: bool
- default: False
- statuses:
- description: A list of desired states for instances/clusters to be added to inventory. Set to ['all'] as a shorthand to find everything.
- type: list
- default:
- - creating
- - available
- extends_documentation_fragment:
- - inventory_cache
- - constructed
- - aws_credentials
- requirements:
- - boto3
- - botocore
- author: Sloane Hertel (@s-hertel)
-'''
-
-EXAMPLES = '''
-plugin: aws_rds
-regions:
- - us-east-1
- - ca-central-1
-keyed_groups:
- - key: 'db_parameter_groups|json_query("[].db_parameter_group_name")'
- prefix: rds_parameter_group
- - key: engine
- prefix: rds
- - key: tags
- - key: region
-'''
-
-from ansible.errors import AnsibleError
-from ansible.module_utils._text import to_native
-from ansible.module_utils.aws.core import is_boto3_error_code
-from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
-from ansible.module_utils.ec2 import camel_dict_to_snake_dict
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError('The RDS dynamic inventory plugin requires boto3 and botocore.')
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'aws_rds'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
- self.credentials = {}
- self.boto_profile = None
-
- def _boto3_conn(self, regions):
- '''
- :param regions: A list of regions to create a boto3 client
-
- Generator that yields a boto3 client and the region
- '''
- for region in regions:
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **self.credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if self.boto_profile:
- try:
- connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- else:
- raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
- yield connection, region
-
- def _get_hosts_by_region(self, connection, filters, strict):
-
- def _add_tags_for_hosts(connection, hosts, strict):
- for host in hosts:
- if 'DBInstanceArn' in host:
- resource_arn = host['DBInstanceArn']
- else:
- resource_arn = host['DBClusterArn']
-
- try:
- tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']
- except is_boto3_error_code('AccessDenied') as e:
- if not strict:
- tags = []
- else:
- raise e
- host['Tags'] = tags
-
- def wrapper(f, *args, **kwargs):
- try:
- results = f(*args, **kwargs)
- if 'DBInstances' in results:
- results = results['DBInstances']
- else:
- results = results['DBClusters']
- _add_tags_for_hosts(connection, results, strict)
- except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except
- if not strict:
- results = []
- else:
- raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
- except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
- raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
- return results
- return wrapper
-
- def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False):
- '''
- :param regions: a list of regions in which to describe hosts
- :param instance_filters: a list of boto3 filter dictionaries
- :param cluster_filters: a list of boto3 filter dictionaries
- :param strict: a boolean determining whether to fail or ignore 403 error codes
- :param statuses: a list of statuses that the returned hosts should match
- :return A list of host dictionaries
- '''
- all_instances = []
- all_clusters = []
- for connection, region in self._boto3_conn(regions):
- paginator = connection.get_paginator('describe_db_instances')
- all_instances.extend(
- self._get_hosts_by_region(connection, instance_filters, strict)
- (paginator.paginate(Filters=instance_filters).build_full_result)
- )
- if gather_clusters:
- all_clusters.extend(
- self._get_hosts_by_region(connection, cluster_filters, strict)
- (connection.describe_db_clusters, **{'Filters': cluster_filters})
- )
- sorted_hosts = list(
- sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) +
- sorted(all_clusters, key=lambda x: x['DBClusterIdentifier'])
- )
- return self.find_hosts_with_valid_statuses(sorted_hosts, statuses)
-
- def find_hosts_with_valid_statuses(self, hosts, statuses):
- if 'all' in statuses:
- return hosts
- valid_hosts = []
- for host in hosts:
- if host.get('DBInstanceStatus') in statuses:
- valid_hosts.append(host)
- elif host.get('Status') in statuses:
- valid_hosts.append(host)
- return valid_hosts
-
- def _populate(self, hosts):
- group = 'aws_rds'
- self.inventory.add_group(group)
- if hosts:
- self._add_hosts(hosts=hosts, group=group)
- self.inventory.add_child('all', group)
-
- def _populate_from_source(self, source_data):
- hostvars = source_data.pop('_meta', {}).get('hostvars', {})
- for group in source_data:
- if group == 'all':
- continue
- else:
- self.inventory.add_group(group)
- hosts = source_data[group].get('hosts', [])
- for host in hosts:
- self._populate_host_vars([host], hostvars.get(host, {}), group)
- self.inventory.add_child('all', group)
-
- def _get_hostname(self, host):
- if host.get('DBInstanceIdentifier'):
- return host['DBInstanceIdentifier']
- else:
- return host['DBClusterIdentifier']
-
- def _format_inventory(self, hosts):
- results = {'_meta': {'hostvars': {}}}
- group = 'aws_rds'
- results[group] = {'hosts': []}
- for host in hosts:
- hostname = self._get_hostname(host)
- results[group]['hosts'].append(hostname)
- h = self.inventory.get_host(hostname)
- results['_meta']['hostvars'][h.name] = h.vars
- return results
-
- def _add_hosts(self, hosts, group):
- '''
- :param hosts: a list of hosts to be added to a group
- :param group: the name of the group to which the hosts belong
- '''
- for host in hosts:
- hostname = self._get_hostname(host)
- host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
- host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
-
- # Allow easier grouping by region
- if 'availability_zone' in host:
- host['region'] = host['availability_zone'][:-1]
- elif 'availability_zones' in host:
- host['region'] = host['availability_zones'][0][:-1]
-
- self.inventory.add_host(hostname, group=group)
- for hostvar, hostval in host.items():
- self.inventory.set_variable(hostname, hostvar, hostval)
-
- # Use constructed if applicable
- strict = self.get_option('strict')
- # Composed variables
- self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
- # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
- self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
- # Create groups based on variable values and add the corresponding hosts to it
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
-
- def _set_credentials(self):
- '''
- :param config_data: contents of the inventory config file
- '''
- self.boto_profile = self.get_option('aws_profile')
- aws_access_key_id = self.get_option('aws_access_key')
- aws_secret_access_key = self.get_option('aws_secret_key')
- aws_security_token = self.get_option('aws_security_token')
-
- if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
- session = botocore.session.get_session()
- if session.get_credentials() is not None:
- aws_access_key_id = session.get_credentials().access_key
- aws_secret_access_key = session.get_credentials().secret_key
- aws_security_token = session.get_credentials().token
-
- if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
- raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
- "inventory configuration file or set them as environment variables.")
-
- if aws_access_key_id:
- self.credentials['aws_access_key_id'] = aws_access_key_id
- if aws_secret_access_key:
- self.credentials['aws_secret_access_key'] = aws_secret_access_key
- if aws_security_token:
- self.credentials['aws_session_token'] = aws_security_token
-
- def verify_file(self, path):
- '''
- :param loader: an ansible.parsing.dataloader.DataLoader object
- :param path: the path to the inventory config file
- :return the contents of the config file
- '''
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('aws_rds.yml', 'aws_rds.yaml')):
- return True
- return False
-
- def parse(self, inventory, loader, path, cache=True):
- super(InventoryModule, self).parse(inventory, loader, path)
-
- config_data = self._read_config_data(path)
- self._set_credentials()
-
- # get user specifications
- regions = self.get_option('regions')
- filters = self.get_option('filters')
- strict_permissions = self.get_option('strict_permissions')
- statuses = self.get_option('statuses')
- include_clusters = self.get_option('include_clusters')
- instance_filters = ansible_dict_to_boto3_filter_list(filters)
- cluster_filters = []
- if 'db-cluster-id' in filters and include_clusters:
- cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']})
-
- cache_key = self.get_cache_key(path)
- # false when refresh_cache or --flush-cache is used
- if cache:
- # get the user-specified directive
- cache = self.get_option('cache')
-
- # Generate inventory
- formatted_inventory = {}
- cache_needs_update = False
- if cache:
- try:
- results = self._cache[cache_key]
- except KeyError:
- # if cache expires or cache file doesn't exist
- cache_needs_update = True
- else:
- self._populate_from_source(results)
-
- if not cache or cache_needs_update:
- results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters)
- self._populate(results)
- formatted_inventory = self._format_inventory(results)
-
- # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
- # when the user is using caching, update the cached inventory
- if cache_needs_update or (not cache and self.get_option('cache')):
- self._cache[cache_key] = formatted_inventory
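RDS instances report their state as DBInstanceStatus while Aurora clusters use Status, which is why find_hosts_with_valid_statuses above checks both keys and lets 'all' bypass the filter. A standalone sketch with made-up hosts:

def hosts_with_valid_statuses(hosts, statuses):
    # Keep hosts whose instance or cluster status is wanted (sketch of the
    # filtering done by find_hosts_with_valid_statuses above).
    if 'all' in statuses:
        return hosts
    return [
        host for host in hosts
        if host.get('DBInstanceStatus') in statuses or host.get('Status') in statuses
    ]

sample_hosts = [  # shapes only; identifiers and states are made up
    {'DBInstanceIdentifier': 'app-db', 'DBInstanceStatus': 'available'},
    {'DBClusterIdentifier': 'app-cluster', 'Status': 'creating'},
    {'DBInstanceIdentifier': 'old-db', 'DBInstanceStatus': 'deleting'},
]
print([h.get('DBInstanceIdentifier') or h.get('DBClusterIdentifier')
       for h in hosts_with_valid_statuses(sample_hosts, ['creating', 'available'])])
# ['app-db', 'app-cluster']
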
diff --git a/lib/ansible/plugins/lookup/aws_account_attribute.py b/lib/ansible/plugins/lookup/aws_account_attribute.py
deleted file mode 100644
index 23f311da42..0000000000
--- a/lib/ansible/plugins/lookup/aws_account_attribute.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
-lookup: aws_account_attribute
-author:
- - Sloane Hertel <shertel@redhat.com>
-version_added: "2.5"
-requirements:
- - boto3
- - botocore
-extends_documentation_fragment:
- - aws_credentials
- - aws_region
-short_description: Look up AWS account attributes.
-description:
- - Describes attributes of your AWS account. You can specify one of the listed
- attribute choices or omit it to see all attributes.
-options:
- attribute:
- description: The attribute for which to get the value(s).
- choices:
- - supported-platforms
- - default-vpc
- - max-instances
- - vpc-max-security-groups-per-interface
- - max-elastic-ips
- - vpc-max-elastic-ips
- - has-ec2-classic
-"""
-
-EXAMPLES = """
-vars:
- has_ec2_classic: "{{ lookup('aws_account_attribute', attribute='has-ec2-classic') }}"
- # true | false
-
- default_vpc_id: "{{ lookup('aws_account_attribute', attribute='default-vpc') }}"
- # vpc-xxxxxxxx | none
-
- account_details: "{{ lookup('aws_account_attribute', wantlist='true') }}"
- # {'default-vpc': ['vpc-xxxxxxxx'], 'max-elastic-ips': ['5'], 'max-instances': ['20'],
- # 'supported-platforms': ['VPC', 'EC2'], 'vpc-max-elastic-ips': ['5'], 'vpc-max-security-groups-per-interface': ['5']}
-
-"""
-
-RETURN = """
-_raw:
- description:
- Returns a boolean when I(attribute) is check_ec2_classic. Otherwise returns the value(s) of the attribute
- (or all attributes if one is not specified).
-"""
-
-from ansible.errors import AnsibleError
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError("The lookup aws_account_attribute requires boto3 and botocore.")
-
-from ansible.plugins import AnsiblePlugin
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six import string_types
-import os
-
-
-def _boto3_conn(region, credentials):
- boto_profile = credentials.pop('aws_profile', None)
-
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region, **credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if boto_profile:
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found.")
- else:
- raise AnsibleError("Insufficient credentials found.")
- return connection
-
-
-def _get_credentials(options):
- credentials = {}
- credentials['aws_profile'] = options['aws_profile']
- credentials['aws_secret_access_key'] = options['aws_secret_key']
- credentials['aws_access_key_id'] = options['aws_access_key']
- credentials['aws_session_token'] = options['aws_security_token']
-
- return credentials
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables, **kwargs):
-
- self.set_options(var_options=variables, direct=kwargs)
- boto_credentials = _get_credentials(self._options)
-
- region = self._options['region']
- client = _boto3_conn(region, boto_credentials)
-
- attribute = kwargs.get('attribute')
- params = {'AttributeNames': []}
- check_ec2_classic = False
- if 'has-ec2-classic' == attribute:
- check_ec2_classic = True
- params['AttributeNames'] = ['supported-platforms']
- elif attribute:
- params['AttributeNames'] = [attribute]
-
- try:
- response = client.describe_account_attributes(**params)['AccountAttributes']
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- raise AnsibleError("Failed to describe account attributes: %s" % to_native(e))
-
- if check_ec2_classic:
- attr = response[0]
- return any(value['AttributeValue'] == 'EC2' for value in attr['AttributeValues'])
-
- if attribute:
- attr = response[0]
- return [value['AttributeValue'] for value in attr['AttributeValues']]
-
- flattened = {}
- for k_v_dict in response:
- flattened[k_v_dict['AttributeName']] = [value['AttributeValue'] for value in k_v_dict['AttributeValues']]
- return flattened
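describe_account_attributes returns a list of AttributeName/AttributeValues entries; the lookup above flattens that list into a dict unless a single attribute (or the has-ec2-classic shortcut) was requested. A sketch of the flattening step with a made-up response:

def flatten_account_attributes(response):
    # Turn the AccountAttributes list shape into {name: [values]}, as the
    # final branch of LookupModule.run above does (sketch).
    return {
        entry['AttributeName']: [v['AttributeValue'] for v in entry['AttributeValues']]
        for entry in response
    }

sample_response = [  # shape only; the values are made up
    {'AttributeName': 'supported-platforms',
     'AttributeValues': [{'AttributeValue': 'VPC'}]},
    {'AttributeName': 'max-instances',
     'AttributeValues': [{'AttributeValue': '20'}]},
]
print(flatten_account_attributes(sample_response))
# {'supported-platforms': ['VPC'], 'max-instances': ['20']}
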
diff --git a/lib/ansible/plugins/lookup/aws_secret.py b/lib/ansible/plugins/lookup/aws_secret.py
deleted file mode 100644
index fa100e7df5..0000000000
--- a/lib/ansible/plugins/lookup/aws_secret.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-lookup: aws_secret
-author:
- - Aaron Smith <ajsmith10381@gmail.com>
-version_added: "2.8"
-requirements:
- - boto3
- - botocore>=1.10.0
-extends_documentation_fragment:
- - aws_credentials
- - aws_region
-short_description: Look up secrets stored in AWS Secrets Manager.
-description:
- - Look up secrets stored in AWS Secrets Manager provided the caller
- has the appropriate permissions to read the secret.
- - Lookup is based on the secret's C(Name) value.
- - Optional parameters C(version_id) and C(version_stage) can be passed to this lookup.
-options:
- _terms:
- description: Name of the secret to look up in AWS Secrets Manager.
- required: True
- version_id:
- description: Version of the secret(s).
- required: False
- version_stage:
- description: Stage of the secret version.
- required: False
- join:
- description:
- - Join two or more entries to form an extended secret.
- - This is useful for overcoming the 4096 character limit imposed by AWS.
- type: boolean
- default: false
-"""
-
-EXAMPLES = r"""
- - name: Create RDS instance with aws_secret lookup for password param
- rds:
- command: create
- instance_name: app-db
- db_engine: MySQL
- size: 10
- instance_type: db.m1.small
- username: dbadmin
- password: "{{ lookup('aws_secret', 'DbSecret') }}"
- tags:
- Environment: staging
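-
- # Illustrative addition (not in the original examples); the secret names below are
- # hypothetical. join=True stitches the listed entries back into a single value.
- - name: Reassemble a secret that was split to stay under the 4096 character limit
-   debug:
-     msg: "{{ lookup('aws_secret', 'BigSecretPart1', 'BigSecretPart2', join=True) }}"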
-"""
-
-RETURN = r"""
-_raw:
- description:
- Returns the value of the secret stored in AWS Secrets Manager.
-"""
-
-from ansible.errors import AnsibleError
-
-try:
- import boto3
- import botocore
-except ImportError:
- raise AnsibleError("The lookup aws_secret requires boto3 and botocore.")
-
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils._text import to_native
-
-
-def _boto3_conn(region, credentials):
- boto_profile = credentials.pop('aws_profile', None)
-
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region, **credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- if boto_profile:
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
- raise AnsibleError("Insufficient credentials found.")
- else:
- raise AnsibleError("Insufficient credentials found.")
- return connection
-
-
-class LookupModule(LookupBase):
- def _get_credentials(self):
- credentials = {}
- credentials['aws_profile'] = self.get_option('aws_profile')
- credentials['aws_secret_access_key'] = self.get_option('aws_secret_key')
- credentials['aws_access_key_id'] = self.get_option('aws_access_key')
- credentials['aws_session_token'] = self.get_option('aws_security_token')
-
- # fallback to IAM role credentials
- if not credentials['aws_profile'] and not (credentials['aws_access_key_id'] and credentials['aws_secret_access_key']):
- session = botocore.session.get_session()
- if session.get_credentials() is not None:
- credentials['aws_access_key_id'] = session.get_credentials().access_key
- credentials['aws_secret_access_key'] = session.get_credentials().secret_key
- credentials['aws_session_token'] = session.get_credentials().token
-
- return credentials
-
- def run(self, terms, variables, **kwargs):
-
- self.set_options(var_options=variables, direct=kwargs)
- boto_credentials = self._get_credentials()
-
- region = self.get_option('region')
- client = _boto3_conn(region, boto_credentials)
-
- secrets = []
- for term in terms:
- params = {}
- params['SecretId'] = term
- if kwargs.get('version_id'):
- params['VersionId'] = kwargs.get('version_id')
- if kwargs.get('version_stage'):
- params['VersionStage'] = kwargs.get('version_stage')
-
- try:
- response = client.get_secret_value(**params)
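- # Secrets Manager returns binary secrets under SecretBinary and text
- # secrets under SecretString; collect whichever key is present.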
- if 'SecretBinary' in response:
- secrets.append(response['SecretBinary'])
- if 'SecretString' in response:
- secrets.append(response['SecretString'])
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- raise AnsibleError("Failed to retrieve secret: %s" % to_native(e))
-
- if kwargs.get('join'):
- joined_secret = []
- joined_secret.append(''.join(secrets))
- return joined_secret
- else:
- return secrets
diff --git a/lib/ansible/plugins/lookup/aws_service_ip_ranges.py b/lib/ansible/plugins/lookup/aws_service_ip_ranges.py
deleted file mode 100644
index 89072f6962..0000000000
--- a/lib/ansible/plugins/lookup/aws_service_ip_ranges.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# (c) 2016 James Turner <turnerjsm@gmail.com>
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
-lookup: aws_service_ip_ranges
-author:
- - James Turner <turnerjsm@gmail.com>
-version_added: "2.5"
-requirements:
- - must have public internet connectivity
-short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
-description:
- - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
- - This lookup produces a list of all the ranges (by default) or can narrow the list down to the specified region or service.
-options:
- service:
- description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
- region:
- description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
-"""
-
-EXAMPLES = """
-vars:
- ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
-tasks:
-
-- name: "use list return option and iterate as a loop"
- debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
-# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
-
-- name: "Pull S3 IP ranges, and print the default return style"
- debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
-# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
-"""
-
-RETURN = """
-_raw:
- description: comma-separated list of CIDR ranges
-"""
-
-
-import json
-
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables, **kwargs):
- try:
- resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
- amazon_response = json.load(resp)['prefixes']
- except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
- # on Python 3+, json.decoder.JSONDecodeError is raised for bad
- # JSON. On 2.x it's a ValueError
- raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
- except HTTPError as e:
- raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
- except SSLValidationError as e:
- raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
- except URLError as e:
- raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
- except ConnectionError as e:
- raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
-
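- # Filters are applied lazily as generator expressions; the prefix list is only
- # walked once, by the final list comprehension.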
- if 'region' in kwargs:
- region = kwargs['region']
- amazon_response = (item for item in amazon_response if item['region'] == region)
- if 'service' in kwargs:
- service = str.upper(kwargs['service'])
- amazon_response = (item for item in amazon_response if item['service'] == service)
-
- return [item['ip_prefix'] for item in amazon_response]
diff --git a/lib/ansible/plugins/lookup/aws_ssm.py b/lib/ansible/plugins/lookup/aws_ssm.py
deleted file mode 100644
index 7d875ce3e5..0000000000
--- a/lib/ansible/plugins/lookup/aws_ssm.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# (c) 2016, Bill Wang <ozbillwang(at)gmail.com>
-# (c) 2017, Marat Bakeev <hawara(at)gmail.com>
-# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com>
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
-lookup: aws_ssm
-author:
- - Bill Wang <ozbillwang(at)gmail.com>
- - Marat Bakeev <hawara(at)gmail.com>
- - Michael De La Rue <siblemitcom.mddlr@spamgourmet.com>
-version_added: "2.5"
-requirements:
- - boto3
- - botocore
-short_description: Get the value for an SSM parameter or all parameters under a path.
-description:
- - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters.
- The first argument you pass to the lookup can either be a parameter name or a hierarchy of
- parameters. Hierarchies start with a forward slash and end with the parameter name. Up to
- 5 layers may be specified.
- - If looking up an explicitly listed parameter by name which does not exist then the lookup
- will return a None value, which will be interpreted by Jinja2 as an empty string. You can use
- the C(default) filter to supply a default value in this case, but you must set its second
- parameter to true (see the examples below).
- - When looking up a path of parameters, a dictionary will be returned for each path.
- If there is no parameter under that path then the lookup will still succeed but the
- dictionary will be empty.
- - If the lookup fails due to lack of permissions or an AWS client error then aws_ssm
- will raise an error, normally failing the current ansible task. This is usually the right
- behaviour, since silently ignoring a value that IAM denies access to could cause bigger problems,
- wrong behaviour or loss of data. If you want to continue in that case then you will have to set
- up two ansible tasks: one which sets a variable and ignores failures, and one which uses the value
- of that variable with a default. See the examples below.
-
-options:
- decrypt:
- description: A boolean to indicate whether to decrypt the parameter.
- default: true
- type: boolean
- bypath:
- description: A boolean to indicate whether the parameter is provided as a hierarchy.
- default: false
- type: boolean
- recursive:
- description: A boolean to indicate whether to retrieve all parameters within a hierarchy.
- default: false
- type: boolean
- shortnames:
- description: Indicates whether to return the name only without path if using a parameter hierarchy.
- default: false
- type: boolean
-'''
-
-EXAMPLES = '''
-# lookup sample:
-- name: lookup ssm parameter store in the current region
- debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}"
-
-- name: lookup ssm parameter store in nominated region
- debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}"
-
-- name: lookup ssm parameter store without decryption
- debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}"
-
-- name: lookup ssm parameter store in nominated aws profile
- debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}"
-
-- name: lookup ssm parameter store using explicit aws credentials
- debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}"
-
-- name: lookup ssm parameter store with all options.
- debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}"
-
-- name: lookup a key which doesn't exist, returns ""
- debug: msg="{{ lookup('aws_ssm', 'NoKey') }}"
-
-- name: lookup a key which doesn't exist, returning a default ('root')
- debug: msg="{{ lookup('aws_ssm', 'AdminID') | default('root', true) }}"
-
-- name: lookup a key which doesn't exist, failing to store it in a fact
- set_fact:
- temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}"
- ignore_errors: true
-
-- name: show fact default to "access failed" if we don't have access
- debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}"
-
-- name: return a dictionary of ssm parameters from a hierarchy path
- debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}"
-
-- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param)
- debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}"
-
-- name: Iterate over a parameter hierarchy (one iteration per parameter)
- debug: msg='Key contains {{ item.key }} , with value {{ item.value }}'
- loop: '{{ lookup("aws_ssm", "/demo/", region="ap-southeast-2", bypath=True) | dict2items }}'
-
-- name: Iterate over multiple paths as dictionaries (one iteration per path)
- debug: msg='Path contains {{ item }}'
- loop: '{{ lookup("aws_ssm", "/demo/", "/demo1/", bypath=True)}}'
-
-'''
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.ec2 import HAS_BOTO3, boto3_tag_list_to_ansible_dict
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.display import Display
-
-try:
- from botocore.exceptions import ClientError
- import botocore
- import boto3
-except ImportError:
- pass # will be captured by imported HAS_BOTO3
-
-display = Display()
-
-
-def _boto3_conn(region, credentials):
- if 'boto_profile' in credentials:
- boto_profile = credentials.pop('boto_profile')
- else:
- boto_profile = None
-
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region, **credentials)
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
- if boto_profile:
- try:
- connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region)
- # FIXME: we should probably do better passing on of the error information
- except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
- raise AnsibleError("Insufficient credentials found.")
- else:
- raise AnsibleError("Insufficient credentials found.")
- return connection
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
- aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
- bypath=False, shortnames=False, recursive=False, decrypt=True):
- '''
- :arg terms: a list of lookups to run.
- e.g. ['parameter_name', 'parameter_name_too' ]
- :kwarg variables: ansible variables active at the time of the lookup
- :kwarg aws_secret_key: AWS secret access key (matching the access key id)
- :kwarg aws_access_key: AWS access key id to use
- :kwarg aws_security_token: AWS session key if using STS
- :kwarg decrypt: Set to True to get decrypted parameters
- :kwarg region: AWS region in which to do the lookup
- :kwarg bypath: Set to True to do a lookup of variables under a path
- :kwarg recursive: Set to True to recurse below the path (requires bypath=True)
- :returns: A list of parameter values or a list of dictionaries if bypath=True.
- '''
-
- if not HAS_BOTO3:
- raise AnsibleError('botocore and boto3 are required for aws_ssm lookup.')
-
- ret = []
- response = {}
- ssm_dict = {}
-
- credentials = {}
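- # The explicit aws_profile argument takes precedence over boto_profile
- # when both are supplied.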
- if aws_profile:
- credentials['boto_profile'] = aws_profile
- else:
- credentials['boto_profile'] = boto_profile
- credentials['aws_secret_access_key'] = aws_secret_key
- credentials['aws_access_key_id'] = aws_access_key
- credentials['aws_session_token'] = aws_security_token
-
- client = _boto3_conn(region, credentials)
-
- ssm_dict['WithDecryption'] = decrypt
-
- # Lookup by path
- if bypath:
- ssm_dict['Recursive'] = recursive
- for term in terms:
- ssm_dict["Path"] = term
- display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region))
- try:
- response = client.get_parameters_by_path(**ssm_dict)
- except ClientError as e:
- raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
- paramlist = list()
- paramlist.extend(response['Parameters'])
-
- # Manual pagination, since boto doesn't support it yet for get_parameters_by_path
- while 'NextToken' in response:
- response = client.get_parameters_by_path(NextToken=response['NextToken'], **ssm_dict)
- paramlist.extend(response['Parameters'])
-
- # shorten parameter names. yes, this will return duplicate names with different values.
- if shortnames:
- for x in paramlist:
- x['Name'] = x['Name'][x['Name'].rfind('/') + 1:]
-
- display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist))
- if len(paramlist):
- ret.append(boto3_tag_list_to_ansible_dict(paramlist,
- tag_name_key_name="Name",
- tag_value_key_name="Value"))
- else:
- ret.append({})
- # Lookup by parameter name - always returns a list with one or no entry.
- else:
- display.vvv("AWS_ssm name lookup term: %s" % terms)
- ssm_dict["Names"] = terms
- try:
- response = client.get_parameters(**ssm_dict)
- except ClientError as e:
- raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
- params = boto3_tag_list_to_ansible_dict(response['Parameters'], tag_name_key_name="Name",
- tag_value_key_name="Value")
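- # Three outcomes per requested name: found (append its value), reported by the
- # API as invalid/missing (append None, which Jinja2 renders as ''), or anything
- # else, which means a response shape this lookup does not understand.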
- for i in terms:
- if i.split(':', 1)[0] in params:
- ret.append(params[i])
- elif i in response['InvalidParameters']:
- ret.append(None)
- else:
- raise AnsibleError("Ansible internal error: aws_ssm lookup failed to understand boto3 return value: {0}".format(str(response)))
- return ret
-
- display.vvvv("AWS_ssm path lookup returning: %s " % str(ret))
- return ret