summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorThe Magician <magic-modules@google.com>2019-04-02 11:09:05 -0700
committeransibot <ansibot@users.noreply.github.com>2019-04-02 14:09:05 -0400
commit696e9c1462793ad331d5b232842052f141b6e962 (patch)
tree659d0b19b1327efee3ea0dd6f7e1f61e6ab1f6c3
parent08d254588327463d8dbecbdd69c5e762c32ae984 (diff)
downloadansible-696e9c1462793ad331d5b232842052f141b6e962.tar.gz
Bug fixes for GCP modules (#54706)
-rw-r--r--lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger.py237
-rw-r--r--lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger_facts.py87
-rw-r--r--lib/ansible/modules/cloud/google/gcp_compute_backend_bucket.py54
-rw-r--r--lib/ansible/modules/cloud/google/gcp_compute_backend_bucket_facts.py17
-rw-r--r--lib/ansible/modules/cloud/google/gcp_compute_backend_service.py40
-rw-r--r--lib/ansible/modules/cloud/google/gcp_compute_backend_service_facts.py11
6 files changed, 439 insertions, 7 deletions
diff --git a/lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger.py b/lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger.py
index f37e6b67bb..13759a8401 100644
--- a/lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger.py
+++ b/lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger.py
@@ -176,6 +176,82 @@ options:
define an entrypoint, the first element in args is used as the entrypoint,
and the remainder will be used as arguments.
required: false
+ env:
+ description:
+ - A list of environment variable definitions to be used when running a
+ step.
+ - The elements are of the form "KEY=VALUE" for the environment variable
+ "KEY" being given the value "VALUE".
+ required: false
+ id:
+ description:
+ - Unique identifier for this build step, used in `wait_for` to reference
+ this build step as a dependency.
+ required: false
+ entrypoint:
+ description:
+ - Entrypoint to be used instead of the build step image's default entrypoint.
+        - If unset, the image's default entrypoint is used.
+ required: false
+ dir:
+ description:
+ - Working directory to use when running this step's container.
+ - If this value is a relative path, it is relative to the build's working
+ directory. If this value is absolute, it may be outside the build's
+ working directory, in which case the contents of the path may not be
+ persisted across build step executions, unless a `volume` for that path
+ is specified.
+ - If the build specifies a `RepoSource` with `dir` and a step with a `dir`,
+ which specifies an absolute path, the `RepoSource` `dir` is ignored
+ for the step's execution.
+ required: false
+ secret_env:
+ description:
+ - A list of environment variables which are encrypted using a Cloud Key
+ Management Service crypto key. These values must be specified in the
+ build's `Secret`.
+ required: false
+ timeout:
+ description:
+ - Time limit for executing this build step. If not defined, the step has
+ no time limit and will be allowed to continue to run until either it
+ completes or the build itself times out.
+ required: false
+ timing:
+ description:
+ - Output only. Stores timing information for executing this build step.
+ required: false
+ volumes:
+ description:
+ - List of volumes to mount into the build step.
+ - Each volume is created as an empty volume prior to execution of the
+ build step. Upon completion of the build, volumes and their contents
+ are discarded.
+ - Using a named volume in only one step is not valid as it is indicative
+ of a build request with an incorrect configuration.
+ required: false
+ suboptions:
+ name:
+ description:
+ - Name of the volume to mount.
+ - Volume names must be unique per build step and must be valid names
+ for Docker volumes. Each named volume must be used by at least two
+ build steps.
+ required: false
+ path:
+ description:
+ - Path at which to mount the volume.
+ - Paths must be absolute and cannot conflict with other volume paths
+ on the same build step or with certain reserved volume paths.
+ required: false
+ wait_for:
+ description:
+ - The ID(s) of the step(s) that this build step depends on.
+ - This build step will not start until all the build steps in `wait_for`
+ have completed successfully. If `wait_for` is empty, this build step
+ will start when all previous build steps in the `Build.Steps` list have
+ completed successfully.
+ required: false
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/cloud-build/docs/api/reference/rest/)'
@@ -359,6 +435,90 @@ build:
the remainder will be used as arguments.
returned: success
type: list
+ env:
+ description:
+ - A list of environment variable definitions to be used when running a step.
+ - The elements are of the form "KEY=VALUE" for the environment variable
+ "KEY" being given the value "VALUE".
+ returned: success
+ type: list
+ id:
+ description:
+ - Unique identifier for this build step, used in `wait_for` to reference
+ this build step as a dependency.
+ returned: success
+ type: str
+ entrypoint:
+ description:
+ - Entrypoint to be used instead of the build step image's default entrypoint.
+      - If unset, the image's default entrypoint is used.
+ returned: success
+ type: str
+ dir:
+ description:
+ - Working directory to use when running this step's container.
+ - If this value is a relative path, it is relative to the build's working
+ directory. If this value is absolute, it may be outside the build's working
+ directory, in which case the contents of the path may not be persisted
+ across build step executions, unless a `volume` for that path is specified.
+ - If the build specifies a `RepoSource` with `dir` and a step with a `dir`,
+ which specifies an absolute path, the `RepoSource` `dir` is ignored for
+ the step's execution.
+ returned: success
+ type: str
+ secretEnv:
+ description:
+ - A list of environment variables which are encrypted using a Cloud Key
+ Management Service crypto key. These values must be specified in the build's
+ `Secret`.
+ returned: success
+ type: list
+ timeout:
+ description:
+ - Time limit for executing this build step. If not defined, the step has
+ no time limit and will be allowed to continue to run until either it completes
+ or the build itself times out.
+ returned: success
+ type: str
+ timing:
+ description:
+ - Output only. Stores timing information for executing this build step.
+ returned: success
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount into the build step.
+ - Each volume is created as an empty volume prior to execution of the build
+ step. Upon completion of the build, volumes and their contents are discarded.
+ - Using a named volume in only one step is not valid as it is indicative
+ of a build request with an incorrect configuration.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description:
+ - Name of the volume to mount.
+ - Volume names must be unique per build step and must be valid names
+ for Docker volumes. Each named volume must be used by at least two
+ build steps.
+ returned: success
+ type: str
+ path:
+ description:
+ - Path at which to mount the volume.
+ - Paths must be absolute and cannot conflict with other volume paths
+ on the same build step or with certain reserved volume paths.
+ returned: success
+ type: str
+ waitFor:
+ description:
+ - The ID(s) of the step(s) that this build step depends on.
+ - This build step will not start until all the build steps in `wait_for`
+ have completed successfully. If `wait_for` is empty, this build step will
+ start when all previous build steps in the `Build.Steps` list have completed
+ successfully.
+ returned: success
+ type: list
'''
################################################################################
@@ -402,7 +562,23 @@ def main():
options=dict(
tags=dict(type='list', elements='str'),
images=dict(type='list', elements='str'),
- steps=dict(type='list', elements='dict', options=dict(name=dict(type='str'), args=dict(type='list', elements='str'))),
+ steps=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str'),
+ args=dict(type='list', elements='str'),
+ env=dict(type='list', elements='str'),
+ id=dict(type='str'),
+ entrypoint=dict(type='str'),
+ dir=dict(type='str'),
+ secret_env=dict(type='list', elements='str'),
+ timeout=dict(type='str'),
+ timing=dict(type='str'),
+ volumes=dict(type='list', elements='dict', options=dict(name=dict(type='str'), path=dict(type='str'))),
+ wait_for=dict(type='list', elements='str'),
+ ),
+ ),
),
),
),
@@ -627,10 +803,65 @@ class TriggerStepsArray(object):
return items
def _request_for_item(self, item):
- return remove_nones_from_dict({u'name': item.get('name'), u'args': item.get('args')})
+ return remove_nones_from_dict(
+ {
+ u'name': item.get('name'),
+ u'args': item.get('args'),
+ u'env': item.get('env'),
+ u'id': item.get('id'),
+ u'entrypoint': item.get('entrypoint'),
+ u'dir': item.get('dir'),
+ u'secretEnv': item.get('secret_env'),
+ u'timeout': item.get('timeout'),
+ u'timing': item.get('timing'),
+ u'volumes': TriggerVolumesArray(item.get('volumes', []), self.module).to_request(),
+ u'waitFor': item.get('wait_for'),
+ }
+ )
+
+ def _response_from_item(self, item):
+ return remove_nones_from_dict(
+ {
+ u'name': item.get(u'name'),
+ u'args': item.get(u'args'),
+ u'env': item.get(u'env'),
+ u'id': item.get(u'id'),
+ u'entrypoint': item.get(u'entrypoint'),
+ u'dir': item.get(u'dir'),
+ u'secretEnv': item.get(u'secretEnv'),
+ u'timeout': item.get(u'timeout'),
+ u'timing': item.get(u'timing'),
+ u'volumes': TriggerVolumesArray(item.get(u'volumes', []), self.module).from_response(),
+ u'waitFor': item.get(u'waitFor'),
+ }
+ )
+
+
+class TriggerVolumesArray(object):
+ def __init__(self, request, module):
+ self.module = module
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return remove_nones_from_dict({u'name': item.get('name'), u'path': item.get('path')})
def _response_from_item(self, item):
- return remove_nones_from_dict({u'name': item.get(u'name'), u'args': item.get(u'args')})
+ return remove_nones_from_dict({u'name': item.get(u'name'), u'path': item.get(u'path')})
if __name__ == '__main__':
diff --git a/lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger_facts.py b/lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger_facts.py
index ac40c6e7e7..05faee0071 100644
--- a/lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger_facts.py
+++ b/lib/ansible/modules/cloud/google/gcp_cloudbuild_trigger_facts.py
@@ -214,6 +214,93 @@ items:
and the remainder will be used as arguments.
returned: success
type: list
+ env:
+ description:
+ - A list of environment variable definitions to be used when running
+ a step.
+ - The elements are of the form "KEY=VALUE" for the environment variable
+ "KEY" being given the value "VALUE".
+ returned: success
+ type: list
+ id:
+ description:
+ - Unique identifier for this build step, used in `wait_for` to reference
+ this build step as a dependency.
+ returned: success
+ type: str
+ entrypoint:
+ description:
+ - Entrypoint to be used instead of the build step image's default entrypoint.
+          - If unset, the image's default entrypoint is used.
+ returned: success
+ type: str
+ dir:
+ description:
+ - Working directory to use when running this step's container.
+ - If this value is a relative path, it is relative to the build's working
+ directory. If this value is absolute, it may be outside the build's
+ working directory, in which case the contents of the path may not
+ be persisted across build step executions, unless a `volume` for that
+ path is specified.
+ - If the build specifies a `RepoSource` with `dir` and a step with a
+ `dir`, which specifies an absolute path, the `RepoSource` `dir` is
+ ignored for the step's execution.
+ returned: success
+ type: str
+ secretEnv:
+ description:
+ - A list of environment variables which are encrypted using a Cloud
+ Key Management Service crypto key. These values must be specified
+ in the build's `Secret`.
+ returned: success
+ type: list
+ timeout:
+ description:
+ - Time limit for executing this build step. If not defined, the step
+ has no time limit and will be allowed to continue to run until either
+ it completes or the build itself times out.
+ returned: success
+ type: str
+ timing:
+ description:
+ - Output only. Stores timing information for executing this build step.
+ returned: success
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount into the build step.
+ - Each volume is created as an empty volume prior to execution of the
+ build step. Upon completion of the build, volumes and their contents
+ are discarded.
+ - Using a named volume in only one step is not valid as it is indicative
+ of a build request with an incorrect configuration.
+ returned: success
+ type: complex
+ contains:
+ name:
+ description:
+ - Name of the volume to mount.
+ - Volume names must be unique per build step and must be valid names
+ for Docker volumes. Each named volume must be used by at least
+ two build steps.
+ returned: success
+ type: str
+ path:
+ description:
+ - Path at which to mount the volume.
+ - Paths must be absolute and cannot conflict with other volume paths
+ on the same build step or with certain reserved volume paths.
+ returned: success
+ type: str
+ waitFor:
+ description:
+ - The ID(s) of the step(s) that this build step depends on.
+ - This build step will not start until all the build steps in `wait_for`
+ have completed successfully. If `wait_for` is empty, this build step
+ will start when all previous build steps in the `Build.Steps` list
+ have completed successfully.
+ returned: success
+ type: list
'''
################################################################################
diff --git a/lib/ansible/modules/cloud/google/gcp_compute_backend_bucket.py b/lib/ansible/modules/cloud/google/gcp_compute_backend_bucket.py
index d1bddb47c4..2658387d8c 100644
--- a/lib/ansible/modules/cloud/google/gcp_compute_backend_bucket.py
+++ b/lib/ansible/modules/cloud/google/gcp_compute_backend_bucket.py
@@ -55,6 +55,23 @@ options:
description:
- Cloud Storage bucket name.
required: true
+ cdn_policy:
+ description:
+ - Cloud CDN configuration for this Backend Bucket.
+ required: false
+ version_added: 2.8
+ suboptions:
+ signed_url_cache_max_age_sec:
+ description:
+ - Maximum number of seconds the response to a signed URL request will be considered
+ fresh. Defaults to 1hr (3600s). After this time period, the response will
+ be revalidated before being served.
+ - 'When serving responses to signed URL requests, Cloud CDN will internally
+ behave as though all responses from this backend had a "Cache-Control: public,
+ max-age=[TTL]" header, regardless of any existing Cache-Control header.
+ The actual headers served in responses will not be altered.'
+ required: false
+ default: '3600'
description:
description:
- An optional textual description of the resource; provided by the client when
@@ -108,6 +125,23 @@ bucketName:
- Cloud Storage bucket name.
returned: success
type: str
+cdnPolicy:
+ description:
+ - Cloud CDN configuration for this Backend Bucket.
+ returned: success
+ type: complex
+ contains:
+ signedUrlCacheMaxAgeSec:
+ description:
+ - Maximum number of seconds the response to a signed URL request will be considered
+ fresh. Defaults to 1hr (3600s). After this time period, the response will
+ be revalidated before being served.
+ - 'When serving responses to signed URL requests, Cloud CDN will internally
+ behave as though all responses from this backend had a "Cache-Control: public,
+ max-age=[TTL]" header, regardless of any existing Cache-Control header. The
+ actual headers served in responses will not be altered.'
+ returned: success
+ type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
@@ -145,7 +179,7 @@ name:
# Imports
################################################################################
-from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
+from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
@@ -161,6 +195,7 @@ def main():
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
bucket_name=dict(required=True, type='str'),
+ cdn_policy=dict(type='dict', options=dict(signed_url_cache_max_age_sec=dict(default=3600, type='int'))),
description=dict(type='str'),
enable_cdn=dict(type='bool'),
name=dict(required=True, type='str'),
@@ -217,6 +252,7 @@ def resource_to_request(module):
request = {
u'kind': 'compute#backendBucket',
u'bucketName': module.params.get('bucket_name'),
+ u'cdnPolicy': BackendBucketCdnpolicy(module.params.get('cdn_policy', {}), module).to_request(),
u'description': module.params.get('description'),
u'enableCdn': module.params.get('enable_cdn'),
u'name': module.params.get('name'),
@@ -286,6 +322,7 @@ def is_different(module, response):
def response_to_hash(module, response):
return {
u'bucketName': response.get(u'bucketName'),
+ u'cdnPolicy': BackendBucketCdnpolicy(response.get(u'cdnPolicy', {}), module).from_response(),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'enableCdn': response.get(u'enableCdn'),
@@ -329,5 +366,20 @@ def raise_if_errors(response, err_path, module):
module.fail_json(msg=errors)
+class BackendBucketCdnpolicy(object):
+ def __init__(self, request, module):
+ self.module = module
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({u'signedUrlCacheMaxAgeSec': self.request.get('signed_url_cache_max_age_sec')})
+
+ def from_response(self):
+ return remove_nones_from_dict({u'signedUrlCacheMaxAgeSec': self.request.get(u'signedUrlCacheMaxAgeSec')})
+
+
if __name__ == '__main__':
main()
diff --git a/lib/ansible/modules/cloud/google/gcp_compute_backend_bucket_facts.py b/lib/ansible/modules/cloud/google/gcp_compute_backend_bucket_facts.py
index fe21d60bdc..aa1dd49920 100644
--- a/lib/ansible/modules/cloud/google/gcp_compute_backend_bucket_facts.py
+++ b/lib/ansible/modules/cloud/google/gcp_compute_backend_bucket_facts.py
@@ -70,6 +70,23 @@ items:
- Cloud Storage bucket name.
returned: success
type: str
+ cdnPolicy:
+ description:
+ - Cloud CDN configuration for this Backend Bucket.
+ returned: success
+ type: complex
+ contains:
+ signedUrlCacheMaxAgeSec:
+ description:
+ - Maximum number of seconds the response to a signed URL request will be
+ considered fresh. Defaults to 1hr (3600s). After this time period, the
+ response will be revalidated before being served.
+ - 'When serving responses to signed URL requests, Cloud CDN will internally
+ behave as though all responses from this backend had a "Cache-Control:
+ public, max-age=[TTL]" header, regardless of any existing Cache-Control
+ header. The actual headers served in responses will not be altered.'
+ returned: success
+ type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
diff --git a/lib/ansible/modules/cloud/google/gcp_compute_backend_service.py b/lib/ansible/modules/cloud/google/gcp_compute_backend_service.py
index 03d1a19338..f528e06882 100644
--- a/lib/ansible/modules/cloud/google/gcp_compute_backend_service.py
+++ b/lib/ansible/modules/cloud/google/gcp_compute_backend_service.py
@@ -182,6 +182,18 @@ options:
or query_string_blacklist, not both.
- "'&' and '=' will be percent encoded and not treated as delimiters."
required: false
+ signed_url_cache_max_age_sec:
+ description:
+ - Maximum number of seconds the response to a signed URL request will be considered
+ fresh, defaults to 1hr (3600s). After this time period, the response will
+ be revalidated before being served.
+ - 'When serving responses to signed URL requests, Cloud CDN will internally
+ behave as though all responses from this backend had a "Cache-Control: public,
+ max-age=[TTL]" header, regardless of any existing Cache-Control header.
+ The actual headers served in responses will not be altered.'
+ required: false
+ default: '3600'
+ version_added: 2.8
connection_draining:
description:
- Settings for connection draining.
@@ -473,6 +485,17 @@ cdnPolicy:
- "'&' and '=' will be percent encoded and not treated as delimiters."
returned: success
type: list
+ signedUrlCacheMaxAgeSec:
+ description:
+ - Maximum number of seconds the response to a signed URL request will be considered
+ fresh, defaults to 1hr (3600s). After this time period, the response will
+ be revalidated before being served.
+ - 'When serving responses to signed URL requests, Cloud CDN will internally
+ behave as though all responses from this backend had a "Cache-Control: public,
+ max-age=[TTL]" header, regardless of any existing Cache-Control header. The
+ actual headers served in responses will not be altered.'
+ returned: success
+ type: int
connectionDraining:
description:
- Settings for connection draining.
@@ -644,7 +667,8 @@ def main():
query_string_blacklist=dict(type='list', elements='str'),
query_string_whitelist=dict(type='list', elements='str'),
),
- )
+ ),
+ signed_url_cache_max_age_sec=dict(default=3600, type='int'),
),
),
connection_draining=dict(type='dict', options=dict(draining_timeout_sec=dict(type='int'))),
@@ -915,10 +939,20 @@ class BackendServiceCdnpolicy(object):
self.request = {}
def to_request(self):
- return remove_nones_from_dict({u'cacheKeyPolicy': BackendServiceCachekeypolicy(self.request.get('cache_key_policy', {}), self.module).to_request()})
+ return remove_nones_from_dict(
+ {
+ u'cacheKeyPolicy': BackendServiceCachekeypolicy(self.request.get('cache_key_policy', {}), self.module).to_request(),
+ u'signedUrlCacheMaxAgeSec': self.request.get('signed_url_cache_max_age_sec'),
+ }
+ )
def from_response(self):
- return remove_nones_from_dict({u'cacheKeyPolicy': BackendServiceCachekeypolicy(self.request.get(u'cacheKeyPolicy', {}), self.module).from_response()})
+ return remove_nones_from_dict(
+ {
+ u'cacheKeyPolicy': BackendServiceCachekeypolicy(self.request.get(u'cacheKeyPolicy', {}), self.module).from_response(),
+ u'signedUrlCacheMaxAgeSec': self.request.get(u'signedUrlCacheMaxAgeSec'),
+ }
+ )
class BackendServiceCachekeypolicy(object):
diff --git a/lib/ansible/modules/cloud/google/gcp_compute_backend_service_facts.py b/lib/ansible/modules/cloud/google/gcp_compute_backend_service_facts.py
index e9bbb3e6c6..e7339098bd 100644
--- a/lib/ansible/modules/cloud/google/gcp_compute_backend_service_facts.py
+++ b/lib/ansible/modules/cloud/google/gcp_compute_backend_service_facts.py
@@ -206,6 +206,17 @@ items:
- "'&' and '=' will be percent encoded and not treated as delimiters."
returned: success
type: list
+ signedUrlCacheMaxAgeSec:
+ description:
+ - Maximum number of seconds the response to a signed URL request will be
+ considered fresh, defaults to 1hr (3600s). After this time period, the
+ response will be revalidated before being served.
+ - 'When serving responses to signed URL requests, Cloud CDN will internally
+ behave as though all responses from this backend had a "Cache-Control:
+ public, max-age=[TTL]" header, regardless of any existing Cache-Control
+ header. The actual headers served in responses will not be altered.'
+ returned: success
+ type: int
connectionDraining:
description:
- Settings for connection draining.