author    Alex Stephen <alexstephen@google.com>      2018-04-25 12:26:13 -0700
committer Ryan Brown <sb@ryansb.com>                 2018-04-25 15:26:13 -0400
commit    58bf4ae6119e9ce483a1a63c5543bbeaed063de8 (patch)
tree      9da0c888ed4f23ec14cd789de822e533c7d0519a
parent    39bf7d3655fdbf65acb3ccc7c3b0bfb414e7bd60 (diff)
download  ansible-58bf4ae6119e9ce483a1a63c5543bbeaed063de8.tar.gz
New module: GCP Storage Buckets (#37288)
-rw-r--r--  lib/ansible/modules/cloud/google/gcp_storage_bucket.py          1166
-rw-r--r--  test/integration/targets/gcp_storage_bucket/aliases                2
-rw-r--r--  test/integration/targets/gcp_storage_bucket/defaults/main.yml      3
-rw-r--r--  test/integration/targets/gcp_storage_bucket/meta/main.yml          0
-rw-r--r--  test/integration/targets/gcp_storage_bucket/tasks/main.yml        88
5 files changed, 1259 insertions, 0 deletions
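
As an orientation for the diff below: the new module follows the usual GCP module pattern (auth_kind, service_account_file and scopes come from the shared gcp documentation fragment) and layers the Storage-specific options documented in gcp_storage_bucket.py on top. A minimal playbook sketch, with placeholder project ID, credentials path and bucket name, exercising a few of those options:

- name: create a versioned regional bucket with a lifecycle rule (sketch)
  gcp_storage_bucket:
    name: example-ansible-bucket            # placeholder, bucket names are global
    project: my-gcp-project                 # placeholder project ID
    auth_kind: service_account
    service_account_file: /tmp/auth.pem     # placeholder credentials file
    scopes:
      - https://www.googleapis.com/auth/devstorage.full_control
    location: us-central1
    storage_class: REGIONAL
    versioning:
      enabled: true
    lifecycle:
      rule:
        - action:
            type: Delete
          condition:
            age_days: 30
    state: present
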
diff --git a/lib/ansible/modules/cloud/google/gcp_storage_bucket.py b/lib/ansible/modules/cloud/google/gcp_storage_bucket.py
new file mode 100644
index 0000000000..2df5851a98
--- /dev/null
+++ b/lib/ansible/modules/cloud/google/gcp_storage_bucket.py
@@ -0,0 +1,1166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2017 Google
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ["preview"],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: gcp_storage_bucket
+description:
+ - The Buckets resource represents a bucket in Google Cloud Storage. There is
+ a single global namespace shared by all buckets. For more information, see
+ Bucket Name Requirements.
+ - Buckets contain objects which can be accessed by their own methods. In
+ addition to the acl property, buckets contain bucketAccessControls, for
+ use in fine-grained manipulation of an existing bucket's access controls.
+ - A bucket is always owned by the project team owners group.
+short_description: Creates a GCP Bucket
+version_added: 2.6
+author: Google Inc. (@googlecloudplatform)
+requirements:
+ - python >= 2.6
+ - requests >= 2.18.4
+ - google-auth >= 1.3.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in GCP
+ required: true
+ choices: ['present', 'absent']
+ default: 'present'
+ acl:
+ description:
+ - Access controls on the bucket.
+ required: false
+ suboptions:
+ bucket:
+ description:
+ - A reference to Bucket resource.
+ required: true
+ domain:
+ description:
+ - The domain associated with the entity.
+ required: false
+ email:
+ description:
+ - The email address associated with the entity.
+ required: false
+ entity:
+ description:
+ - |
+ The entity holding the permission, in one of the following
+ forms: user-userId, user-email, group-groupId, group-email,
+ domain-domain, project-team-projectId, allUsers,
+ allAuthenticatedUsers. Examples: the user liz@example.com
+ would be user-liz@example.com.
+ - The group example@googlegroups.com would be
+ group-example@googlegroups.com.
+ - To refer to all members of the Google Apps for Business domain
+ example.com, the entity would be domain-example.com.
+ required: true
+ entity_id:
+ description:
+ - The ID for the entity.
+ required: false
+ id:
+ description:
+ - The ID of the access-control entry.
+ required: false
+ project_team:
+ description:
+ - The project team associated with the entity.
+ required: false
+ suboptions:
+ project_number:
+ description:
+ - The project team associated with the entity.
+ required: false
+ team:
+ description:
+ - The team.
+ required: false
+ choices: ['editors', 'owners', 'viewers']
+ role:
+ description:
+ - The access permission for the entity.
+ required: false
+ choices: ['OWNER', 'READER', 'WRITER']
+ cors:
+ description:
+ - The bucket's Cross-Origin Resource Sharing (CORS) configuration.
+ required: false
+ suboptions:
+ max_age_seconds:
+ description:
+ - The value, in seconds, to return in the Access-Control-Max-Age
+ header used in preflight responses.
+ required: false
+ method:
+ description:
+ - |
+ The list of HTTP methods on which to include CORS response
+ headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in
+ the list of methods, and means "any method".
+ required: false
+ origin:
+ description:
+ - The list of Origins eligible to receive CORS response headers.
+ - |
+ Note: "*" is permitted in the list of origins, and means "any
+ Origin".
+ required: false
+ response_header:
+ description:
+ - The list of HTTP headers other than the simple response
+ headers to give permission for the user-agent to share across
+ domains.
+ required: false
+ lifecycle:
+ description:
+ - The bucket's lifecycle configuration.
+ - |
+ See https://developers.google.com/storage/docs/lifecycle for more
+ information.
+ required: false
+ suboptions:
+ rule:
+ description:
+ - A lifecycle management rule, which is made of an action to
+ take and the condition(s) under which the action will be taken.
+ required: false
+ suboptions:
+ action:
+ description:
+ - The action to take.
+ required: false
+ suboptions:
+ storage_class:
+ description:
+ - Target storage class. Required iff the type of the
+ action is SetStorageClass.
+ required: false
+ type:
+ description:
+ - Type of the action. Currently, only Delete and
+ SetStorageClass are supported.
+ required: false
+ choices: ['Delete', 'SetStorageClass']
+ condition:
+ description:
+ - The condition(s) under which the action will be taken.
+ required: false
+ suboptions:
+ age_days:
+ description:
+ - Age of an object (in days). This condition is
+ satisfied when an object reaches the specified age.
+ required: false
+ created_before:
+ description:
+ - A date in RFC 3339 format with only the date part (for
+ instance, "2013-01-15"). This condition is satisfied
+ when an object is created before midnight of the
+ specified date in UTC.
+ required: false
+ is_live:
+ description:
+ - Relevant only for versioned objects. If the value is
+ true, this condition matches live objects; if the
+ value is false, it matches archived objects.
+ required: false
+ type: bool
+ matches_storage_class:
+ description:
+ - Objects having any of the storage classes specified by
+ this condition will be matched. Values include
+ MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE,
+ STANDARD, and DURABLE_REDUCED_AVAILABILITY.
+ required: false
+ num_newer_versions:
+ description:
+ - Relevant only for versioned objects. If the value is
+ N, this condition is satisfied when there are at least
+ N versions (including the live version) newer than
+ this version of the object.
+ required: false
+ location:
+ description:
+ - The location of the bucket. Object data for objects in the bucket
+ resides in physical storage within this region. Defaults to US.
+ See the developer's guide for the authoritative list.
+ required: false
+ logging:
+ description:
+ - The bucket's logging configuration, which defines the destination
+ bucket and optional name prefix for the current bucket's logs.
+ required: false
+ suboptions:
+ log_bucket:
+ description:
+ - The destination bucket where the current bucket's logs should
+ be placed.
+ required: false
+ log_object_prefix:
+ description:
+ - A prefix for log object names.
+ required: false
+ metageneration:
+ description:
+ - The metadata generation of this bucket.
+ required: false
+ name:
+ description:
+ - The name of the bucket.
+ required: false
+ owner:
+ description:
+ - The owner of the bucket. This is always the project team's owner
+ group.
+ required: false
+ suboptions:
+ entity:
+ description:
+ - The entity, in the form project-owner-projectId.
+ required: false
+ entity_id:
+ description:
+ - The ID for the entity.
+ required: false
+ storage_class:
+ description:
+ - The bucket's default storage class, used whenever no storageClass
+ is specified for a newly-created object. This defines how objects
+ in the bucket are stored and determines the SLA and the cost of
+ storage.
+ - Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE,
+ COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not
+ specified when the bucket is created, it will default to STANDARD.
+ For more information, see storage classes.
+ required: false
+ choices: ['MULTI_REGIONAL', 'REGIONAL', 'STANDARD', 'NEARLINE', 'COLDLINE', 'DURABLE_REDUCED_AVAILABILITY']
+ versioning:
+ description:
+ - The bucket's versioning configuration.
+ required: false
+ suboptions:
+ enabled:
+ description:
+ - While set to true, versioning is fully enabled for this bucket.
+ required: false
+ type: bool
+ website:
+ description:
+ - The bucket's website configuration, controlling how the service
+ behaves when accessing bucket contents as a web site. See the
+ Static Website Examples for more information.
+ required: false
+ suboptions:
+ main_page_suffix:
+ description:
+ - If the requested object path is missing, the service will
+ ensure the path has a trailing '/', append this suffix, and
+ attempt to retrieve the resulting object. This allows the
+ creation of index.html objects to represent directory pages.
+ required: false
+ not_found_page:
+ description:
+ - If the requested object path is missing, and any
+ mainPageSuffix object is missing, if applicable, the service
+ will return the named object from this bucket as the content
+ for a 404 Not Found result.
+ required: false
+ project:
+ description:
+ - A valid API project identifier.
+ required: false
+ predefined_default_object_acl:
+ description:
+ - Apply a predefined set of default object access controls to this
+ bucket.
+ - |
+ Acceptable values are: - "authenticatedRead": Object owner gets
+ OWNER access, and allAuthenticatedUsers get READER access.
+ - |
+ - "bucketOwnerFullControl": Object owner gets OWNER access, and
+ project team owners get OWNER access.
+ - |
+ - "bucketOwnerRead": Object owner gets OWNER access, and project
+ team owners get READER access.
+ - |
+ - "private": Object owner gets OWNER access.
+ - |
+ - "projectPrivate": Object owner gets OWNER access, and project
+ team members get access according to their roles.
+ - |
+ - "publicRead": Object owner gets OWNER access, and allUsers get
+ READER access.
+ required: false
+ choices: ['authenticatedRead', 'bucketOwnerFullControl', 'bucketOwnerRead', 'private', 'projectPrivate', 'publicRead']
+extends_documentation_fragment: gcp
+'''
+
+EXAMPLES = '''
+- name: create a bucket
+ gcp_storage_bucket:
+ name: 'ansible-storage-module'
+ project: testProject
+ auth_kind: service_account
+ service_account_file: /tmp/auth.pem
+ scopes:
+ - https://www.googleapis.com/auth/devstorage.full_control
+ state: present
+'''
+
+RETURN = '''
+ acl:
+ description:
+ - Access controls on the bucket.
+ returned: success
+ type: complex
+ contains:
+ bucket:
+ description:
+ - A reference to Bucket resource.
+ returned: success
+ type: dict
+ domain:
+ description:
+ - The domain associated with the entity.
+ returned: success
+ type: str
+ email:
+ description:
+ - The email address associated with the entity.
+ returned: success
+ type: str
+ entity:
+ description:
+ - |
+ The entity holding the permission, in one of the following
+ forms: user-userId, user-email, group-groupId, group-email,
+ domain-domain, project-team-projectId, allUsers,
+ allAuthenticatedUsers. Examples: the user liz@example.com
+ would be user-liz@example.com.
+ - The group example@googlegroups.com would be
+ group-example@googlegroups.com.
+ - To refer to all members of the Google Apps for Business domain
+ example.com, the entity would be domain-example.com.
+ returned: success
+ type: str
+ entity_id:
+ description:
+ - The ID for the entity.
+ returned: success
+ type: str
+ id:
+ description:
+ - The ID of the access-control entry.
+ returned: success
+ type: str
+ project_team:
+ description:
+ - The project team associated with the entity.
+ returned: success
+ type: complex
+ contains:
+ project_number:
+ description:
+ - The project team associated with the entity.
+ returned: success
+ type: str
+ team:
+ description:
+ - The team.
+ returned: success
+ type: str
+ role:
+ description:
+ - The access permission for the entity.
+ returned: success
+ type: str
+ cors:
+ description:
+ - The bucket's Cross-Origin Resource Sharing (CORS) configuration.
+ returned: success
+ type: complex
+ contains:
+ max_age_seconds:
+ description:
+ - The value, in seconds, to return in the Access-Control-Max-Age
+ header used in preflight responses.
+ returned: success
+ type: int
+ method:
+ description:
+ - |
+ The list of HTTP methods on which to include CORS response
+ headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in
+ the list of methods, and means "any method".
+ returned: success
+ type: list
+ origin:
+ description:
+ - The list of Origins eligible to receive CORS response headers.
+ - |
+ Note: "*" is permitted in the list of origins, and means "any
+ Origin".
+ returned: success
+ type: list
+ response_header:
+ description:
+ - The list of HTTP headers other than the simple response
+ headers to give permission for the user-agent to share across
+ domains.
+ returned: success
+ type: list
+ id:
+ description:
+ - The ID of the bucket. For buckets, the id and name properties are
+ the same.
+ returned: success
+ type: str
+ lifecycle:
+ description:
+ - The bucket's lifecycle configuration.
+ - |
+ See https://developers.google.com/storage/docs/lifecycle for more
+ information.
+ returned: success
+ type: complex
+ contains:
+ rule:
+ description:
+ - A lifecycle management rule, which is made of an action to
+ take and the condition(s) under which the action will be taken.
+ returned: success
+ type: complex
+ contains:
+ action:
+ description:
+ - The action to take.
+ returned: success
+ type: complex
+ contains:
+ storage_class:
+ description:
+ - Target storage class. Required iff the type of the
+ action is SetStorageClass.
+ returned: success
+ type: str
+ type:
+ description:
+ - Type of the action. Currently, only Delete and
+ SetStorageClass are supported.
+ returned: success
+ type: str
+ condition:
+ description:
+ - The condition(s) under which the action will be taken.
+ returned: success
+ type: complex
+ contains:
+ age_days:
+ description:
+ - Age of an object (in days). This condition is
+ satisfied when an object reaches the specified age.
+ returned: success
+ type: int
+ created_before:
+ description:
+ - A date in RFC 3339 format with only the date part (for
+ instance, "2013-01-15"). This condition is satisfied
+ when an object is created before midnight of the
+ specified date in UTC.
+ returned: success
+ type: str
+ is_live:
+ description:
+ - Relevant only for versioned objects. If the value is
+ true, this condition matches live objects; if the
+ value is false, it matches archived objects.
+ returned: success
+ type: bool
+ matches_storage_class:
+ description:
+ - Objects having any of the storage classes specified by
+ this condition will be matched. Values include
+ MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE,
+ STANDARD, and DURABLE_REDUCED_AVAILABILITY.
+ returned: success
+ type: list
+ num_newer_versions:
+ description:
+ - Relevant only for versioned objects. If the value is
+ N, this condition is satisfied when there are at least
+ N versions (including the live version) newer than
+ this version of the object.
+ returned: success
+ type: int
+ location:
+ description:
+ - The location of the bucket. Object data for objects in the bucket
+ resides in physical storage within this region. Defaults to US.
+ See the developer's guide for the authoritative list.
+ returned: success
+ type: str
+ logging:
+ description:
+ - The bucket's logging configuration, which defines the destination
+ bucket and optional name prefix for the current bucket's logs.
+ returned: success
+ type: complex
+ contains:
+ log_bucket:
+ description:
+ - The destination bucket where the current bucket's logs should
+ be placed.
+ returned: success
+ type: str
+ log_object_prefix:
+ description:
+ - A prefix for log object names.
+ returned: success
+ type: str
+ metageneration:
+ description:
+ - The metadata generation of this bucket.
+ returned: success
+ type: int
+ name:
+ description:
+ - The name of the bucket.
+ returned: success
+ type: str
+ owner:
+ description:
+ - The owner of the bucket. This is always the project team's owner
+ group.
+ returned: success
+ type: complex
+ contains:
+ entity:
+ description:
+ - The entity, in the form project-owner-projectId.
+ returned: success
+ type: str
+ entity_id:
+ description:
+ - The ID for the entity.
+ returned: success
+ type: str
+ project_number:
+ description:
+ - The project number of the project the bucket belongs to.
+ returned: success
+ type: int
+ storage_class:
+ description:
+ - The bucket's default storage class, used whenever no storageClass
+ is specified for a newly-created object. This defines how objects
+ in the bucket are stored and determines the SLA and the cost of
+ storage.
+ - Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE,
+ COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not
+ specified when the bucket is created, it will default to STANDARD.
+ For more information, see storage classes.
+ returned: success
+ type: str
+ time_created:
+ description:
+ - The creation time of the bucket in RFC 3339 format.
+ returned: success
+ type: str
+ updated:
+ description:
+ - The modification time of the bucket in RFC 3339 format.
+ returned: success
+ type: str
+ versioning:
+ description:
+ - The bucket's versioning configuration.
+ returned: success
+ type: complex
+ contains:
+ enabled:
+ description:
+ - While set to true, versioning is fully enabled for this bucket.
+ returned: success
+ type: bool
+ website:
+ description:
+ - The bucket's website configuration, controlling how the service
+ behaves when accessing bucket contents as a web site. See the
+ Static Website Examples for more information.
+ returned: success
+ type: complex
+ contains:
+ main_page_suffix:
+ description:
+ - If the requested object path is missing, the service will
+ ensure the path has a trailing '/', append this suffix, and
+ attempt to retrieve the resulting object. This allows the
+ creation of index.html objects to represent directory pages.
+ returned: success
+ type: str
+ not_found_page:
+ description:
+ - If the requested object path is missing, and any
+ mainPageSuffix object is missing, if applicable, the service
+ will return the named object from this bucket as the content
+ for a 404 Not Found result.
+ returned: success
+ type: str
+ project:
+ description:
+ - A valid API project identifier.
+ returned: success
+ type: str
+ predefined_default_object_acl:
+ description:
+ - Apply a predefined set of default object access controls to this
+ bucket.
+ - |
+ Acceptable values are: - "authenticatedRead": Object owner gets
+ OWNER access, and allAuthenticatedUsers get READER access.
+ - |
+ - "bucketOwnerFullControl": Object owner gets OWNER access, and
+ project team owners get OWNER access.
+ - |
+ - "bucketOwnerRead": Object owner gets OWNER access, and project
+ team owners get READER access.
+ - |
+ - "private": Object owner gets OWNER access.
+ - |
+ - "projectPrivate": Object owner gets OWNER access, and project
+ team members get access according to their roles.
+ - |
+ - "publicRead": Object owner gets OWNER access, and allUsers get
+ READER access.
+ returned: success
+ type: str
+'''
+
+################################################################################
+# Imports
+################################################################################
+
+from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
+import json
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+ """Main function"""
+
+ module = GcpModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ acl=dict(type='list', elements='dict', options=dict(
+ bucket=dict(required=True, type='dict'),
+ domain=dict(type='str'),
+ email=dict(type='str'),
+ entity=dict(required=True, type='str'),
+ entity_id=dict(type='str'),
+ id=dict(type='str'),
+ project_team=dict(type='dict', options=dict(
+ project_number=dict(type='str'),
+ team=dict(type='str', choices=['editors', 'owners', 'viewers'])
+ )),
+ role=dict(type='str', choices=['OWNER', 'READER', 'WRITER'])
+ )),
+ cors=dict(type='list', elements='dict', options=dict(
+ max_age_seconds=dict(type='int'),
+ method=dict(type='list', elements='str'),
+ origin=dict(type='list', elements='str'),
+ response_header=dict(type='list', elements='str')
+ )),
+ lifecycle=dict(type='dict', options=dict(
+ rule=dict(type='list', elements='dict', options=dict(
+ action=dict(type='dict', options=dict(
+ storage_class=dict(type='str'),
+ type=dict(type='str', choices=['Delete', 'SetStorageClass'])
+ )),
+ condition=dict(type='dict', options=dict(
+ age_days=dict(type='int'),
+ created_before=dict(type='str'),
+ is_live=dict(type='bool'),
+ matches_storage_class=dict(type='list', elements='str'),
+ num_newer_versions=dict(type='int')
+ ))
+ ))
+ )),
+ location=dict(type='str'),
+ logging=dict(type='dict', options=dict(
+ log_bucket=dict(type='str'),
+ log_object_prefix=dict(type='str')
+ )),
+ metageneration=dict(type='int'),
+ name=dict(type='str'),
+ owner=dict(type='dict', options=dict(
+ entity=dict(type='str'),
+ entity_id=dict(type='str')
+ )),
+ storage_class=dict(type='str', choices=['MULTI_REGIONAL', 'REGIONAL', 'STANDARD', 'NEARLINE', 'COLDLINE', 'DURABLE_REDUCED_AVAILABILITY']),
+ versioning=dict(type='dict', options=dict(
+ enabled=dict(type='bool')
+ )),
+ website=dict(type='dict', options=dict(
+ main_page_suffix=dict(type='str'),
+ not_found_page=dict(type='str')
+ )),
+ project=dict(type='str'),
+ predefined_default_object_acl=dict(type='str', choices=['authenticatedRead',
+ 'bucketOwnerFullControl',
+ 'bucketOwnerRead',
+ 'private',
+ 'projectPrivate',
+ 'publicRead'])
+ )
+ )
+
+ state = module.params['state']
+ kind = 'storage#bucket'
+
+ fetch = fetch_resource(module, self_link(module), kind)
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ if is_different(module, fetch):
+ fetch = update(module, self_link(module), kind)
+ changed = True
+ else:
+ delete(module, self_link(module), kind)
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ fetch = create(module, collection(module), kind)
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(module, link, kind):
+ auth = GcpSession(module, 'storage')
+ return return_if_object(module, auth.post(link, resource_to_request(module)), kind)
+
+
+def update(module, link, kind):
+ auth = GcpSession(module, 'storage')
+ return return_if_object(module, auth.put(link, resource_to_request(module)), kind)
+
+
+def delete(module, link, kind):
+ auth = GcpSession(module, 'storage')
+ return return_if_object(module, auth.delete(link), kind)
+
+
+def resource_to_request(module):
+ request = {
+ u'kind': 'storage#bucket',
+ u'project': module.params.get('project'),
+ u'predefinedDefaultObjectAcl': module.params.get('predefined_default_object_acl'),
+ u'acl': BucketAclArray(module.params.get('acl', [])).to_request(),
+ u'cors': BucketCorsArray(module.params.get('cors', [])).to_request(),
+ u'lifecycle': BucketLifecycle(module.params.get('lifecycle', {})).to_request(),
+ u'location': module.params.get('location'),
+ u'logging': BucketLogging(module.params.get('logging', {})).to_request(),
+ u'metageneration': module.params.get('metageneration'),
+ u'name': module.params.get('name'),
+ u'owner': BucketOwner(module.params.get('owner', {})).to_request(),
+ u'storageClass': module.params.get('storage_class'),
+ u'versioning': BucketVersioning(module.params.get('versioning', {})).to_request(),
+ u'website': BucketWebsite(module.params.get('website', {})).to_request()
+ }
+ return_vals = {}
+ for k, v in request.items():
+ if v:
+ return_vals[k] = v
+
+ return return_vals
+
+
+def fetch_resource(module, link, kind):
+ auth = GcpSession(module, 'storage')
+ return return_if_object(module, auth.get(link), kind)
+
+
+def self_link(module):
+ return "https://www.googleapis.com/storage/v1/b/{name}?projection=full".format(**module.params)
+
+
+def collection(module):
+ return "https://www.googleapis.com/storage/v1/b?project={project}".format(**module.params)
+
+
+def return_if_object(module, response, kind):
+ # If not found, return nothing.
+ if response.status_code == 404:
+ return None
+
+ # If no content, return nothing.
+ if response.status_code == 204:
+ return None
+
+ try:
+ module.raise_for_status(response)
+ result = response.json()
+ except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+ module.fail_json(msg="Invalid JSON response with error: %s" % inst)
+
+ if navigate_hash(result, ['error', 'errors']):
+ module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
+ if result['kind'] != kind:
+ module.fail_json(msg="Incorrect result: {kind}".format(**result))
+
+ return result
+
+
+def is_different(module, response):
+ request = resource_to_request(module)
+ response = response_to_hash(module, response)
+
+ # Remove all output-only from response.
+ response_vals = {}
+ for k, v in response.items():
+ if k in request:
+ response_vals[k] = v
+
+ request_vals = {}
+ for k, v in request.items():
+ if k in response:
+ request_vals[k] = v
+
+ return GcpRequest(request_vals) != GcpRequest(response_vals)
+
+
+# Remove unnecessary properties from the response.
+# This is for doing comparisons with Ansible's current parameters.
+def response_to_hash(module, response):
+ return {
+ u'acl': BucketAclArray(response.get(u'acl', [])).from_response(),
+ u'cors': BucketCorsArray(response.get(u'cors', [])).from_response(),
+ u'id': response.get(u'id'),
+ u'lifecycle': BucketLifecycle(response.get(u'lifecycle', {})).from_response(),
+ u'location': response.get(u'location'),
+ u'logging': BucketLogging(response.get(u'logging', {})).from_response(),
+ u'metageneration': response.get(u'metageneration'),
+ u'name': response.get(u'name'),
+ u'owner': BucketOwner(response.get(u'owner', {})).from_response(),
+ u'projectNumber': response.get(u'projectNumber'),
+ u'storageClass': response.get(u'storageClass'),
+ u'timeCreated': response.get(u'timeCreated'),
+ u'updated': response.get(u'updated'),
+ u'versioning': BucketVersioning(response.get(u'versioning', {})).from_response(),
+ u'website': BucketWebsite(response.get(u'website', {})).from_response()
+ }
+
+
+class BucketAclArray(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return remove_nones_from_dict({
+ u'bucket': replace_resource_dict(item.get(u'bucket', {}), 'name'),
+ u'domain': item.get('domain'),
+ u'email': item.get('email'),
+ u'entity': item.get('entity'),
+ u'entityId': item.get('entity_id'),
+ u'id': item.get('id'),
+ u'projectTeam': BucketProjectTeam(item.get('project_team', {})).to_request(),
+ u'role': item.get('role')
+ })
+
+ def _response_from_item(self, item):
+ return remove_nones_from_dict({
+ u'bucket': item.get(u'bucket'),
+ u'domain': item.get(u'domain'),
+ u'email': item.get(u'email'),
+ u'entity': item.get(u'entity'),
+ u'entityId': item.get(u'entityId'),
+ u'id': item.get(u'id'),
+ u'projectTeam': BucketProjectTeam(item.get(u'projectTeam', {})).from_response(),
+ u'role': item.get(u'role')
+ })
+
+
+class BucketProjectTeam(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({
+ u'projectNumber': self.request.get('project_number'),
+ u'team': self.request.get('team')
+ })
+
+ def from_response(self):
+ return remove_nones_from_dict({
+ u'projectNumber': self.request.get(u'projectNumber'),
+ u'team': self.request.get(u'team')
+ })
+
+
+class BucketCorsArray(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return remove_nones_from_dict({
+ u'maxAgeSeconds': item.get('max_age_seconds'),
+ u'method': item.get('method'),
+ u'origin': item.get('origin'),
+ u'responseHeader': item.get('response_header')
+ })
+
+ def _response_from_item(self, item):
+ return remove_nones_from_dict({
+ u'maxAgeSeconds': item.get(u'maxAgeSeconds'),
+ u'method': item.get(u'method'),
+ u'origin': item.get(u'origin'),
+ u'responseHeader': item.get(u'responseHeader')
+ })
+
+
+class BucketLifecycle(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({
+ u'rule': BucketRuleArray(self.request.get('rule', [])).to_request()
+ })
+
+ def from_response(self):
+ return remove_nones_from_dict({
+ u'rule': BucketRuleArray(self.request.get(u'rule', [])).from_response()
+ })
+
+
+class BucketRuleArray(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return remove_nones_from_dict({
+ u'action': BucketAction(item.get('action', {})).to_request(),
+ u'condition': BucketCondition(item.get('condition', {})).to_request()
+ })
+
+ def _response_from_item(self, item):
+ return remove_nones_from_dict({
+ u'action': BucketAction(item.get(u'action', {})).from_response(),
+ u'condition': BucketCondition(item.get(u'condition', {})).from_response()
+ })
+
+
+class BucketAction(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({
+ u'storageClass': self.request.get('storage_class'),
+ u'type': self.request.get('type')
+ })
+
+ def from_response(self):
+ return remove_nones_from_dict({
+ u'storageClass': self.request.get(u'storageClass'),
+ u'type': self.request.get(u'type')
+ })
+
+
+class BucketCondition(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({
+ u'age': self.request.get('age_days'),
+ u'createdBefore': self.request.get('created_before'),
+ u'isLive': self.request.get('is_live'),
+ u'matchesStorageClass': self.request.get('matches_storage_class'),
+ u'numNewerVersions': self.request.get('num_newer_versions')
+ })
+
+ def from_response(self):
+ return remove_nones_from_dict({
+ u'age': self.request.get(u'ageDays'),
+ u'createdBefore': self.request.get(u'createdBefore'),
+ u'isLive': self.request.get(u'isLive'),
+ u'matchesStorageClass': self.request.get(u'matchesStorageClass'),
+ u'numNewerVersions': self.request.get(u'numNewerVersions')
+ })
+
+
+class BucketLogging(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({
+ u'logBucket': self.request.get('log_bucket'),
+ u'logObjectPrefix': self.request.get('log_object_prefix')
+ })
+
+ def from_response(self):
+ return remove_nones_from_dict({
+ u'logBucket': self.request.get(u'logBucket'),
+ u'logObjectPrefix': self.request.get(u'logObjectPrefix')
+ })
+
+
+class BucketOwner(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({
+ u'entity': self.request.get('entity'),
+ u'entityId': self.request.get('entity_id')
+ })
+
+ def from_response(self):
+ return remove_nones_from_dict({
+ u'entity': self.request.get(u'entity'),
+ u'entityId': self.request.get(u'entityId')
+ })
+
+
+class BucketVersioning(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({
+ u'enabled': self.request.get('enabled')
+ })
+
+ def from_response(self):
+ return remove_nones_from_dict({
+ u'enabled': self.request.get(u'enabled')
+ })
+
+
+class BucketWebsite(object):
+ def __init__(self, request):
+ if request:
+ self.request = request
+ else:
+ self.request = {}
+
+ def to_request(self):
+ return remove_nones_from_dict({
+ u'mainPageSuffix': self.request.get('main_page_suffix'),
+ u'notFoundPage': self.request.get('not_found_page')
+ })
+
+ def from_response(self):
+ return remove_nones_from_dict({
+ u'mainPageSuffix': self.request.get(u'mainPageSuffix'),
+ u'notFoundPage': self.request.get(u'notFoundPage')
+ })
+
+if __name__ == '__main__':
+ main()
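
Worth noting from main() above: the module GETs the bucket at its self_link, then chooses between create, update and delete, and is_different() compares only the fields present in both the request and the prior response; that is what makes a repeated, unchanged task report changed: false (the integration tests below assert exactly that). A sketch of the update path, reusing the placeholder names from the earlier example, where flipping versioning.enabled on an existing bucket drives update() and a PUT:

- name: disable versioning on the existing bucket (sketch)
  gcp_storage_bucket:
    name: example-ansible-bucket            # placeholder, created in the earlier sketch
    project: my-gcp-project                 # placeholder project ID
    auth_kind: service_account
    service_account_file: /tmp/auth.pem     # placeholder credentials file
    scopes:
      - https://www.googleapis.com/auth/devstorage.full_control
    versioning:
      enabled: false                        # differs from the stored bucket, so is_different() is true
    state: present
  register: bucket

- name: show what changed
  debug:
    msg: "changed={{ bucket.changed }}, kind={{ bucket.kind }}"
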
diff --git a/test/integration/targets/gcp_storage_bucket/aliases b/test/integration/targets/gcp_storage_bucket/aliases
new file mode 100644
index 0000000000..9812f019ca
--- /dev/null
+++ b/test/integration/targets/gcp_storage_bucket/aliases
@@ -0,0 +1,2 @@
+cloud/gcp
+unsupported
diff --git a/test/integration/targets/gcp_storage_bucket/defaults/main.yml b/test/integration/targets/gcp_storage_bucket/defaults/main.yml
new file mode 100644
index 0000000000..aa87a2a8e0
--- /dev/null
+++ b/test/integration/targets/gcp_storage_bucket/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# defaults file
+resource_name: '{{resource_prefix}}'
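
The defaults file derives resource_name from Ansible's resource_prefix, but the generated tasks below still hard-code the bucket name. If you adapt this target by hand, a task can pick the variable up instead; a sketch using the same credential variables the tasks already rely on:

- name: create a bucket named after the test run (sketch)
  gcp_storage_bucket:
    name: "{{ resource_name }}"             # comes from defaults/main.yml
    project: "{{ gcp_project }}"
    auth_kind: "{{ gcp_cred_kind }}"
    service_account_file: "{{ gcp_cred_file }}"
    scopes:
      - https://www.googleapis.com/auth/devstorage.full_control
    state: present
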
diff --git a/test/integration/targets/gcp_storage_bucket/meta/main.yml b/test/integration/targets/gcp_storage_bucket/meta/main.yml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/integration/targets/gcp_storage_bucket/meta/main.yml
diff --git a/test/integration/targets/gcp_storage_bucket/tasks/main.yml b/test/integration/targets/gcp_storage_bucket/tasks/main.yml
new file mode 100644
index 0000000000..2becee5214
--- /dev/null
+++ b/test/integration/targets/gcp_storage_bucket/tasks/main.yml
@@ -0,0 +1,88 @@
+---
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/GoogleCloudPlatform/magic-modules
+#
+# ----------------------------------------------------------------------------
+# Pre-test setup
+- name: delete a bucket
+ gcp_storage_bucket:
+ name: 'ansible-storage-module'
+ project: "{{ gcp_project }}"
+ auth_kind: "{{ gcp_cred_kind }}"
+ service_account_file: "{{ gcp_cred_file }}"
+ scopes:
+ - https://www.googleapis.com/auth/devstorage.full_control
+ state: absent
+#----------------------------------------------------------
+- name: create a bucket
+ gcp_storage_bucket:
+ name: 'ansible-storage-module'
+ project: "{{ gcp_project }}"
+ auth_kind: "{{ gcp_cred_kind }}"
+ service_account_file: "{{ gcp_cred_file }}"
+ scopes:
+ - https://www.googleapis.com/auth/devstorage.full_control
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result.changed == true
+ - "result.kind == 'storage#bucket'"
+# ----------------------------------------------------------------------------
+- name: create a bucket that already exists
+ gcp_storage_bucket:
+ name: 'ansible-storage-module'
+ project: "{{ gcp_project }}"
+ auth_kind: "{{ gcp_cred_kind }}"
+ service_account_file: "{{ gcp_cred_file }}"
+ scopes:
+ - https://www.googleapis.com/auth/devstorage.full_control
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.changed == false
+ - "result.kind == 'storage#bucket'"
+#----------------------------------------------------------
+- name: delete a bucket
+ gcp_storage_bucket:
+ name: 'ansible-storage-module'
+ project: "{{ gcp_project }}"
+ auth_kind: "{{ gcp_cred_kind }}"
+ service_account_file: "{{ gcp_cred_file }}"
+ scopes:
+ - https://www.googleapis.com/auth/devstorage.full_control
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result.changed == true
+ - "'kind' not in result"
+# ----------------------------------------------------------------------------
+- name: delete a bucket that does not exist
+ gcp_storage_bucket:
+ name: 'ansible-storage-module'
+ project: "{{ gcp_project }}"
+ auth_kind: "{{ gcp_cred_kind }}"
+ service_account_file: "{{ gcp_cred_file }}"
+ scopes:
+ - https://www.googleapis.com/auth/devstorage.full_control
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.changed == false
+ - "'kind' not in result"
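
Because main() hands the Storage API response straight to exit_json(), a result registered from the create step carries the raw bucket resource fields (name, id, location, storageClass and so on, in the API's own casing) alongside changed. If you want the tests to check more than kind, an assertion along these lines is a reasonable add-on; the field names assume the v1 JSON API bucket representation:

- name: assert the raw API fields surfaced by the module (sketch)
  assert:
    that:
      - result.name == 'ansible-storage-module'
      - "result.kind == 'storage#bucket'"
      - result.location is defined
      - result.storageClass is defined
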