author    Will Thames <will@thames.id.au>        2019-05-30 09:26:43 +1000
committer jctanner <tanner.jc@gmail.com>         2019-05-29 19:26:43 -0400
commit    ac1895453f5a552f151c501e697184c5c21144ec (patch)
tree      9cd2fedabdf3c7f947f831e2fe1ddd0730be6fe3 /test/integration/targets/k8s
parent    5008e1d479896e3be3d94a306ee1b72bd624d0a6 (diff)
Add test case for k8s cascading deletes (#55987)
* Add test case for non-cascading deletes
  Deleting a DaemonSet does not delete associated pods, even though it should
* Add coverage module when using pip
  Otherwise tests seemingly fail
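In outline, the new delete.yml test (shown in full in the diff below) exercises the following pattern. This is a minimal sketch, assuming a reachable cluster and the k8s/k8s_facts modules from this branch; the namespace and resource names are illustrative, not the committed ones:

- name: create a daemonset and wait for it to be scheduled
  k8s:
    definition:
      apiVersion: extensions/v1beta1
      kind: DaemonSet
      metadata:
        name: example-ds
        namespace: example
      spec:
        selector:
          matchLabels:
            app: example-ds
        template:
          metadata:
            labels:
              app: example-ds
          spec:
            containers:
              - name: example-ds
                image: gcr.io/kuar-demo/kuard-amd64:1
    wait: yes
    wait_timeout: 180

- name: delete the daemonset, which should cascade to its pods
  k8s:
    kind: DaemonSet
    name: example-ds
    namespace: example
    state: absent
    wait: yes

# If the cascading delete works, no pods with the daemonset's label remain
- name: look for surviving pods
  k8s_facts:
    namespace: example
    kind: Pod
    label_selectors:
      - app=example-ds
  register: remaining_pods

- name: assert that deleting the daemonset deleted the pods
  assert:
    that:
      - not remaining_pods.resources

The committed test additionally pauses for 30 seconds before the final check, since pod deletion happens in the background.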
Diffstat (limited to 'test/integration/targets/k8s')
-rw-r--r--  test/integration/targets/k8s/defaults/main.yml                  24
-rw-r--r--  test/integration/targets/k8s/tasks/delete.yml                  101
-rw-r--r--  test/integration/targets/k8s/tasks/full_test.yml                 1
-rw-r--r--  test/integration/targets/k8s/tasks/main.yml                     18
-rw-r--r--  test/integration/targets/k8s/tasks/older_openshift_fail.yml      3
-rw-r--r--  test/integration/targets/k8s/tasks/validate_not_installed.yml    6
-rw-r--r--  test/integration/targets/k8s/tasks/waiter.yml                   89
7 files changed, 176 insertions, 66 deletions
diff --git a/test/integration/targets/k8s/defaults/main.yml b/test/integration/targets/k8s/defaults/main.yml
index dcb712be0b..68fde7c412 100644
--- a/test/integration/targets/k8s/defaults/main.yml
+++ b/test/integration/targets/k8s/defaults/main.yml
@@ -1,15 +1,15 @@
 recreate_crd_default_merge_expectation: recreate_crd is not failed
 
-wait_pod_metadata:
+k8s_pod_metadata:
   labels:
-    app: "{{ wait_pod_name }}"
+    app: "{{ k8s_pod_name }}"
 
-wait_pod_spec:
+k8s_pod_spec:
   containers:
-  - image: "{{ wait_pod_image }}"
+  - image: "{{ k8s_pod_image }}"
     imagePullPolicy: Always
-    name: "{{ wait_pod_name }}"
-    command: "{{ wait_pod_command }}"
+    name: "{{ k8s_pod_name }}"
+    command: "{{ k8s_pod_command }}"
     readinessProbe:
       initialDelaySeconds: 15
       exec:
@@ -19,14 +19,14 @@ wait_pod_spec:
       limits:
         cpu: "100m"
         memory: "100Mi"
-    ports: "{{ wait_pod_ports }}"
+    ports: "{{ k8s_pod_ports }}"
 
-wait_pod_command: []
+k8s_pod_command: []
 
-wait_pod_ports: []
+k8s_pod_ports: []
 
-wait_pod_template:
-  metadata: "{{ wait_pod_metadata }}"
-  spec: "{{ wait_pod_spec }}"
+k8s_pod_template:
+  metadata: "{{ k8s_pod_metadata }}"
+  spec: "{{ k8s_pod_spec }}"
 
 k8s_openshift: yes
diff --git a/test/integration/targets/k8s/tasks/delete.yml b/test/integration/targets/k8s/tasks/delete.yml
new file mode 100644
index 0000000000..5ee48dca3e
--- /dev/null
+++ b/test/integration/targets/k8s/tasks/delete.yml
@@ -0,0 +1,101 @@
+- name: ensure that there are actually some nodes
+  k8s_facts:
+    kind: Node
+  register: nodes
+
+- block:
+  - set_fact:
+      delete_namespace: delete
+
+  - name: ensure namespace exists
+    k8s:
+      definition:
+        apiVersion: v1
+        kind: Namespace
+        metadata:
+          name: "{{ delete_namespace }}"
+
+  - name: add a daemonset
+    k8s:
+      definition:
+        apiVersion: extensions/v1beta1
+        kind: DaemonSet
+        metadata:
+          name: delete-daemonset
+          namespace: "{{ delete_namespace }}"
+        spec:
+          selector:
+            matchLabels:
+              app: "{{ k8s_pod_name }}"
+          template: "{{ k8s_pod_template }}"
+      wait: yes
+      wait_timeout: 180
+    vars:
+      k8s_pod_name: delete-ds
+      k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+    register: ds
+
+  - name: check that daemonset wait worked
+    assert:
+      that:
+        - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
+
+  - name: check if pods exist
+    k8s_facts:
+      namespace: "{{ delete_namespace }}"
+      kind: Pod
+      label_selectors:
+        - "app={{ k8s_pod_name }}"
+    vars:
+      k8s_pod_name: delete-ds
+    register: pods_create
+
+  - name: assert that there are pods
+    assert:
+      that:
+        - pods_create.resources
+
+  - name: remove the daemonset
+    k8s:
+      kind: DaemonSet
+      name: delete-daemonset
+      namespace: "{{ delete_namespace }}"
+      state: absent
+      wait: yes
+
+  - name: show status of pods
+    k8s_facts:
+      namespace: "{{ delete_namespace }}"
+      kind: Pod
+      label_selectors:
+        - "app={{ k8s_pod_name }}"
+    vars:
+      k8s_pod_name: delete-ds
+
+  - name: wait for background deletion
+    pause:
+      seconds: 30
+
+  - name: check if pods still exist
+    k8s_facts:
+      namespace: "{{ delete_namespace }}"
+      kind: Pod
+      label_selectors:
+        - "app={{ k8s_pod_name }}"
+    vars:
+      k8s_pod_name: delete-ds
+    register: pods_delete
+
+  - name: assert that deleting the daemonset deleted the pods
+    assert:
+      that:
+        - not pods_delete.resources
+
+  always:
+  - name: remove namespace
+    k8s:
+      kind: Namespace
+      name: "{{ delete_namespace }}"
+      state: absent
+
+  when: (nodes.resources | length) > 0
diff --git a/test/integration/targets/k8s/tasks/full_test.yml b/test/integration/targets/k8s/tasks/full_test.yml
index 804a0d87ba..99e362b770 100644
--- a/test/integration/targets/k8s/tasks/full_test.yml
+++ b/test/integration/targets/k8s/tasks/full_test.yml
@@ -4,6 +4,7 @@
 
 # Kubernetes resources
 
+- include_tasks: delete.yml
 - include_tasks: waiter.yml
 
 - block:
diff --git a/test/integration/targets/k8s/tasks/main.yml b/test/integration/targets/k8s/tasks/main.yml
index c2a57ab3ac..51b8ac482d 100644
--- a/test/integration/targets/k8s/tasks/main.yml
+++ b/test/integration/targets/k8s/tasks/main.yml
@@ -12,10 +12,11 @@
 - pip:
     name:
-      - openshift==0.8.1
+      - openshift==0.8.8
+      - coverage
     virtualenv: "{{ virtualenv }}"
     virtualenv_command: "{{ virtualenv_command }}"
-    virtualenv_site_packages: yes
+    virtualenv_site_packages: no
 
 - include_tasks: validate_not_installed.yml
   vars:
@@ -29,11 +30,12 @@
 - pip:
     name:
-      - openshift==0.8.1
+      - openshift==0.8.8
       - kubernetes-validate==1.12.0
+      - coverage
     virtualenv: "{{ virtualenv }}"
     virtualenv_command: "{{ virtualenv_command }}"
-    virtualenv_site_packages: yes
+    virtualenv_site_packages: no
 
 - include_tasks: validate_installed.yml
   vars:
@@ -50,9 +52,10 @@
     name:
       - openshift==0.6.0
       - kubernetes==6.0.0
+      - coverage
     virtualenv: "{{ virtualenv }}"
     virtualenv_command: "{{ virtualenv_command }}"
-    virtualenv_site_packages: yes
+    virtualenv_site_packages: no
 
 - include_tasks: older_openshift_fail.yml
   vars:
@@ -68,10 +71,11 @@
 - pip:
     name:
-      - openshift==0.8.1
+      - openshift==0.8.8
+      - coverage
     virtualenv: "{{ virtualenv }}"
     virtualenv_command: "{{ virtualenv_command }}"
-    virtualenv_site_packages: yes
+    virtualenv_site_packages: no
 
 - include_tasks: full_test.yml
   vars:
diff --git a/test/integration/targets/k8s/tasks/older_openshift_fail.yml b/test/integration/targets/k8s/tasks/older_openshift_fail.yml
index aa9afc0a86..6f91f744b3 100644
--- a/test/integration/targets/k8s/tasks/older_openshift_fail.yml
+++ b/test/integration/targets/k8s/tasks/older_openshift_fail.yml
@@ -25,9 +25,6 @@
- "'Failed to import the required Python library (openshift >= 0.7.2)' in k8s_append_hash.msg"
- "'. This is required for append_hash.' in k8s_append_hash.msg"
- # merge_type
- - include_tasks: crd.yml
-
# validate
- name: attempt to use validate with older openshift
k8s:
diff --git a/test/integration/targets/k8s/tasks/validate_not_installed.yml b/test/integration/targets/k8s/tasks/validate_not_installed.yml
index e4af14da30..3c80bdde03 100644
--- a/test/integration/targets/k8s/tasks/validate_not_installed.yml
+++ b/test/integration/targets/k8s/tasks/validate_not_installed.yml
@@ -1,3 +1,9 @@
+- python_requirements_facts:
+    dependencies:
+      - openshift
+      - kubernetes
+      - kubernetes-validate
+
 - k8s:
     definition:
       apiVersion: v1
diff --git a/test/integration/targets/k8s/tasks/waiter.yml b/test/integration/targets/k8s/tasks/waiter.yml
index b6e64488da..04a2bd09bf 100644
--- a/test/integration/targets/k8s/tasks/waiter.yml
+++ b/test/integration/targets/k8s/tasks/waiter.yml
@@ -6,6 +6,7 @@
 - block:
   - set_fact:
       wait_namespace: wait
+
   - name: ensure namespace exists
     k8s:
       definition:
@@ -20,14 +21,14 @@
         apiVersion: v1
         kind: Pod
         metadata:
-          name: "{{ wait_pod_name }}"
+          name: "{{ k8s_pod_name }}"
           namespace: "{{ wait_namespace }}"
-        spec: "{{ wait_pod_spec }}"
+        spec: "{{ k8s_pod_spec }}"
       wait: yes
     vars:
-      wait_pod_name: wait-pod
-      wait_pod_image: alpine:3.8
-      wait_pod_command:
+      k8s_pod_name: wait-pod
+      k8s_pod_image: alpine:3.8
+      k8s_pod_command:
         - sleep
         - "10000"
     register: wait_pod
@@ -49,13 +50,13 @@
         spec:
           selector:
             matchLabels:
-              app: "{{ wait_pod_name }}"
-          template: "{{ wait_pod_template }}"
+              app: "{{ k8s_pod_name }}"
+          template: "{{ k8s_pod_template }}"
       wait: yes
       wait_timeout: 180
     vars:
-      wait_pod_name: wait-ds
-      wait_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+      k8s_pod_name: wait-ds
+      k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
     register: ds
 
   - name: check that daemonset wait worked
@@ -74,15 +75,15 @@
         spec:
           selector:
             matchLabels:
-              app: "{{ wait_pod_name }}"
+              app: "{{ k8s_pod_name }}"
           updateStrategy:
             type: RollingUpdate
-          template: "{{ wait_pod_template }}"
+          template: "{{ k8s_pod_template }}"
       wait: yes
       wait_timeout: 180
     vars:
-      wait_pod_name: wait-ds
-      wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2
+      k8s_pod_name: wait-ds
+      k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
     register: update_ds_check_mode
 
   - name: check that check_mode returned changed
@@ -101,15 +102,15 @@
         spec:
           selector:
             matchLabels:
-              app: "{{ wait_pod_name }}"
+              app: "{{ k8s_pod_name }}"
           updateStrategy:
             type: RollingUpdate
-          template: "{{ wait_pod_template }}"
+          template: "{{ k8s_pod_template }}"
       wait: yes
       wait_timeout: 180
     vars:
-      wait_pod_name: wait-ds
-      wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2
+      k8s_pod_name: wait-ds
+      k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:3
     register: ds
 
   - name: get updated pods
@@ -125,7 +126,7 @@
     assert:
       that:
         - ds.result.status.currentNumberScheduled == ds.result.status.desiredNumberScheduled
-        - updated_ds_pods.resources[0].spec.containers[0].image.endswith(":2")
+        - updated_ds_pods.resources[0].spec.containers[0].image.endswith(":3")
 
   - name: add a crashing pod
     k8s:
@@ -133,15 +134,15 @@
         apiVersion: v1
         kind: Pod
         metadata:
-          name: "{{ wait_pod_name }}"
+          name: "{{ k8s_pod_name }}"
           namespace: "{{ wait_namespace }}"
-        spec: "{{ wait_pod_spec }}"
+        spec: "{{ k8s_pod_spec }}"
       wait: yes
       wait_timeout: 30
     vars:
-      wait_pod_name: wait-crash-pod
-      wait_pod_image: alpine:3.8
-      wait_pod_command:
+      k8s_pod_name: wait-crash-pod
+      k8s_pod_image: alpine:3.8
+      k8s_pod_command:
         - /bin/false
     register: crash_pod
     ignore_errors: yes
@@ -157,14 +158,14 @@
         apiVersion: v1
         kind: Pod
         metadata:
-          name: "{{ wait_pod_name }}"
+          name: "{{ k8s_pod_name }}"
           namespace: "{{ wait_namespace }}"
-        spec: "{{ wait_pod_spec }}"
+        spec: "{{ k8s_pod_spec }}"
       wait: yes
       wait_timeout: 30
     vars:
-      wait_pod_name: wait-no-image-pod
-      wait_pod_image: i_made_this_up:and_this_too
+      k8s_pod_name: wait-no-image-pod
+      k8s_pod_image: i_made_this_up:and_this_too
     register: no_image_pod
     ignore_errors: yes
@@ -185,13 +186,13 @@
           replicas: 3
           selector:
             matchLabels:
-              app: "{{ wait_pod_name }}"
-          template: "{{ wait_pod_template }}"
+              app: "{{ k8s_pod_name }}"
+          template: "{{ k8s_pod_template }}"
       wait: yes
     vars:
-      wait_pod_name: wait-deploy
-      wait_pod_image: gcr.io/kuar-demo/kuard-amd64:1
-      wait_pod_ports:
+      k8s_pod_name: wait-deploy
+      k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:1
+      k8s_pod_ports:
         - containerPort: 8080
           name: http
           protocol: TCP
@@ -215,13 +216,13 @@
           replicas: 3
           selector:
             matchLabels:
-              app: "{{ wait_pod_name }}"
-          template: "{{ wait_pod_template }}"
+              app: "{{ k8s_pod_name }}"
+          template: "{{ k8s_pod_template }}"
       wait: yes
     vars:
-      wait_pod_name: wait-deploy
-      wait_pod_image: gcr.io/kuar-demo/kuard-amd64:2
-      wait_pod_ports:
+      k8s_pod_name: wait-deploy
+      k8s_pod_image: gcr.io/kuar-demo/kuard-amd64:2
+      k8s_pod_ports:
         - containerPort: 8080
           name: http
           protocol: TCP
@@ -277,14 +278,14 @@
namespace: "{{ wait_namespace }}"
spec:
selector:
- app: "{{ wait_pod_name }}"
+ app: "{{ k8s_pod_name }}"
ports:
- port: 8080
targetPort: 8080
protocol: TCP
wait: yes
vars:
- wait_pod_name: wait-deploy
+ k8s_pod_name: wait-deploy
register: service
- name: assert that waiting for service works
@@ -304,13 +305,13 @@
           replicas: 3
           selector:
             matchLabels:
-              app: "{{ wait_pod_name }}"
-          template: "{{ wait_pod_template }}"
+              app: "{{ k8s_pod_name }}"
+          template: "{{ k8s_pod_template }}"
       wait: yes
     vars:
-      wait_pod_name: wait-crash-deploy
-      wait_pod_image: alpine:3.8
-      wait_pod_command:
+      k8s_pod_name: wait-crash-deploy
+      k8s_pod_image: alpine:3.8
+      k8s_pod_command:
         - /bin/false
     register: wait_crash_deploy
     ignore_errors: yes