WIP: Basic tests to verify setup and Gluster volume #73
base: master
.gitignore
@@ -4,3 +4,4 @@
*.retry
gcs-venv
kubectl
e2e/__pycache__
tests/files/nginx-app.yml
@@ -0,0 +1,19 @@
---
kind: Pod
apiVersion: v1
metadata:
  name: test-nginx
spec:
  volumes:
    - name: test-nginx-storage
      persistentVolumeClaim:
        claimName: gv1
  containers:
    - name: test-nginx-container
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: test-nginx-storage
tests/files/volume-claim.yml
@@ -0,0 +1,13 @@
---

Review comment (Member): same here, if it's needed to have a separate pod and PVC template we can separate it out in the example folder.

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: gv1
  annotations:
    volume.beta.kubernetes.io/storage-class: glusterfs-csi

Review comment (Member): Should be provided as

spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
@@ -0,0 +1,112 @@
from os import path

import pytest
from kubernetes import client, config, utils


@pytest.fixture
def core_v1_client():
    config.load_kube_config()
    return client.CoreV1Api()

def gluster_pods_verify(pods):

Review comment (Member): expected pod count needs to be the input
Review comment (Member): we can have a generic function which takes
Review comment (Author): good idea. ack

    cnt = 0
    expected = 3
    for pod in pods:
        if pod.metadata.namespace == "gcs" and pod.metadata.name.startswith("gluster-kube"):

Review comment: Please make the namespace a var.
Review comment (Author): ack

            cnt += 1

    assert cnt == expected, "number of gluster pods: %s (Expected %s)" % (cnt, expected)

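A minimal sketch of the generic pod-count helper suggested in the review comments above, with the namespace, name prefix, and expected count taken as parameters; the function name and signature are assumptions, not part of this PR:

def verify_pod_count(pods, namespace, prefix, expected):
    # Count pods in the given namespace whose name starts with the given prefix.
    cnt = sum(1 for pod in pods
              if pod.metadata.namespace == namespace
              and pod.metadata.name.startswith(prefix))
    assert cnt == expected, \
        "number of %s pods in %s: %s (Expected %s)" % (prefix, namespace, cnt, expected)

Each of the verify functions below could then reduce to a few calls such as verify_pod_count(pods, "gcs", "gluster-kube", 3).
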
def etcd_pods_verify(pods):
    etcd_cnt = 0
    etcd_cnt_expected = 3
    op_cnt = 0
    op_cnt_expected = 1
    for pod in pods:
        if pod.metadata.namespace == "gcs":
            if pod.metadata.name.startswith("etcd-operator"):

Review comment: pod names can be variables.

                op_cnt += 1
                continue

            if pod.metadata.name.startswith("etcd-"):

Review comment: pod names can be variables.

                etcd_cnt += 1

    assert op_cnt == op_cnt_expected, "number of etcd operator pods: %s (Expected %s)" % (op_cnt, op_cnt_expected)
    assert etcd_cnt == etcd_cnt_expected, "number of etcd pods: %s (Expected %s)" % (etcd_cnt, etcd_cnt_expected)

def csi_pods_verify(pods):
    num_nodeplugins_expected = 3
    num_provisioner_expected = 1
    num_attacher_expected = 1
    attacher_cnt = 0
    nodeplugins_cnt = 0
    provisioner_cnt = 0

    for pod in pods:
        if pod.metadata.namespace == "gcs":
            if pod.metadata.name.startswith("csi-attacher-glusterfsplugin"):
                attacher_cnt += 1

            if pod.metadata.name.startswith("csi-nodeplugin-glusterfsplugin"):
                nodeplugins_cnt += 1

            if pod.metadata.name.startswith("csi-provisioner-glusterfsplugin"):
                provisioner_cnt += 1

Review comment: pod names can be variables.

    assert attacher_cnt == num_attacher_expected, "number of csi-attachers: %s (Expected: %s)" % (attacher_cnt, num_attacher_expected)
    assert nodeplugins_cnt == num_nodeplugins_expected, "number of csi-nodeplugins: %s (Expected: %s)" % (nodeplugins_cnt, num_nodeplugins_expected)
    assert provisioner_cnt == num_provisioner_expected, "number of csi-provisioners: %s (Expected: %s)" % (provisioner_cnt, num_provisioner_expected)

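A sketch of the module-level variables that the "pod names can be variables" and "make the namespace a var" comments ask for; the variable names below are assumptions, not part of this PR:

GCS_NAMESPACE = "gcs"
GLUSTER_POD_PREFIX = "gluster-kube"
ETCD_OPERATOR_PREFIX = "etcd-operator"
ETCD_POD_PREFIX = "etcd-"
CSI_ATTACHER_PREFIX = "csi-attacher-glusterfsplugin"
CSI_NODEPLUGIN_PREFIX = "csi-nodeplugin-glusterfsplugin"
CSI_PROVISIONER_PREFIX = "csi-provisioner-glusterfsplugin"

The verify functions would then compare against these constants instead of hard-coded string literals.
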
def monitoring_pods_verify(pods):
    prom_cnt = 0
    prom_cnt_expected = 2
    op_cnt = 0
    op_cnt_expected = 1
    alert_cnt = 0
    alert_cnt_expected = 1
    grafana_cnt = 0
    grafana_cnt_expected = 1

    for pod in pods:
        if pod.metadata.namespace == "monitoring":
            if pod.metadata.name.startswith("prometheus-operator-"):
                op_cnt += 1

            if pod.metadata.name.startswith("alertmanager-alert-"):
                alert_cnt += 1

            if pod.metadata.name.startswith("grafana-"):
                grafana_cnt += 1

            if pod.metadata.name.startswith("prometheus-prometheus-"):
                prom_cnt += 1

    assert op_cnt == op_cnt_expected, "number of prometheus operator pods: %s (Expected %s)" % (op_cnt, op_cnt_expected)
    assert prom_cnt == prom_cnt_expected, "number of prometheus pods: %s (Expected %s)" % (prom_cnt, prom_cnt_expected)
    assert grafana_cnt == grafana_cnt_expected, "number of grafana pods: %s (Expected %s)" % (grafana_cnt, grafana_cnt_expected)
    assert alert_cnt == alert_cnt_expected, "number of alertmanager pods: %s (Expected %s)" % (alert_cnt, alert_cnt_expected)

def test_all_pods_running(core_v1_client):
    out = core_v1_client.list_pod_for_all_namespaces(watch=False)
    not_running_pods = []
    for pod in out.items:
        if pod.status.phase != "Running":
            # FIXME: Pending state of alertmanager is ignored
            if pod.metadata.name.startswith("alertmanager-alert-"):
                if pod.status.phase == "Pending":
                    continue

            not_running_pods.append((pod.metadata.name, pod.status.phase))

    assert len(not_running_pods) == 0, "Pods not in Running state: %s" % repr(not_running_pods)

    gluster_pods_verify(out.items)
    csi_pods_verify(out.items)
    monitoring_pods_verify(out.items)
    etcd_pods_verify(out.items)
@@ -0,0 +1,131 @@
import time

import pytest
from kubernetes import client, config, utils
from kubernetes.stream import stream

MESSAGE = "Hello from Gluster Volume!"


@pytest.fixture
def core_v1_client():
    config.load_kube_config()
    return client.CoreV1Api()


@pytest.fixture
def k8s_client():
    config.load_kube_config()
    return client.ApiClient()

# Write content to a file inside the pod via exec (e.g. /usr/share/nginx/html/index.html)
def write_file(core_v1_client, namespace, podname, filepath, content):
    cmd = [
        '/bin/sh',
        '-c',
        'echo "%s" > %s' % (content, filepath)]

    resp = stream(core_v1_client.connect_get_namespaced_pod_exec,
                  podname,
                  namespace,
                  command=cmd,
                  stderr=True,
                  stdin=False,
                  stdout=True,
                  tty=False)

    return resp

def read_file(core_v1_client, namespace, podname, filepath):
    cmd = [
        '/bin/sh',
        '-c',
        'cat %s' % filepath]

    resp = stream(core_v1_client.connect_get_namespaced_pod_exec,
                  podname,
                  namespace,
                  command=cmd,
                  stderr=True,
                  stdin=False,
                  stdout=True,
                  tty=False)

    return resp

def test_volume_use(core_v1_client, k8s_client):
    # Cleanup, if the volume exists from a previous test
    try:
        core_v1_client.delete_namespaced_persistent_volume_claim("gv1", "default", client.V1DeleteOptions())
    except Exception:
        pass

    # FIXME: Remove sleep
    time.sleep(50)

    # Create Gluster Volume
    k8s_api = utils.create_from_yaml(k8s_client, "tests/files/volume-claim.yml")

    # Wait till the volume gets created
    # FIXME: Remove sleep (see the polling sketch after this test)
    time.sleep(120)

    # Fetch the claim and check the volume is Bound
    v = k8s_api.read_namespaced_persistent_volume_claim("gv1", "default")
    assert v.status.phase == "Bound"

    # Cleanup, if the nginx pod exists from a previous test
    try:
        core_v1_client.delete_namespaced_pod("test-nginx", "default", client.V1DeleteOptions())
    except Exception:
        pass

    # FIXME: Remove sleep
    time.sleep(30)

    # Create an nginx pod that uses the persistent volume created above
    k8s_api = utils.create_from_yaml(k8s_client, "tests/files/nginx-app.yml")

    # FIXME: Remove sleep
    time.sleep(100)

    # Fetch the nginx pod and check it is Running
    nginx = core_v1_client.read_namespaced_pod("test-nginx", "default")
    assert nginx.status.phase == "Running"

    # Write sample content to the application pod's persistent storage
    write_file(core_v1_client, "default", "test-nginx",
               "/usr/share/nginx/html/index.html",
               MESSAGE)

    # Verify the content by reading it back
    data = read_file(core_v1_client, "default", "test-nginx",
                     "/usr/share/nginx/html/index.html")

    assert data.strip() == MESSAGE

    # Now delete the pod
    core_v1_client.delete_namespaced_pod("test-nginx", "default", client.V1DeleteOptions())

    # FIXME: Remove sleep
    time.sleep(30)

    # Create the pod again to verify that the same gluster volume is reused
    k8s_api = utils.create_from_yaml(k8s_client, "tests/files/nginx-app.yml")

    # FIXME: Remove sleep
    time.sleep(100)

    # Read the data again to check that it persisted

Review comment (Member): 👍

    data = read_file(core_v1_client, "default", "test-nginx",
                     "/usr/share/nginx/html/index.html")

    assert data.strip() == MESSAGE

    # TODO: Handle cleanup failure
    # Cleanup
    core_v1_client.delete_namespaced_pod("test-nginx", "default", client.V1DeleteOptions())
    core_v1_client.delete_namespaced_persistent_volume_claim("gv1", "default", client.V1DeleteOptions(), async_req=False)
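The FIXME sleeps above wait for the PVC to bind and the pod to start. A minimal polling sketch that could replace them, assuming a helper named wait_for_pvc_bound and arbitrary timeout values (neither is part of this PR):

def wait_for_pvc_bound(core_v1_client, name, namespace, timeout=300, interval=5):
    # Poll the PVC until its phase becomes "Bound" or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        pvc = core_v1_client.read_namespaced_persistent_volume_claim(name, namespace)
        if pvc.status.phase == "Bound":
            return pvc
        time.sleep(interval)
    raise AssertionError("PVC %s/%s not Bound after %s seconds" % (namespace, name, timeout))

An analogous loop over read_namespaced_pod, checking status.phase == "Running", could replace the sleeps around pod creation.
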
Review comment: Can't we make use of existing examples?

Review comment (Author): From where? Sorry, I didn't understand this comment.

Review comment: Currently we have example templates in https://github.com/gluster/gcs/blob/master/deploy/examples. Can we make use of those?

Review comment (Author): Got it. Let me check if I can use it. In that one, the volume claim and pod creation are in a single yaml file.

Review comment: We need to carefully consider the trade-off:

Perhaps a compromise would be to have a hybrid approach where we run the examples, ensuring they continue to work, but put the bulk of the custom test pods with the e2e tests?