From cf5cd1f1d9cb1d9afb3372c3091724dadcf6c796 Mon Sep 17 00:00:00 2001
From: Aravinda VK
Date: Fri, 23 Nov 2018 17:26:43 +0530
Subject: [PATCH] WIP: Basic tests to verify setup and Gluster volume

Two tests are available:

- Setup test: verifies the expected number of pods and that they are in
  Running state.
- CSI and Gluster volume test: creates a 1GiB Gluster volume using a
  Persistent Volume Claim and runs an nginx pod to verify the mount and
  that the data is persisted.

TODO:

- Dependencies (`pytest` and `kubernetes`) need to be installed
- Sleeps in the tests need to be removed (a possible polling approach is
  sketched after the patch)
- Internal modules need to be refactored
- Integrate pytest with tox and nose, similar to
  https://github.com/gluster/libgfapi-python
- Deploy these test scripts to the master node
- Create the infra for running these tests

How I tested:

- Copied `/root/.kube/config` from one of the nodes (kube1) to my local
  machine (`/home/aravinda/.kube/config`)
- Ran the tests from the local machine:

```
cd gcs
pytest e2e
```

Signed-off-by: Aravinda VK
---
 .gitignore                    |   1 +
 e2e/files/nginx-app.yml       |  19 +++
 e2e/files/volume-claim.yml    |  13 ++++
 e2e/test_0000_setup_verify.py | 112 +++++++++++++++++++++++++++++
 e2e/test_volume_usage.py      | 131 ++++++++++++++++++++++++++++++++++
 5 files changed, 276 insertions(+)
 create mode 100644 e2e/files/nginx-app.yml
 create mode 100644 e2e/files/volume-claim.yml
 create mode 100644 e2e/test_0000_setup_verify.py
 create mode 100644 e2e/test_volume_usage.py

diff --git a/.gitignore b/.gitignore
index 98c304d..231d2a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@
 *.retry
 gcs-venv
 kubectl
+e2e/__pycache__
\ No newline at end of file
diff --git a/e2e/files/nginx-app.yml b/e2e/files/nginx-app.yml
new file mode 100644
index 0000000..b57f7fa
--- /dev/null
+++ b/e2e/files/nginx-app.yml
@@ -0,0 +1,19 @@
+---
+kind: Pod
+apiVersion: v1
+metadata:
+  name: test-nginx
+spec:
+  volumes:
+    - name: test-nginx-storage
+      persistentVolumeClaim:
+        claimName: gv1
+  containers:
+    - name: test-nginx-container
+      image: nginx
+      ports:
+        - containerPort: 80
+          name: "http-server"
+      volumeMounts:
+        - mountPath: "/usr/share/nginx/html"
+          name: test-nginx-storage
diff --git a/e2e/files/volume-claim.yml b/e2e/files/volume-claim.yml
new file mode 100644
index 0000000..2e274a9
--- /dev/null
+++ b/e2e/files/volume-claim.yml
@@ -0,0 +1,13 @@
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: gv1
+  annotations:
+    volume.beta.kubernetes.io/storage-class: glusterfs-csi
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/e2e/test_0000_setup_verify.py b/e2e/test_0000_setup_verify.py
new file mode 100644
index 0000000..3414c98
--- /dev/null
+++ b/e2e/test_0000_setup_verify.py
@@ -0,0 +1,112 @@
+from os import path
+
+import pytest
+from kubernetes import client, config, utils
+
+
+@pytest.fixture
+def core_v1_client():
+    config.load_kube_config()
+    return client.CoreV1Api()
+
+
+def gluster_pods_verify(pods):
+    cnt = 0
+    expected = 3
+    for pod in pods:
+        if pod.metadata.namespace == "gcs" and pod.metadata.name.startswith("gluster-kube"):
+            cnt += 1
+
+    assert cnt == expected, "number of gluster pods: %s (Expected %s)" % (cnt, expected)
+
+
+def etcd_pods_verify(pods):
+    etcd_cnt = 0
+    etcd_cnt_expected = 3
+    op_cnt = 0
+    op_cnt_expected = 1
+    for pod in pods:
+        if pod.metadata.namespace == "gcs":
+            if pod.metadata.name.startswith("etcd-operator"):
+                op_cnt += 1
+                continue
+
+            if pod.metadata.name.startswith("etcd-"):
+                etcd_cnt += 1
+
+    assert op_cnt == op_cnt_expected, "number of etcd operator pods: %s (Expected %s)" % (op_cnt, op_cnt_expected)
+    assert etcd_cnt == etcd_cnt_expected, "number of etcd pods: %s (Expected %s)" % (etcd_cnt, etcd_cnt_expected)
+
+
+def csi_pods_verify(pods):
+    num_nodeplugins_expected = 3
+    num_provisioner_expected = 1
+    num_attacher_expected = 1
+    attacher_cnt = 0
+    nodeplugins_cnt = 0
+    provisioner_cnt = 0
+
+    for pod in pods:
+        if pod.metadata.namespace == "gcs":
+            if pod.metadata.name.startswith("csi-attacher-glusterfsplugin"):
+                attacher_cnt += 1
+
+            if pod.metadata.name.startswith("csi-nodeplugin-glusterfsplugin"):
+                nodeplugins_cnt += 1
+
+            if pod.metadata.name.startswith("csi-provisioner-glusterfsplugin"):
+                provisioner_cnt += 1
+
+    assert attacher_cnt == num_attacher_expected, "number of csi-attachers: %s (Expected: %s)" % (attacher_cnt, num_attacher_expected)
+    assert nodeplugins_cnt == num_nodeplugins_expected, "number of csi-nodeplugins: %s (Expected: %s)" % (nodeplugins_cnt, num_nodeplugins_expected)
+    assert provisioner_cnt == num_provisioner_expected, "number of csi-provisioners: %s (Expected: %s)" % (provisioner_cnt, num_provisioner_expected)
+
+
+def monitoring_pods_verify(pods):
+    prom_cnt = 0
+    prom_cnt_expected = 2
+    op_cnt = 0
+    op_cnt_expected = 1
+    alert_cnt = 0
+    alert_cnt_expected = 1
+    grafana_cnt = 0
+    grafana_cnt_expected = 1
+
+    for pod in pods:
+        if pod.metadata.namespace == "monitoring":
+            if pod.metadata.name.startswith("prometheus-operator-"):
+                op_cnt += 1
+
+            if pod.metadata.name.startswith("alertmanager-alert-"):
+                alert_cnt += 1
+
+            if pod.metadata.name.startswith("grafana-"):
+                grafana_cnt += 1
+
+            if pod.metadata.name.startswith("prometheus-prometheus-"):
+                prom_cnt += 1
+
+    assert op_cnt == op_cnt_expected, "number of prometheus operator pods: %s (Expected %s)" % (op_cnt, op_cnt_expected)
+    assert prom_cnt == prom_cnt_expected, "number of prometheus pods: %s (Expected %s)" % (prom_cnt, prom_cnt_expected)
+    assert grafana_cnt == grafana_cnt_expected, "number of grafana pods: %s (Expected %s)" % (grafana_cnt, grafana_cnt_expected)
+    assert alert_cnt == alert_cnt_expected, "number of alertmanager pods: %s (Expected %s)" % (alert_cnt, alert_cnt_expected)
+
+
+def test_all_pods_running(core_v1_client):
+    out = core_v1_client.list_pod_for_all_namespaces(watch=False)
+    not_running_pods = []
+    for pod in out.items:
+        if pod.status.phase != "Running":
+            # FIXME: Pending state of alertmanager is ignored
+            if pod.metadata.name.startswith("alertmanager-alert-"):
+                if pod.status.phase == "Pending":
+                    continue
+
+            not_running_pods.append((pod.metadata.name, pod.status.phase))
+
+    assert len(not_running_pods) == 0, "Pods not in Running state: %s" % repr(not_running_pods)
+
+    gluster_pods_verify(out.items)
+    csi_pods_verify(out.items)
+    monitoring_pods_verify(out.items)
+    etcd_pods_verify(out.items)
diff --git a/e2e/test_volume_usage.py b/e2e/test_volume_usage.py
new file mode 100644
index 0000000..d971224
--- /dev/null
+++ b/e2e/test_volume_usage.py
@@ -0,0 +1,131 @@
+import time
+
+import pytest
+from kubernetes import client, config, utils
+from kubernetes.stream import stream
+
+MESSAGE = "Hello from Gluster Volume!"
+
+
+@pytest.fixture
+def core_v1_client():
+    config.load_kube_config()
+    return client.CoreV1Api()
+
+
+@pytest.fixture
+def k8s_client():
+    config.load_kube_config()
+    return client.ApiClient()
+
+
+# /usr/share/nginx/html/index.html
+def write_file(core_v1_client, namespace, podname, filepath, content):
+    cmd = [
+        '/bin/sh',
+        '-c',
+        'echo "%s" > %s' % (content, filepath)]
+
+    resp = stream(core_v1_client.connect_get_namespaced_pod_exec,
+                  podname,
+                  namespace,
+                  command=cmd,
+                  stderr=True,
+                  stdin=False,
+                  stdout=True,
+                  tty=False)
+
+    return resp
+
+
+def read_file(core_v1_client, namespace, podname, filepath):
+    cmd = [
+        '/bin/sh',
+        '-c',
+        'cat %s' % filepath]
+
+    resp = stream(core_v1_client.connect_get_namespaced_pod_exec,
+                  podname,
+                  namespace,
+                  command=cmd,
+                  stderr=True,
+                  stdin=False,
+                  stdout=True,
+                  tty=False)
+
+    return resp
+
+
+def test_volume_use(core_v1_client, k8s_client):
+    # Cleanup, if volume exists from a previous test run
+    try:
+        core_v1_client.delete_namespaced_persistent_volume_claim("gv1", "default", client.V1DeleteOptions())
+    except Exception:
+        pass
+
+    # FIXME: Remove Sleep
+    time.sleep(50)
+
+    # Create Gluster Volume
+    utils.create_from_yaml(k8s_client, "e2e/files/volume-claim.yml")
+
+    # Wait till volume gets created
+    # FIXME: Remove Sleep
+    time.sleep(120)
+
+    # Fetch the claim and check the Volume is Bound
+    v = core_v1_client.read_namespaced_persistent_volume_claim("gv1", "default")
+    assert v.status.phase == "Bound"
+
+    # Cleanup, if nginx pod exists from a previous test run
+    try:
+        core_v1_client.delete_namespaced_pod("test-nginx", "default", client.V1DeleteOptions())
+    except Exception:
+        pass
+
+    # FIXME: Remove sleep
+    time.sleep(30)
+
+    # Create nginx pod to use the persistent volume created above
+    utils.create_from_yaml(k8s_client, "e2e/files/nginx-app.yml")
+
+    # FIXME: Remove sleep
+    time.sleep(100)
+
+    # Fetch the nginx pod and check it is Running
+    nginx = core_v1_client.read_namespaced_pod("test-nginx", "default")
+    assert nginx.status.phase == "Running"
+
+    # Write sample content to the application pod's persistent storage
+    write_file(core_v1_client, "default", "test-nginx",
+               "/usr/share/nginx/html/index.html",
+               MESSAGE)
+
+    # Verify the content by reading it back
+    data = read_file(core_v1_client, "default", "test-nginx",
+                     "/usr/share/nginx/html/index.html")
+
+    assert data.strip() == MESSAGE
+
+    # Now delete the pod
+    core_v1_client.delete_namespaced_pod("test-nginx", "default", client.V1DeleteOptions())
+
+    # FIXME: Remove sleep
+    time.sleep(30)
+
+    # Create the pod again to verify the same gluster volume is reused
+    utils.create_from_yaml(k8s_client, "e2e/files/nginx-app.yml")
+
+    # FIXME: Remove sleep
+    time.sleep(100)
+
+    # Read the data again to verify it persisted
+    data = read_file(core_v1_client, "default", "test-nginx",
+                     "/usr/share/nginx/html/index.html")
+
+    assert data.strip() == MESSAGE
+
+    # TODO: Handle cleanup failure
+    # Cleanup
+    core_v1_client.delete_namespaced_pod("test-nginx", "default", client.V1DeleteOptions())
+    core_v1_client.delete_namespaced_persistent_volume_claim("gv1", "default", client.V1DeleteOptions(), async_req=False)
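One possible shape for the "remove the sleeps" TODO, sketched only as an illustration and not part of the patch above: poll the cluster with a bounded timeout instead of sleeping for fixed durations. The helper names (`wait_for`, `pvc_is_bound`, `pod_is_running`, `pod_is_absent`) and the timeout values are assumptions made up for this sketch; the API calls are the same `kubernetes` client calls the tests already use.

```
# Sketch only: polling helpers that could replace the fixed time.sleep()
# calls in test_volume_use. Names and timeouts here are illustrative.
import time

from kubernetes import client, config
from kubernetes.client.rest import ApiException


def wait_for(check, timeout=300, interval=5):
    """Call check() every `interval` seconds until it returns True or
    `timeout` seconds have passed; return whether it succeeded."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return True
        time.sleep(interval)
    return False


def pvc_is_bound(core_v1, name, namespace="default"):
    pvc = core_v1.read_namespaced_persistent_volume_claim(name, namespace)
    return pvc.status.phase == "Bound"


def pod_is_running(core_v1, name, namespace="default"):
    pod = core_v1.read_namespaced_pod(name, namespace)
    return pod.status.phase == "Running"


def pod_is_absent(core_v1, name, namespace="default"):
    # Pod deletion is asynchronous; the pod is gone once the API returns 404.
    try:
        core_v1.read_namespaced_pod(name, namespace)
        return False
    except ApiException as err:
        return err.status == 404


if __name__ == "__main__":
    config.load_kube_config()
    core_v1 = client.CoreV1Api()

    # Instead of time.sleep(120) after creating the claim:
    assert wait_for(lambda: pvc_is_bound(core_v1, "gv1")), "gv1 not Bound"

    # Instead of time.sleep(100) after creating the nginx pod:
    assert wait_for(lambda: pod_is_running(core_v1, "test-nginx")), \
        "test-nginx not Running"
```

With helpers like these, each `time.sleep()` in `test_volume_use` could become a `wait_for(...)` call with an assertion on the result, and `pod_is_absent` could replace the sleeps that follow the delete calls.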