diff --git a/.gitignore b/.gitignore index 525b230c8f..cd85688de7 100644 --- a/.gitignore +++ b/.gitignore @@ -13,4 +13,13 @@ dist/ build/ *.egg-info/ -app_cpp/ \ No newline at end of file +app_cpp/ + +.terraform +.terraform.lock.hcl +terraform.tfstate +authorized_key.json +terraform.tfstate.backup +config.auto.tfvars + +visits.txt \ No newline at end of file diff --git a/ansible/ANSIBLE.md b/ansible/ANSIBLE.md new file mode 100644 index 0000000000..92b7041ef4 --- /dev/null +++ b/ansible/ANSIBLE.md @@ -0,0 +1,167 @@ +# Ansible lab +## Inventory +`ansible/inventory/yandex_cloud` file contains ip address for vm where I am installing docker and docker-compose + +## Playbooks +`ansible/playbooks/dev/main.yml` runs the docker role which installs docker and docke-compose in the VM. + +## commands output + +### **`ansible-playbook playbooks/dev/main.yml --diff`** +``` + PLAY [Run Docker role] **************************************************************************************************** + + TASK [Gathering Facts] **************************************************************************************************** + ok: [51.250.102.185] + + TASK [docker : Update apt] ************************************************************************************************ + ok: [51.250.102.185] + + TASK [docker : Install pip] *********************************************************************************************** + The following additional packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential cpp cpp-9 + dpkg-dev fakeroot g++ g++-9 gcc gcc-10-base gcc-9 gcc-9-base + libalgorithm-diff-perl libalgorithm-diff-xs-perl libalgorithm-merge-perl + libasan5 libatomic1 libbinutils libc-dev-bin libc6 libc6-dev libcc1-0 + libcrypt-dev libctf-nobfd0 libctf0 libdpkg-perl libexpat1 libexpat1-dev + libfakeroot libfile-fcntllock-perl libgcc-9-dev libgcc-s1 libgdbm-compat4 + libgomp1 libisl22 libitm1 liblsan0 libmpc3 libmpfr6 libperl5.30 + 
libpython3-dev libpython3.8 libpython3.8-dev libpython3.8-minimal + libpython3.8-stdlib libquadmath0 libstdc++-9-dev libstdc++6 libtsan0 + libubsan1 linux-libc-dev make manpages-dev patch perl perl-base + perl-modules-5.30 python-pip-whl python3-dev python3-pkg-resources + python3-wheel python3.8 python3.8-dev python3.8-minimal zlib1g zlib1g-dev + Suggested packages: + binutils-doc cpp-doc gcc-9-locales debian-keyring g++-multilib + g++-9-multilib gcc-9-doc gcc-multilib autoconf automake libtool flex bison + gdb gcc-doc gcc-9-multilib glibc-doc git bzr libstdc++-9-doc make-doc + diffutils-doc perl-doc libterm-readline-gnu-perl + | libterm-readline-perl-perl libb-debug-perl liblocale-codes-perl + python-setuptools-doc python3.8-venv python3.8-doc binfmt-support + The following NEW packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential cpp cpp-9 + dpkg-dev fakeroot g++ g++-9 gcc gcc-9 gcc-9-base libalgorithm-diff-perl + libalgorithm-diff-xs-perl libalgorithm-merge-perl libasan5 libatomic1 + libbinutils libc-dev-bin libc6-dev libcc1-0 libcrypt-dev libctf-nobfd0 + libctf0 libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl + libgcc-9-dev libgdbm-compat4 libgomp1 libisl22 libitm1 liblsan0 libmpc3 + libmpfr6 libperl5.30 libpython3-dev libpython3.8-dev libquadmath0 + libstdc++-9-dev libtsan0 libubsan1 linux-libc-dev make manpages-dev patch + perl perl-modules-5.30 python-pip-whl python3-dev python3-pip python3-wheel + python3.8-dev zlib1g-dev + The following packages will be upgraded: + gcc-10-base libc6 libexpat1 libgcc-s1 libpython3.8 libpython3.8-minimal + libpython3.8-stdlib libstdc++6 perl-base python3-pkg-resources + python3-setuptools python3.8 python3.8-minimal zlib1g + 14 upgraded, 56 newly installed, 0 to remove and 180 not upgraded. 
+ changed: [51.250.102.185] + + TASK [docker : Install Docker dependencies] ******************************************************************************* + ok: [51.250.102.185] + + TASK [docker : Add Docker GPG key] **************************************************************************************** + changed: [51.250.102.185] + + TASK [docker : Add Docker repository] ************************************************************************************* + --- before: /dev/null + +++ after: /etc/apt/sources.list.d/docker.list + @@ -0,0 +1 @@ + +deb https://download.docker.com/linux/ubuntu bionic stable + + changed: [51.250.102.185] + + TASK [docker : Install Docker] ******************************************************************************************** + The following additional packages will be installed: + containerd.io docker-buildx-plugin docker-ce-cli docker-ce-rootless-extras + docker-compose-plugin git git-man libcurl3-gnutls liberror-perl pigz + slirp4netns + Suggested packages: + aufs-tools cgroupfs-mount | cgroup-lite git-daemon-run | git-daemon-sysvinit + git-doc git-el git-email git-gui gitk gitweb git-cvs git-mediawiki git-svn + The following NEW packages will be installed: + containerd.io docker-buildx-plugin docker-ce docker-ce-cli + docker-ce-rootless-extras docker-compose-plugin git git-man libcurl3-gnutls + liberror-perl pigz slirp4netns + 0 upgraded, 12 newly installed, 0 to remove and 180 not upgraded. 
+ changed: [51.250.102.185] + + TASK [docker : Upgrade pip] *********************************************************************************************** + changed: [51.250.102.185] + + TASK [docker : Install docker-compose] ************************************************************************************ + changed: [51.250.102.185] + + PLAY RECAP **************************************************************************************************************** + 51.250.102.185 : ok=9 changed=6 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` + +### **`ansible-inventory -i inventory/yandex_cloud.yml --list`** +```json + { + "_meta": { + "hostvars": { + "51.250.102.185": { + "ansible_user": "ubuntu" + } + } + }, + "all": { + "children": [ + "myhost", + "ungrouped" + ] + }, + "myhost": { + "hosts": [ + "51.250.102.185" + ] + } + } +``` + +### **`ansible-playbook playbooks/dev/main.yml -i inventory/yandex_cloud.yml`** +``` + PLAY [Install docker & deploy python app] *************************************************************************************** + + TASK [Gathering Facts] ********************************************************************************************************** + ok: [62.84.123.211] + + TASK [docker : Update apt] ****************************************************************************************************** + ok: [62.84.123.211] + + TASK [docker : Install pip] ***************************************************************************************************** + ok: [62.84.123.211] + + TASK [docker : Install Docker dependencies] ************************************************************************************* + ok: [62.84.123.211] + + TASK [docker : Add Docker GPG key] ********************************************************************************************** + ok: [62.84.123.211] + + TASK [docker : Add Docker repository] ******************************************************************************************* + ok: 
[62.84.123.211] + + TASK [docker : Install Docker] ************************************************************************************************** + ok: [62.84.123.211] + + TASK [docker : Install docker-compose] ****************************************************************************************** + ok: [62.84.123.211] + + TASK [web_app : create project directory] *************************************************************************************** + changed: [62.84.123.211] + + TASK [web_app : start docker] *************************************************************************************************** + ok: [62.84.123.211] + + TASK [web_app : pull the image] ************************************************************************************************* + ok: [62.84.123.211] + + TASK [web_app : create docker-compose file] ************************************************************************************* + changed: [62.84.123.211] + + TASK [web_app : run the container] ********************************************************************************************** + changed: [62.84.123.211] + + PLAY RECAP ********************************************************************************************************************** + 62.84.123.211 : ok=13 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` \ No newline at end of file diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..c83c2ba7bd --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,3 @@ +[defaults] +inventory = ./inventory +roles_path = ./roles \ No newline at end of file diff --git a/ansible/inventory/yandex_cloud.yml b/ansible/inventory/yandex_cloud.yml new file mode 100644 index 0000000000..9898d9112c --- /dev/null +++ b/ansible/inventory/yandex_cloud.yml @@ -0,0 +1,4 @@ +myhost: + hosts: + 62.84.123.211: + ansible_user: ubuntu \ No newline at end of file diff --git a/ansible/playbooks/dev/main.yml b/ansible/playbooks/dev/main.yml new 
file mode 100644 index 0000000000..5f1493d2e2 --- /dev/null +++ b/ansible/playbooks/dev/main.yml @@ -0,0 +1,11 @@ +- name: Install docker & deploy python app + hosts: all + become: true + vars: + folder: "/app_python" + image: "vectorsmaster/flask-app" + ex_port: "5000" + in_port: "5000" + roles: + - docker + - web_app \ No newline at end of file diff --git a/ansible/roles/docker/README.md b/ansible/roles/docker/README.md new file mode 100644 index 0000000000..4161f96a00 --- /dev/null +++ b/ansible/roles/docker/README.md @@ -0,0 +1,17 @@ +# Docker Role + +## Overview +It's an ansible role that installs docker in ubuntu + +## requirements + +1. `ubuntu` +2. `python` + +## installation details + +`main.yaml`: +1. Upgrade apt. +2. Install and upgrade pip. +3. Install docker using apt. +4. Install docker-compose using pip. diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..ccc3b1cdbd --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,58 @@ +--- +# Edition can be one of: 'ce' (Community Edition) or 'ee' (Enterprise Edition). +docker_edition: 'ce' +docker_packages: + - "docker-{{ docker_edition }}" + - "docker-{{ docker_edition }}-cli" + - "docker-{{ docker_edition }}-rootless-extras" + - "containerd.io" + - docker-buildx-plugin +docker_packages_state: present + +# Service options. +docker_service_manage: true +docker_service_state: started +docker_service_enabled: true +docker_restart_handler_state: restarted + +# Docker Compose Plugin options. +docker_install_compose_plugin: true +docker_compose_package: docker-compose-plugin +docker_compose_package_state: present + +# Docker Compose options. 
+docker_install_compose: false +docker_compose_version: "v2.11.1" +docker_compose_arch: "{{ ansible_architecture }}" +docker_compose_url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-{{ docker_compose_arch }}" +docker_compose_path: /usr/local/bin/docker-compose + +# Enable repo setup +docker_add_repo: true + +# Docker repo URL. +docker_repo_url: https://download.docker.com/linux + +# Used only for Debian/Ubuntu/Pop!_OS/Linux Mint. Switch 'stable' to 'nightly' if needed. +docker_apt_release_channel: stable +# docker_apt_ansible_distribution is a workaround for Ubuntu variants which can't be identified as such by Ansible, +# and is only necessary until Docker officially supports them. +docker_apt_ansible_distribution: "{{ 'ubuntu' if ansible_distribution in ['Pop!_OS', 'Linux Mint'] else ansible_distribution }}" +docker_apt_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}" +docker_apt_repository: "deb [arch={{ docker_apt_arch }} signed-by=/etc/apt/trusted.gpg.d/docker.asc] {{ docker_repo_url }}/{{ docker_apt_ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}" +docker_apt_ignore_key_error: true +docker_apt_gpg_key: "{{ docker_repo_url }}/{{ docker_apt_ansible_distribution | lower }}/gpg" +docker_apt_gpg_key_checksum: "sha256:1500c1f56fa9e26b9b8f42452a553675796ade0807cdce11975eb98170b3a570" +docker_apt_filename: "docker" + +# Used only for RedHat/CentOS/Fedora. +docker_yum_repo_url: "{{ docker_repo_url }}/{{ (ansible_distribution == 'Fedora') | ternary('fedora','centos') }}/docker-{{ docker_edition }}.repo" +docker_yum_repo_enable_nightly: '0' +docker_yum_repo_enable_test: '0' +docker_yum_gpg_key: "{{ docker_repo_url }}/centos/gpg" + +# A list of users who will be added to the docker group. 
+docker_users: [] + +# Docker daemon options as a dict +docker_daemon_options: {} diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..72594c8c18 --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,7 @@ +--- +- name: restart docker + service: + name: docker + state: "{{ docker_restart_handler_state }}" + ignore_errors: "{{ ansible_check_mode }}" + when: docker_service_manage | bool diff --git a/ansible/roles/docker/tasks/install_compose.yml b/ansible/roles/docker/tasks/install_compose.yml new file mode 100644 index 0000000000..dd3468a72c --- /dev/null +++ b/ansible/roles/docker/tasks/install_compose.yml @@ -0,0 +1,4 @@ +- name: Install docker-compose + ansible.builtin.pip: + name: docker-compose + state: latest diff --git a/ansible/roles/docker/tasks/install_docker.yml b/ansible/roles/docker/tasks/install_docker.yml new file mode 100644 index 0000000000..5ba2420cc4 --- /dev/null +++ b/ansible/roles/docker/tasks/install_docker.yml @@ -0,0 +1,26 @@ +- name: Install Docker dependencies + apt: + name: + - apt-transport-https + - ca-certificates + - curl + - software-properties-common + state: present + update_cache: true + +- name: Add Docker GPG key + apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + +- name: Add Docker repository + apt_repository: + repo: deb https://download.docker.com/linux/ubuntu bionic stable + filename: docker + state: present + +- name: Install Docker + apt: + name: docker-ce + state: present + update_cache: true diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..016872d511 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,20 @@ +- name: Update apt + apt: + name: apt + state: latest + update_cache: true + +- name: Install pip + apt: + name: + - python3-pip + - python3-setuptools + state: latest + update_cache: true + + +- name: 
Install Docker + import_tasks: install_docker.yml + +- name: Install docker-compose + import_tasks: install_compose.yml diff --git a/ansible/roles/web_app/README.md b/ansible/roles/web_app/README.md new file mode 100644 index 0000000000..1921fb1ed8 --- /dev/null +++ b/ansible/roles/web_app/README.md @@ -0,0 +1,29 @@ +# web_app role + +## Overview +It's an ansible role pulls vectorsmaster/flask-app image, run it, wipe docker container if stated + +## Requirements + +1. `ubuntu`. +2. `python`. +3. `docker` role. + +## Usage (navigate into ansible directory) + +1. `ansible-playbook playbooks/dev/main.yml -i inventory/yandex_cloud.yml`. + - pull the image and run it inside container. + - wipe the container. + +2. `ansible-playbook playbooks/dev/main.yml -i inventory/yandex_cloud.yml --tags "deploy"`. + - pull the image and run it inside container. + +3. `ansible-playbook playbooks/dev/main.yml -i inventory/yandex_cloud.yml --tags "wipe"`. + - wipe the container. + +## Notes + +- The container will be deployed at http://:5000 + +- You may need to upgrade ansible `pip install --upgrade ansible`. 
+ diff --git a/ansible/roles/web_app/meta/main.yml b/ansible/roles/web_app/meta/main.yml new file mode 100644 index 0000000000..b456d40b27 --- /dev/null +++ b/ansible/roles/web_app/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: docker \ No newline at end of file diff --git a/ansible/roles/web_app/tasks/main.yml b/ansible/roles/web_app/tasks/main.yml new file mode 100644 index 0000000000..4b431e1545 --- /dev/null +++ b/ansible/roles/web_app/tasks/main.yml @@ -0,0 +1,39 @@ +- name: create project directory + ansible.builtin.file: + name: "{{ folder }}/" + state: directory + tags: + - deploy + +- name: deploy python app + block: + - name: start docker + service: + name: docker + state: started + enabled: yes + + - name: pull the image + docker_image: + name: "{{ image }}" + source: pull + state: present + + - name: create docker-compose file + template: + src: docker-compose.yml.j2 + dest: "{{ folder }}/docker-compose.yml" + + - name: run the container + community.docker.docker_compose_v2: + files: + - "{{ folder }}/docker-compose.yml" + project_src: "{{ folder }}/" + tags: + - deploy + +- name: wipe python app + import_tasks: wipe.yml + when: web_app_full_wipe == true + tags: + - wipe \ No newline at end of file diff --git a/ansible/roles/web_app/tasks/wipe.yml b/ansible/roles/web_app/tasks/wipe.yml new file mode 100644 index 0000000000..d10c6c137b --- /dev/null +++ b/ansible/roles/web_app/tasks/wipe.yml @@ -0,0 +1,13 @@ +- name: Remove the container + community.docker.docker_compose_v2: + project_src: "{{ folder }}" + state: stopped + remove_images: all + remove_volumes: true + remove_orphans: true + +- name: Remove the directory + ansible.builtin.file: + path: "{{ folder }}/" + state: absent + diff --git a/ansible/roles/web_app/templates/docker-compose.yml.j2 b/ansible/roles/web_app/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..b21426c9b2 --- /dev/null +++ b/ansible/roles/web_app/templates/docker-compose.yml.j2 @@ -0,0 +1,7 @@ 
+version: '3' + +services: + web_app: + image: {{ image }} + ports: + - {{ in_port }}:{{ ex_port }} \ No newline at end of file diff --git a/ansible/roles/web_app/vars/main.yml b/ansible/roles/web_app/vars/main.yml new file mode 100644 index 0000000000..db89327a0b --- /dev/null +++ b/ansible/roles/web_app/vars/main.yml @@ -0,0 +1 @@ +web_app_full_wipe: true \ No newline at end of file diff --git a/app_python/README.md b/app_python/README.md index 4020b4f4f5..9716d4b1db 100644 --- a/app_python/README.md +++ b/app_python/README.md @@ -4,7 +4,7 @@ ## Overview -This is a flask web application that renders current time in Moscow zone. +This is a flask web application that renders current time in Moscow zone in the home page `host_url/` and renders number of visits to the home page in `host_url/visits` ## Steps to run the application @@ -58,6 +58,23 @@ There exists 3 Unit tests: 1. `docker pull yourusername/name` 2. `docker run -p 5000:5000 yourusername/name` +## Steps to run docker-compose +```docker-compose +version: '3' + +services: + web: + image: yourusername/flask-app + ports: + - "5000:5000" + volumes: + - ./visits.txt:/app_python/visits.txt +``` +1. `docker compose pull` +2. `docker compose up` + +In this Docker Compose configuration, the purpose of the "volumes" section is to create a persistent data storage mechanism for the container. Specifically, it binds the local directory ./visits.txt to the directory /app-python/visits.txt inside the container, in order to see the number of visits on the host machine. So make sure this file exists before running `docker compose up`. + ## CI workflow This project is set up with continuous integration (CI) using GitHub Actions. 
The CI workflow performs the following steps on every push: diff --git a/app_python/docker-compose.yaml b/app_python/docker-compose.yaml new file mode 100644 index 0000000000..c80b955365 --- /dev/null +++ b/app_python/docker-compose.yaml @@ -0,0 +1,9 @@ +version: '3' + +services: + web: + image: vectorsmaster/flask-app + ports: + - "5000:5000" + volumes: + - ./visits.txt:/app_python/visits.txt diff --git a/app_python/flaskr/app.py b/app_python/flaskr/app.py index 3f788a0b08..447212e9f7 100644 --- a/app_python/flaskr/app.py +++ b/app_python/flaskr/app.py @@ -1,17 +1,68 @@ -from flask import Flask, render_template +from flask import Flask, render_template, Response import pytz from datetime import datetime +from prometheus_client import Counter, Gauge, Histogram +import prometheus_client +import time +import os app = Flask(__name__) +REQUEST_COUNT = Counter( + 'http_requests_total', + 'Total HTTP Requests', + ['method', 'endpoint']) + +REQUEST_DURATION = Histogram( + 'http_request_duration_seconds', + 'HTTP Request Duration', + ['method', 'endpoint']) + +ACTIVE_USERS = Gauge('active_users', 'Number of Active Users') + + +def get_number_of_visits(): + if not os.path.exists('visits.txt'): + with open('visits.txt', 'w') as f: + f.write('0') + with open('visits.txt', 'r') as f: + return int(f.read().strip()) + + +def update_number_of_visits(): + visits_count = get_number_of_visits() + visits_count += 1 + with open('visits.txt', 'w') as f: + f.write(str(visits_count)) + + +@app.route('/visits') +def visits(): + return render_template('visits.html', visits_count=get_number_of_visits()) + @app.route('/') def index(): + update_number_of_visits() + REQUEST_COUNT.labels(method='GET', endpoint='/').inc() + start_time = time.time() # get time in Moscow time zone msk_time = datetime.now(pytz.timezone( 'Europe/Moscow')).strftime('%Y-%m-%d %H:%M:%S') + REQUEST_DURATION.labels(method='GET', endpoint='/').observe( + time.time() - start_time) return render_template('index.html', 
msk_time=msk_time) +@app.route('/metrics') +def metrics(): + return Response(prometheus_client.generate_latest(), mimetype='text/plain') + + +@app.route('/health') +def health_check(): + return Response(status=200) + + if __name__ == '__main__': - app.run(debug=True) + app.run(debug=False) diff --git a/app_python/flaskr/templates/visits.html b/app_python/flaskr/templates/visits.html new file mode 100644 index 0000000000..85e315dd8f --- /dev/null +++ b/app_python/flaskr/templates/visits.html @@ -0,0 +1,10 @@ + + + + visits + + +

Visits Count

+

{{ visits_count }}

+ + diff --git a/app_python/requirements.txt b/app_python/requirements.txt index dbdf76fd08..700ca5b7c6 100644 Binary files a/app_python/requirements.txt and b/app_python/requirements.txt differ diff --git a/k8s/11.md b/k8s/11.md new file mode 100644 index 0000000000..eae8fda9a4 --- /dev/null +++ b/k8s/11.md @@ -0,0 +1,134 @@ +# lab 11 +## Task 1 +1. **Create the secret** + ```terminal + $ echo -n 'admin' > ./username.txt + $ echo -n 'S!B\*d$zDsb=' > ./password.txt + $ kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt + secret/db-user-pass created + ``` + +2. **Verfify** + ```terminal + $ kubectl get secrets + NAME TYPE DATA AGE + db-user-pass Opaque 2 8m27s + + $ kubectl describe secret db-user-pass + Name: db-user-pass + Namespace: default + Labels: + Annotations: + + Type: Opaque + + Data + ==== + password.txt: 12 bytes + username.txt: 5 bytes + ``` + +3. **Decode** + ```terminal + $ kubectl get secret db-user-pass -o jsonpath='{.data}' + {"password.txt":"UyFCXCpkJHpEc2I9","username.txt":"YWRtaW4="} + + $ echo 'UyFCXCpkJHpEc2I9' | base64 --decode + S!B\*d$zDsb= + + ``` + +4. **Manage secrets with helm** + ```terminal + $ gpg --list-keys + /home/sarhan/.gnupg/pubring.kbx + ------------------------------- + pub rsa3072 2024-04-23 [SC] [expires: 2026-04-23] + 12767A9AC61D56B8A6AE2680F968B06BE51CF6A8 + uid [ultimate] Ahmad + sub rsa3072 2024-04-23 [E] [expires: 2026-04-23] + + $ sops -p 12767A9AC61D56B8A6AE2680F968B06BE51CF6A8 secrets.yaml + password: secret1234 + + $ helm secrets view secrets.yaml + password: secret1234 + + $ helm secrets install app-python ./app-python -n default -f ./secrets.yaml + NAME: app-python + LAST DEPLOYED: Tue Apr 23 23:29:00 2024 + NAMESPACE: default + STATUS: deployed + REVISION: 1 + NOTES: + 1. Get the application URL by running these commands: + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get --namespace default svc -w app-python' + export SERVICE_IP=$(kubectl get svc --namespace default app-python --template "{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}") + echo http://$SERVICE_IP:5000 + removed './secrets.yaml.dec' + + $ helm ls + NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION + app-python default 1 2024-04-23 23:29:00.300040908 +0300 MSK deployed app-python-0.1.0 1.16.0 + + $ kubectl get pods + NAME READY STATUS RESTARTS AGE + app-python-6f7ff4f67b-kfg2c 1/1 Running 0 4m52s + + $ kubectl exec app-python-6f7ff4f67b-kfg2c -- printenv | grep MY_PASSWORD + MY_PASSWORD=secret1234 + ``` + + ![secrets](assets/secrets.png) + +## Task 2 + +1. **create secret** + ```terminal + $ kubectl exec -it vault-0 -- /bin/sh + + / $ vault secrets enable -path=internal kv-v2 + Success! Enabled the kv-v2 secrets engine at: internal/ + + / $ vault kv put internal/database/config username="db-readonly-username" password="db-secret-password" + ======== Secret Path ======== + internal/data/database/config + + ======= Metadata ======= + Key Value + --- ----- + created_time 2024-04-24T18:29:43.426198259Z + custom_metadata + deletion_time n/a + destroyed false + version 1 + + / $ exit + ``` + +2. **Configure authentication** + ```terminal + $ kubectl exec -it vault-0 -- /bin/sh + / $ vault auth enable kubernetes + Success! Enabled kubernetes auth method at: kubernetes/ + / $ vault write auth/kubernetes/config \ + > kubernetes_host="https://$KUBERNETES_PORT_443_TCP_ADDR:443" + Success! Data written to: auth/kubernetes/config + + / $ vault policy write internal-app - < path "internal/data/database/config" { + > capabilities = ["read"] + > } + > EOF + Success! Uploaded policy: internal-app + + / $ vault write auth/kubernetes/role/internal-app \ + > bound_service_account_names=internal-app \ + > bound_service_account_namespaces=default \ + > policies=internal-app \ + > ttl=24h + Success! 
Data written to: auth/kubernetes/role/internal-app + + / $ exit + ``` \ No newline at end of file diff --git a/k8s/12.md b/k8s/12.md new file mode 100644 index 0000000000..191d852a5a --- /dev/null +++ b/k8s/12.md @@ -0,0 +1,15 @@ +# lab 12 +## Task2 outputs +1. **`kubectl get po`** + ``` + NAME READY STATUS RESTARTS AGE + app-python-d49dfc746-mbq6w 1/1 Running 0 2m33s + ``` + +2. **`kubectl exec app-python-d49dfc746-mbq6w -- cat /app-python/config.json`** + ``` + kubectl exec app-python-d49dfc746-mbq6w -- cat /app-python/config.json + { + "developer":"vectors_master" + } + ``` diff --git a/k8s/13.md b/k8s/13.md new file mode 100644 index 0000000000..ee583b5225 --- /dev/null +++ b/k8s/13.md @@ -0,0 +1,48 @@ +# Lab 13 + +## Task 2 outputs +1. **`sarhan@sarhan-HP:~/projects/DevOps/k8s$ kubectl get po,sts,svc,pvc`** + ``` + NAME READY STATUS RESTARTS AGE + pod/app-python-0 1/1 Running 0 64s + pod/app-python-1 1/1 Running 0 64s + + NAME READY AGE + statefulset.apps/app-python 2/2 66s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/app-python LoadBalancer 10.110.101.79 5000:30739/TCP 66s + service/kubernetes ClusterIP 10.96.0.1 443/TCP 35d + + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + persistentvolumeclaim/data-vault-0 Bound pvc-53ffa16a-ee5d-4d16-a375-0b1adce51a27 10Gi RWO standard 11d + persistentvolumeclaim/www-app-python-0 Bound pvc-7486e9b2-53a6-49c0-84ba-33cd49bfea53 1Gi RWO standard 10m + persistentvolumeclaim/www-app-python-1 Bound pvc-80cad299-d3de-4c96-bd26-89a3ff0dcd4f 1Gi RWO standard 65s + ``` +2. 
**`sarhan@sarhan-HP:~/projects/DevOps/k8s$ minikube service app-python`** +``` +minikube service app-python +|-----------|------------|-------------|----------------------------| +| NAMESPACE | NAME | TARGET PORT | URL | +|-----------|------------|-------------|----------------------------| +| default | app-python | http/5000 | http://192.168.184.2:30739 | +|-----------|------------|-------------|----------------------------| +🎉 Opening service default/app-python in default browser... +``` + +3. +``` +sarhan@sarhan-HP:~/projects/DevOps/k8s$ kubectl exec pod/app-python-0 -- cat visits.txt +16 + +sarhan@sarhan-HP:~/projects/DevOps/k8s$ kubectl exec pod/app-python-1 -- cat visits.txt +14 +``` +The outputs are not the same because the pods are independent (each one has its own storage). + +4. + +Ordering Guarantees are not necessary because pods are independent from each other. +```yaml +podManagementPolicy: "Parallel" +``` \ No newline at end of file diff --git a/k8s/14.md b/k8s/14.md new file mode 100644 index 0000000000..6cd025fa0c --- /dev/null +++ b/k8s/14.md @@ -0,0 +1,174 @@ +# Lab 14 + +## Task1 + +- ***Components details*** + + 1. **Prometheus Operator:** It automates the deployment, configuration, and management of Prometheus instances, making it easier to scale and maintain monitoring capabilities. + + 2. **Prometheus:** collect and store metrics from various sources. + + 3. **Alertmanager:** handles alerts generated by Prometheus and routes them to appropriate channels such as email. + + 4. **Prometheus Node Exporter:** gathers system-level metrics from Kubernetes nodes, providing insights into CPU, memory, disk usage, and network activity. + + 5. **Prometheus Blackbox Exporter:** It performs HTTP, TCP, and ICMP probes to check the availability and responsiveness of services. + + 6. **Prometheus Adapter for Kubernetes Metrics APIs:** Gets metrics from kubernetes and provides them to Prometheus. + + 7. 
**kube-state-metrics:** collects and exposes detailed information about the state of Kubernetes objects allowing the operators the monitor their health and performance. + + 8. **Grafana:** visualization tool that complements Prometheus by enabling users to create customizable dashboards for visualizing and analyzing metrics. + +- ***Installation*** + + ```terminal + $ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts + + $ helm repo update + + $ helm install kube-prometheus-stack prometheus-community/kube-prometheus-stack + NAME: kube-prometheus-stack + LAST DEPLOYED: Wed May 8 01:34:47 2024 + NAMESPACE: default + STATUS: deployed + REVISION: 1 + NOTES: + kube-prometheus-stack has been installed. Check its status by running: + kubectl --namespace default get pods -l "release=kube-prometheus-stack" + + Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. + + $ kubectl --namespace default get pods -l "release=kube-prometheus-stack" + NAME READY STATUS RESTARTS AGE + kube-prometheus-stack-kube-state-metrics-65594f9476-4gvqb 1/1 Running 0 110s + kube-prometheus-stack-operator-985449d44-q5z6w 1/1 Running 0 110s + kube-prometheus-stack-prometheus-node-exporter-qrk56 1/1 Running 0 111s + + $ helm secrets install app-python ./app-python -n default -f ./secrets.yaml + NAME: app-python + LAST DEPLOYED: Wed May 8 01:54:01 2024 + NAMESPACE: default + STATUS: deployed + REVISION: 1 + NOTES: + 1. Get the application URL by running these commands: + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get --namespace default svc -w app-python' + export SERVICE_IP=$(kubectl get svc --namespace default app-python --template "{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}") + echo http://$SERVICE_IP:5000 + removed './secrets.yaml.dec' + + ``` + +- ***`kubectl get po,sts,svc,pvc,cm`*** + ``` + $ kubectl get po,sts,svc,pvc,cm + + # List of running pods in the cluster + + sarhan@sarhan-HP:~/projects/DevOps/k8s$ kubectl get po,sts,svc,pvc,cm + NAME READY STATUS RESTARTS AGE + pod/alertmanager-kube-prometheus-stack-alertmanager-0 2/2 Running 0 6m32s + pod/app-python-0 1/1 Running 2 (4m21s ago) 16m + pod/app-python-1 1/1 Running 1 16m + pod/kube-prometheus-stack-grafana-7cf5785ff8-rpdrv 3/3 Running 1 (66s ago) 7m8s + pod/kube-prometheus-stack-kube-state-metrics-65594f9476-vgblx 1/1 Running 0 7m8s + pod/kube-prometheus-stack-operator-985449d44-vtlkk 1/1 Running 0 7m8s + pod/kube-prometheus-stack-prometheus-node-exporter-mb9xx 1/1 Running 0 7m8s + pod/prometheus-kube-prometheus-stack-prometheus-0 2/2 Running 0 6m30s + + # list of statefulsets + NAME READY AGE + statefulset.apps/alertmanager-kube-prometheus-stack-alertmanager 1/1 6m32s + statefulset.apps/app-python 2/2 16m + statefulset.apps/prometheus-kube-prometheus-stack-prometheus 1/1 6m31s + + # list of the services and their details (AGE, TYPE, ...) 
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/alertmanager-operated ClusterIP None 9093/TCP,9094/TCP,9094/UDP 6m32s + service/app-python LoadBalancer 10.98.246.250 5000:30351/TCP 16m + service/kube-prometheus-stack-alertmanager ClusterIP 10.96.147.220 9093/TCP,8080/TCP 7m10s + service/kube-prometheus-stack-grafana ClusterIP 10.108.20.228 80/TCP 7m10s + service/kube-prometheus-stack-kube-state-metrics ClusterIP 10.96.28.139 8080/TCP 7m10s + service/kube-prometheus-stack-operator ClusterIP 10.97.101.226 443/TCP 7m10s + service/kube-prometheus-stack-prometheus ClusterIP 10.96.23.143 9090/TCP,8080/TCP 7m10s + service/kube-prometheus-stack-prometheus-node-exporter ClusterIP 10.110.202.24 9100/TCP 7m10s + service/kubernetes ClusterIP 10.96.0.1 443/TCP 31m + service/prometheus-operated ClusterIP None 9090/TCP 6m31s + + # list of persistent volumes + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + persistentvolumeclaim/www-app-python-0 Bound pvc-7cea5181-f8af-4ed9-9a5f-42764fa02382 1Gi RWO standard 16m + persistentvolumeclaim/www-app-python-1 Bound pvc-467b556a-1824-4194-9113-76cd42d39941 1Gi RWO standard 16m + + # list of config maps + NAME DATA AGE + configmap/config 1 16m + configmap/kube-prometheus-stack-alertmanager-overview 1 7m10s + configmap/kube-prometheus-stack-apiserver 1 7m10s + configmap/kube-prometheus-stack-cluster-total 1 7m10s + configmap/kube-prometheus-stack-controller-manager 1 7m10s + configmap/kube-prometheus-stack-etcd 1 7m10s + configmap/kube-prometheus-stack-grafana 1 7m10s + configmap/kube-prometheus-stack-grafana-config-dashboards 1 7m10s + configmap/kube-prometheus-stack-grafana-datasource 1 7m10s + configmap/kube-prometheus-stack-grafana-overview 1 7m10s + configmap/kube-prometheus-stack-k8s-coredns 1 7m10s + configmap/kube-prometheus-stack-k8s-resources-cluster 1 7m10s + configmap/kube-prometheus-stack-k8s-resources-multicluster 1 7m10s + configmap/kube-prometheus-stack-k8s-resources-namespace 1 7m10s + 
configmap/kube-prometheus-stack-k8s-resources-node 1 7m10s + configmap/kube-prometheus-stack-k8s-resources-pod 1 7m10s + configmap/kube-prometheus-stack-k8s-resources-workload 1 7m10s + configmap/kube-prometheus-stack-k8s-resources-workloads-namespace 1 7m10s + configmap/kube-prometheus-stack-kubelet 1 7m10s + configmap/kube-prometheus-stack-namespace-by-pod 1 7m10s + configmap/kube-prometheus-stack-namespace-by-workload 1 7m10s + configmap/kube-prometheus-stack-node-cluster-rsrc-use 1 7m10s + configmap/kube-prometheus-stack-node-rsrc-use 1 7m10s + configmap/kube-prometheus-stack-nodes 1 7m10s + configmap/kube-prometheus-stack-nodes-darwin 1 7m10s + configmap/kube-prometheus-stack-persistentvolumesusage 1 7m10s + configmap/kube-prometheus-stack-pod-total 1 7m10s + configmap/kube-prometheus-stack-prometheus 1 7m10s + configmap/kube-prometheus-stack-proxy 1 7m10s + configmap/kube-prometheus-stack-scheduler 1 7m10s + configmap/kube-prometheus-stack-workload-total 1 7m10s + configmap/kube-root-ca.crt 1 31m + configmap/prometheus-kube-prometheus-stack-prometheus-rulefiles-0 35 6m33s + ``` + +- ***Grafana Dashboards:*** +1. Check CPU and Memory consumption of your StatefulSet. + - app-python-0 CPU consumption is 0.001 and memory 22.32 MiB + - app-python-1 CPU consumption is 0.002 and memory 22.24 MiB + + ![CPU and Memory Consumption](./assets/stateulsetCPU.png) + +2. Identify Pods with higher and lower CPU usage in the default namespace. + Higher CPU usage: prometheus-kube-prometheus-stack-prometheus-0 + Lower CPU usage: most of the time it's alertmanager-0 pod. + ![Pods Consumption in default](./assets/NameSpace.png) + +3. Monitor node memory usage in percentage and megabytes. + - memory used: 4.88 + - percentage: 73.6 + ![memory](./assets/memory.png) + +4. Count the number of pods and containers managed by the Kubelet service. + - Pods: 16 + - Containers: 26 + ![Pods and Containers](./assets/podsAndContainers.png) + +5. 
Evaluate network usage of Pods in the default namespace. + - Download: 34.8 + - Upload: 39.7 + ![Network Usage](./assets/NetworkUsage.png) + +6. Determine the number of active alerts; also check the Web UI with `minikube service monitoring-kube-prometheus-alertmanager`. + - Active Alerts: 8 + + ![Active Alerts](./assets/alerting.png) + + ![alertmanager UI](./assets/AlertManagerUI.png) \ No newline at end of file diff --git a/k8s/HELM.md b/k8s/HELM.md new file mode 100644 index 0000000000..37bcde019b --- /dev/null +++ b/k8s/HELM.md @@ -0,0 +1,188 @@ +# Lab10 +## Task1 +1. **`helm install app-python app-python`** + ``` + NAME: app-python + LAST DEPLOYED: Wed Apr 10 03:55:20 2024 + NAMESPACE: default + STATUS: deployed + REVISION: 1 + NOTES: + 1. Get the application URL by running these commands: + export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/name=app-python,app.kubernetes.io/instance=app-python" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace default $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace default port-forward $POD_NAME 8080:$CONTAINER_PORT + ``` +2. **`minikube service app-python`** + ``` + |-----------|------------|-------------|----------------------------| + | NAMESPACE | NAME | TARGET PORT | URL | + |-----------|------------|-------------|----------------------------| + | default | app-python | http/5000 | http://192.168.184.2:32722 | + |-----------|------------|-------------|----------------------------| + 🎉 Opening service default/app-python in default browser... + ``` + + ![screenshot](./assets/verifyChart.png) + +3. 
**`kubectl get pods,svc`** + ``` + NAME READY STATUS RESTARTS AGE + pod/app-python-68bf577b48-6nnrh 1/1 Running 0 7m6s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/app-python LoadBalancer 10.105.81.37 5000:32722/TCP 7m7s + service/kubernetes ClusterIP 10.96.0.1 443/TCP 9d + ``` + +4. **`minikube dashboard`** + ![screenshot](./assets/minikubeDashboard.png) + + +## Task2 + +1. **`kubectl get po`** + ``` + NAME READY STATUS RESTARTS AGE + app-python-68bf577b48-gvtk6 1/1 Running 0 2m23s + postinstall-hook 0/1 Completed 0 2m49s + preinstall-hook 0/1 Completed 0 3m22s + ``` +2. **`kubectl describe po preinstall-hook`** + ``` + Name: preinstall-hook + Namespace: default + Priority: 0 + Service Account: default + Node: minikube/192.168.184.2 + Start Time: Wed, 10 Apr 2024 04:49:05 +0300 + Labels: + Annotations: helm.sh/hook: pre-install + Status: Succeeded + IP: 10.244.0.42 + IPs: + IP: 10.244.0.42 + Containers: + pre-install-container: + Container ID: docker://058968aa182315c18da473593975f479d40a7c0f086ed2cd51011b56644af8a8 + Image: busybox + Image ID: docker-pullable://busybox@sha256:c3839dd800b9eb7603340509769c43e146a74c63dca3045a8e7dc8ee07e53966 + Port: + Host Port: + Command: + sh + -c + echo The pre-install hook is running && sleep 20 + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Wed, 10 Apr 2024 04:49:12 +0300 + Finished: Wed, 10 Apr 2024 04:49:32 +0300 + Ready: False + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-47gcf (ro) + Conditions: + Type Status + Initialized True + Ready False + ContainersReady False + PodScheduled True + Volumes: + kube-api-access-47gcf: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + ConfigMapOptional: + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + 
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 5m37s default-scheduler Successfully assigned default/preinstall-hook to minikube + Normal Pulled 5m33s kubelet Container image "busybox" already present on machine + Normal Created 5m30s kubelet Created container pre-install-container + Normal Started 5m30s kubelet Started container pre-install-container + ``` + +3. **`kubectl describe po postinstall-hook`** + ``` + Name: postinstall-hook + Namespace: default + Priority: 0 + Service Account: default + Node: minikube/192.168.184.2 + Start Time: Wed, 10 Apr 2024 04:49:38 +0300 + Labels: + Annotations: helm.sh/hook: post-install + Status: Succeeded + IP: 10.244.0.44 + IPs: + IP: 10.244.0.44 + Containers: + post-install-container: + Container ID: docker://43f412a1f076c2434d631b834f674bfd631503beb70711d75ac7a05138dc215d + Image: busybox + Image ID: docker-pullable://busybox@sha256:c3839dd800b9eb7603340509769c43e146a74c63dca3045a8e7dc8ee07e53966 + Port: + Host Port: + Command: + sh + -c + echo The post-install hook is running && sleep 15 + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Wed, 10 Apr 2024 04:49:50 +0300 + Finished: Wed, 10 Apr 2024 04:50:05 +0300 + Ready: False + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-2md2d (ro) + Conditions: + Type Status + Initialized True + Ready False + ContainersReady False + PodScheduled True + Volumes: + kube-api-access-2md2d: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + ConfigMapOptional: + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- 
------- + Normal Scheduled 7m8s default-scheduler Successfully assigned default/postinstall-hook to minikube + Normal Pulling 7m3s kubelet Pulling image "busybox" + Normal Pulled 7m kubelet Successfully pulled image "busybox" in 3.646s (3.646s including waiting) + Normal Created 6m56s kubelet Created container post-install-container + Normal Started 6m55s kubelet Started container post-install-container + ``` + +4. **hook delete policy** + I added the following to hooks yaml files in `metadata.annotations`: + - `"helm.sh/hook-delete-policy": hook-succeeded` + + +5. **`kubectl get pods,svc`** + ``` + NAME READY STATUS RESTARTS AGE + pod/app-python-68bf577b48-cwk7r 1/1 Running 0 67s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/app-python LoadBalancer 10.102.29.15 5000:31786/TCP 68s + service/kubernetes ClusterIP 10.96.0.1 443/TCP 9d + ``` \ No newline at end of file diff --git a/k8s/README.md b/k8s/README.md new file mode 100644 index 0000000000..496da80d01 --- /dev/null +++ b/k8s/README.md @@ -0,0 +1,159 @@ +# lab9 + +## Task1 + +1. **Deployment** + + ``` + $ kubectl create deployment flask-app --image=vectorsmaster/flask-app:latest + deployment.apps/flask-app created + + $ kubectl get deployments + NAME READY UP-TO-DATE AVAILABLE AGE + flask-app 1/1 1 1 15m + ``` +2. **Expose the deployment** + + ``` + $ kubectl expose deployment flask-app --type=NodePort --port=5000 + service/flask-app exposed + ``` +3. **Access the application from the browser** + + ``` + $ kubectl port-forward service/flask-app 5000:5000 + Forwarding from 127.0.0.1:5000 -> 5000 + Forwarding from [::1]:5000 -> 5000 + Handling connection for 5000 + Handling connection for 5000 + ``` +4. 
**Output of command:** `kubectl get pods,svc` + + ``` + NAME READY STATUS RESTARTS AGE + pod/flask-app-6bfd5d795d-jrq26 1/1 Running 0 6m47s + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/flask-app NodePort 10.97.78.154 5000:31377/TCP 6m38s + service/kubernetes ClusterIP 10.96.0.1 443/TCP 31m + ``` +5. **clean up:** + + ``` + $ kubectl delete service flask-app + service "flask-app" deleted + + $ kubectl delete deployment flask-app + deployment.apps "flask-app" deleted + ``` + +## Task2 + +1. **Create deployment and service**: + navigate to k8s directory. + + ``` + $ kubectl apply -f . + deployment.apps/flask-app-deployment created + service/flask-app-service created + + ``` +2. **Output of command:** + $ kubectl get pods,svc + NAME READY STATUS RESTARTS AGE + pod/flask-app-deployment-64fd7cb84d-4hdjq 1/1 Running 0 44m + pod/flask-app-deployment-64fd7cb84d-hksn6 1/1 Running 0 44m + pod/flask-app-deployment-64fd7cb84d-sr7p2 1/1 Running 0 44m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/flask-app-service NodePort 10.110.51.67 `` 5000:31000/TCP 44m + service/kubernetes ClusterIP 10.96.0.1 `` 443/TCP 2d19h +3. **Output of command:** + + ``` + $ minikube service --all + |-----------|-------------------|-------------|----------------------------| + | NAMESPACE | NAME | TARGET PORT | URL | + |-----------|-------------------|-------------|----------------------------| + | default | flask-app-service | 5000 | http://192.168.184.2:31000 | + |-----------|-------------------|-------------|----------------------------| + |-----------|------------|-------------|--------------| + | NAMESPACE | NAME | TARGET PORT | URL | + |-----------|------------|-------------|--------------| + | default | kubernetes | | No node port | + |-----------|------------|-------------|--------------| + 😿 service default/kubernetes has no node port + 🎉 Opening service default/flask-app-service in default browser... + ``` +4. 
**Screenshot:** +![screenshot](./assets/image.png) + +## Bonus Task + +1. **`$ minikube service flask-app-service --url`** + output: + `http://192.168.184.2:31000` + +2. **`$ curl http://192.168.184.2:31000`** + output: + ``` + + + + MSK Time + + +

MSK Time

+

Current MSK Time: 2024-04-03 03:01:21

+ + ``` + +3. **ingress:** + ``` + $ minikube addons enable ingress + 💡 ingress is an addon maintained by Kubernetes. For any concerns contact minikube on GitHub. + You can view the list of minikube maintainers at: https://github.com/kubernetes/minikube/blob/master/OWNERS + ▪ Using image registry.k8s.io/ingress-nginx/controller:v1.9.4 + ▪ Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0 + ▪ Using image registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231011-8b53cabe0 + 🔎 Verifying ingress addon... + 🌟 The 'ingress' addon is enabled + + $ kubectl get pods -n ingress-nginx + NAME READY STATUS RESTARTS AGE + ingress-nginx-admission-create-xg8zz 0/1 Completed 0 3m6s + ingress-nginx-admission-patch-ff48h 0/1 Completed 1 3m6s + ingress-nginx-controller-7c6974c4d8-tmvbp 1/1 Running 0 3m6s + + $ kubectl apply -f ingress.yml + ingress.networking.k8s.io/flask-app-ingress created + + $ kubectl get ingress + NAME CLASS HOSTS ADDRESS PORTS AGE + flask-app-ingress nginx flask-app.info 192.168.184.2 80 71s + + $ curl --resolve "flask-app.info:80:$( minikube ip )" -i http://flask-app.info + HTTP/1.1 200 OK + Date: Wed, 03 Apr 2024 00:07:26 GMT + Content-Type: text/html; charset=utf-8 + Content-Length: 159 + Connection: keep-alive + + + + + MSK Time + + +

MSK Time

+

Current MSK Time: 2024-04-03 03:07:26

+ + ``` + +4. **screenshot:** +![screenshot](./assets/ingress.png) + + + + + diff --git a/k8s/app-python/.helmignore b/k8s/app-python/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/k8s/app-python/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/k8s/app-python/Chart.yaml b/k8s/app-python/Chart.yaml new file mode 100644 index 0000000000..702048839f --- /dev/null +++ b/k8s/app-python/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: app-python +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/k8s/app-python/files/config.json b/k8s/app-python/files/config.json new file mode 100644 index 0000000000..a89993a48a --- /dev/null +++ b/k8s/app-python/files/config.json @@ -0,0 +1,3 @@ +{ + "developer":"vectors_master" +} \ No newline at end of file diff --git a/k8s/app-python/templates/NOTES.txt b/k8s/app-python/templates/NOTES.txt new file mode 100644 index 0000000000..f3696d9ed7 --- /dev/null +++ b/k8s/app-python/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "app-python.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "app-python.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "app-python.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "app-python.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/k8s/app-python/templates/_helpers.tpl b/k8s/app-python/templates/_helpers.tpl new file mode 100644 index 0000000000..b39b9e5cc3 --- /dev/null +++ b/k8s/app-python/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "app-python.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "app-python.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "app-python.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "app-python.labels" -}} +helm.sh/chart: {{ include "app-python.chart" . }} +{{ include "app-python.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "app-python.selectorLabels" -}} +app.kubernetes.io/name: {{ include "app-python.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "app-python.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "app-python.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/k8s/app-python/templates/config.yaml b/k8s/app-python/templates/config.yaml new file mode 100644 index 0000000000..e7389fa25d --- /dev/null +++ b/k8s/app-python/templates/config.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config +data: + config.json: |- +{{ .Files.Get "files/config.json" | indent 4 }} \ No newline at end of file diff --git a/k8s/app-python/templates/hpa.yaml b/k8s/app-python/templates/hpa.yaml new file mode 100644 index 0000000000..897a1223f5 --- /dev/null +++ b/k8s/app-python/templates/hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "app-python.fullname" . }} + labels: + {{- include "app-python.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "app-python.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/k8s/app-python/templates/ingress.yaml b/k8s/app-python/templates/ingress.yaml new file mode 100644 index 0000000000..31241a1cb8 --- /dev/null +++ b/k8s/app-python/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "app-python.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "app-python.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/k8s/app-python/templates/post-install-hool.yaml b/k8s/app-python/templates/post-install-hool.yaml new file mode 100644 index 0000000000..1e2f5220ab --- /dev/null +++ b/k8s/app-python/templates/post-install-hool.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: postinstall-hook + annotations: + "helm.sh/hook": "post-install" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + containers: + - name: post-install-container + image: busybox + imagePullPolicy: Always + command: ['sh', '-c', 'echo The post-install hook is running && sleep 15' ] + restartPolicy: Never + terminationGracePeriodSeconds: 0 \ No newline at end of file diff --git a/k8s/app-python/templates/pre-install-hook.yaml b/k8s/app-python/templates/pre-install-hook.yaml new file mode 100644 index 0000000000..ba53bf2309 --- /dev/null +++ b/k8s/app-python/templates/pre-install-hook.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: preinstall-hook + annotations: + "helm.sh/hook": "pre-install" + 
"helm.sh/hook-delete-policy": hook-succeeded +spec: + containers: + - name: pre-install-container + image: busybox + imagePullPolicy: IfNotPresent + command: ['sh', '-c', 'echo The pre-install hook is running && sleep 20' ] + restartPolicy: Never + terminationGracePeriodSeconds: 0 \ No newline at end of file diff --git a/k8s/app-python/templates/secrets.yaml b/k8s/app-python/templates/secrets.yaml new file mode 100644 index 0000000000..7ed3c7a17e --- /dev/null +++ b/k8s/app-python/templates/secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: credentials + labels: + app: app-python +type: opaque +data: + password: {{ .Values.password | b64enc | quote }} \ No newline at end of file diff --git a/k8s/app-python/templates/service.yaml b/k8s/app-python/templates/service.yaml new file mode 100644 index 0000000000..95a3e10238 --- /dev/null +++ b/k8s/app-python/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "app-python.fullname" . }} + labels: + {{- include "app-python.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: 5000 + targetPort: http + protocol: TCP + name: http + selector: + {{- include "app-python.selectorLabels" . | nindent 4 }} diff --git a/k8s/app-python/templates/serviceaccount.yaml b/k8s/app-python/templates/serviceaccount.yaml new file mode 100644 index 0000000000..cc89ba9533 --- /dev/null +++ b/k8s/app-python/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "app-python.serviceAccountName" . }} + labels: + {{- include "app-python.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/k8s/app-python/templates/statefulset.yaml b/k8s/app-python/templates/statefulset.yaml new file mode 100644 index 0000000000..7585087d01 --- /dev/null +++ b/k8s/app-python/templates/statefulset.yaml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "app-python.fullname" . }} + labels: + {{- include "app-python.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + volumeClaimTemplates: + {{- toYaml .Values.volumeClaimTemplates | nindent 4 }} + selector: + matchLabels: + {{- include "app-python.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "app-python.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "app-python.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: MY_PASSWORD + valueFrom: + secretKeyRef: + name: credentials + key: password + ports: + - name: http + containerPort: 5000 + protocol: TCP + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.volumeMounts }} + volumeMounts: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with .Values.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + podManagementPolicy: "Parallel" + diff --git a/k8s/app-python/templates/tests/test-connection.yaml b/k8s/app-python/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..66d581c341 --- /dev/null +++ b/k8s/app-python/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "app-python.fullname" . }}-test-connection" + labels: + {{- include "app-python.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "app-python.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/k8s/app-python/values.yaml b/k8s/app-python/values.yaml new file mode 100644 index 0000000000..5b92c8d555 --- /dev/null +++ b/k8s/app-python/values.yaml @@ -0,0 +1,115 @@ +# Default values for app-python. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 2 + +image: + repository: vectorsmaster/flask-app + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} +podLabels: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: LoadBalancer + port: 5000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +livenessProbe: + httpGet: + path: / + port: http +readinessProbe: + httpGet: + path: / + port: http + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +volumes: + - name: config-volume + configMap: + name: config + +volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi + +volumeMounts: + - name: config-volume + mountPath: /app-python/config.json + subPath: config.json + - name: www + mountPath: /app-python/www + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/k8s/assets/AlertManagerUI.png b/k8s/assets/AlertManagerUI.png new file mode 100644 index 0000000000..2da52b4cb3 Binary files /dev/null and b/k8s/assets/AlertManagerUI.png differ diff --git a/k8s/assets/NameSpace.png b/k8s/assets/NameSpace.png new file mode 100644 index 0000000000..5dc6432b9a Binary files /dev/null and b/k8s/assets/NameSpace.png differ diff --git a/k8s/assets/NetworkUsage.png b/k8s/assets/NetworkUsage.png new file mode 100644 index 0000000000..85b95e1edb Binary files /dev/null and b/k8s/assets/NetworkUsage.png differ diff --git a/k8s/assets/alerting.png b/k8s/assets/alerting.png new file mode 100644 index 0000000000..895c2ee37e Binary files /dev/null and b/k8s/assets/alerting.png differ diff --git a/k8s/assets/image.png b/k8s/assets/image.png new file mode 100644 index 0000000000..095f38ddc6 Binary files /dev/null and b/k8s/assets/image.png differ diff --git a/k8s/assets/ingress.png b/k8s/assets/ingress.png new file mode 100644 index 0000000000..f7e4c57eec Binary files /dev/null and b/k8s/assets/ingress.png differ diff --git a/k8s/assets/memory.png b/k8s/assets/memory.png new file mode 100644 index 0000000000..370b769a56 Binary files /dev/null and b/k8s/assets/memory.png differ diff --git 
a/k8s/assets/minikubeDashboard.png b/k8s/assets/minikubeDashboard.png new file mode 100644 index 0000000000..e44b53705d Binary files /dev/null and b/k8s/assets/minikubeDashboard.png differ diff --git a/k8s/assets/podsAndContainers.png b/k8s/assets/podsAndContainers.png new file mode 100644 index 0000000000..869c197b79 Binary files /dev/null and b/k8s/assets/podsAndContainers.png differ diff --git a/k8s/assets/secrets.png b/k8s/assets/secrets.png new file mode 100644 index 0000000000..535a78461f Binary files /dev/null and b/k8s/assets/secrets.png differ diff --git a/k8s/assets/stateulsetCPU.png b/k8s/assets/stateulsetCPU.png new file mode 100644 index 0000000000..7df00cafe4 Binary files /dev/null and b/k8s/assets/stateulsetCPU.png differ diff --git a/k8s/assets/verifyChart.png b/k8s/assets/verifyChart.png new file mode 100644 index 0000000000..cc056b7e27 Binary files /dev/null and b/k8s/assets/verifyChart.png differ diff --git a/k8s/deployment.yml b/k8s/deployment.yml new file mode 100644 index 0000000000..70d1db9c24 --- /dev/null +++ b/k8s/deployment.yml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: flask-app-deployment + labels: + app: flask-app +spec: + replicas: 3 + selector: + matchLabels: + app: flask-app + template: + metadata: + labels: + app: flask-app + spec: + containers: + - name: flask-app + image: vectorsmaster/flask-app:latest + ports: + - containerPort: 5000 diff --git a/k8s/ingress.yml b/k8s/ingress.yml new file mode 100644 index 0000000000..9ed5759030 --- /dev/null +++ b/k8s/ingress.yml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: flask-app-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$1 +spec: + rules: + - host: flask-app.info + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: flask-app-service + port: + number: 5000 \ No newline at end of file diff --git a/k8s/password.txt b/k8s/password.txt new file mode 100644 index 
0000000000..3dc3b8910e --- /dev/null +++ b/k8s/password.txt @@ -0,0 +1 @@ +S!B\*d$zDsb= \ No newline at end of file diff --git a/k8s/secrets.yaml b/k8s/secrets.yaml new file mode 100644 index 0000000000..c01f076153 --- /dev/null +++ b/k8s/secrets.yaml @@ -0,0 +1,27 @@ +password: ENC[AES256_GCM,data:wFufZZ91u1jqKQ==,iv:4v1EEIl2Y586iKfh4gLuNY223G5plcaclecP7PSJ+m0=,tag:rFhKSaNne7n7H5t6b76KRw==,type:str] +sops: + kms: [] + gcp_kms: [] + lastmodified: '2024-04-23T20:18:16Z' + mac: ENC[AES256_GCM,data:AMj/pBONX3Nr+RwZu6QATHT/hCH7zEsyz26of+P5dBW3HM+5O66bXsb8lqfioDS91fcrbQyp01gS3YtWURy01RAAsmAGYT3Kr40bR8tcUv0mBZe6E55Tkaf1nJo15GKCyuyphA/OU+GcPwGVHTSbHbMQUiIBORH/WOkovKVQFb8=,iv:9bLvASs0BDWmCS/EZ2B7TlVzJzGnHvJf445FmFfx+dE=,tag:KgQSu2z2QA0WuSPbzNpGgA==,type:str] + pgp: + - created_at: '2024-04-23T20:17:29Z' + enc: | + -----BEGIN PGP MESSAGE----- + + hQGMA1QlWF9GqZyTAQwAgqIVNvN9heEfnJPjRBwShCF28L2Z+BLFo6wCTeZM61Nj + bdeAX4q04bcYN7xi+ihuci8JP+PNn2r08XKKKsLcBgzPYpI8LqI10WAIrtFcQ1zV + C2mE860Ih4te10NUVbsO8gABY304RIfdOsuC7Jx9eHuHyKivDGBqqnOt/RMdCkG8 + PM7d079NPVX9M7whQ7vz2fEMAlJZVduHCE91e6FNRwnFjDZMAw9bcA4JN8PbaotM + 6FqJV2Xbkszv/tuhUC/KN+apgaFoit+VlMI2kurXFbspDlRyj+EyvpmtBs4liVeV + UMIhLmwubKFD+OvFde44o/j4Rwd1JjyrSHsv6Uc4mp6i1AnHHfVvoSxIy6QJy1kT + IHJEFzjP/7OuNUIPJ9XPyBIGwIlSmgDGWo/moi3gGaEdwS9ECKQiZtx3LZtRkHT4 + VPt4eazgDXNxpuUBqolsozX1/3ZuBRmh/2Fd0qQD25hTjDTDGz8lJaKHNPJOfxgL + CThn5WMbUszMBq2uPoHv0l4B4Cu4YQ2yLDEwJNvlpiMBFwny8xbHxIhV1MZxxQcV + HNcmEt5BoUfV6l/Sn/8IgZ7c3778wOzqzYT9iP9+ONukzuFyDDsfnbUyUmU2Rlox + /z/OvQoK9ehKwRB2wbXX + =ZyBV + -----END PGP MESSAGE----- + fp: 12767A9AC61D56B8A6AE2680F968B06BE51CF6A8 + unencrypted_suffix: _unencrypted + version: 3.0.3 diff --git a/k8s/service.yml b/k8s/service.yml new file mode 100644 index 0000000000..6b1b43fe86 --- /dev/null +++ b/k8s/service.yml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: flask-app-service +spec: + selector: + app: flask-app + type: NodePort + ports: + - protocol: TCP + port: 5000 + 
targetPort: 5000 + nodePort: 31000 \ No newline at end of file diff --git a/k8s/username.txt b/k8s/username.txt new file mode 100644 index 0000000000..f77b00407e --- /dev/null +++ b/k8s/username.txt @@ -0,0 +1 @@ +admin \ No newline at end of file diff --git a/lab10.md b/lab10.md new file mode 100644 index 0000000000..c472086168 --- /dev/null +++ b/lab10.md @@ -0,0 +1,91 @@ +# Lab 10: Introduction to Helm + +## Overview + +In this lab, you will become familiar with Helm, set up a local development environment, and generate manifests for your application. + +## Task 1: Helm Setup and Chart Creation + +**6 Points:** + +1. Learn About Helm: + - Begin by exploring the architecture and concepts of Helm: + - [Helm Architecture](https://helm.sh/docs/topics/architecture/) + - [Understanding Helm Charts](https://helm.sh/docs/topics/charts/) + +2. Install Helm: + - Install Helm using the instructions provided: + - [Helm Installation](https://helm.sh/docs/intro/install/) + - [Chart Repository Initialization](https://helm.sh/docs/intro/quickstart/#initialize-a-helm-chart-repository) + +3. Create Your Own Helm Chart: + - Generate a Helm chart for your application. + - Inside the `k8s` folder, create a Helm chart template by using the command `helm create your-app`. + - Replace the default repository and tag inside the `values.yaml` file with your repository name. + - Modify the `containerPort` setting in the `deployment.yml` file. + - If you encounter issues with `livenessProbe` and `readinessProbe`, you can comment them out. + + > For troubleshooting, you can use the `minikube dashboard` command. + +4. Install Your Helm Chart: + - Install your custom Helm chart and ensure that all services are healthy. Verify this by checking the `Workloads` page in the Minikube dashboard. + +5. Access Your Application: + - Confirm that your application is accessible by running the `minikube service your_service_name` command. + +6. 
Create a HELM.md File: + - Construct a `HELM.md` file and provide the output of the `kubectl get pods,svc` command within it. + +## Task 2: Helm Chart Hooks + +**4 Points:** + +1. Learn About Chart Hooks: + - Familiarize yourself with [Helm Chart Hooks](https://helm.sh/docs/topics/charts_hooks/). + +2. Implement Helm Chart Hooks: + - Develop pre-install and post-install pods within your Helm chart, without adding any complex logic (e.g., use "sleep 20"). You can refer to [Example 1 in the guide](https://www.golinuxcloud.com/kubernetes-helm-hooks-examples/). + +3. Troubleshoot Hooks: + - Execute the following commands to troubleshoot your hooks: + 1. `helm lint <path_to_your_chart>` + 2. `helm install --dry-run helm-hooks <path_to_your_chart>` + 3. `kubectl get po` + +4. Provide Output: + - Execute the following commands and include their output in your report: + 1. `kubectl get po` + 2. `kubectl describe po <pre_install_hook_pod_name>` + 3. `kubectl describe po <post_install_hook_pod_name>` + +5. Hook Delete Policy: + - Implement a hook delete policy to remove the hook once it has executed successfully. + +**List of Requirements:** + +- Helm Chart with Hooks implemented, including the hook delete policy. +- Output of the `kubectl get pods,svc` command in `HELM.md`. +- Output of all commands from the step 4 of Task 2 in `HELM.md`. + +## Bonus Task: Helm Library Chart + +**To Earn 2.5 Additional Points:** + +1. Helm Chart for Extra App: + - Prepare a Helm chart for an additional application. + +2. Helm Library Charts: + - Get acquainted with [Helm Library Charts](https://helm.sh/docs/topics/library_charts/). + +3. Create a Library Chart: + - Develop a simple library chart that includes a "labels" template. You can follow the steps outlined in [the Using Library Charts guide](https://austindewey.com/2020/08/17/how-to-reduce-helm-chart-boilerplate-with-library-charts/). Use this library chart for both of your applications. + +### Guidelines + +- Ensure your documentation is clear and well-structured. +- Include all the necessary components. 
+- Follow appropriate file and folder naming conventions. +- Create and participate in PRs for the peer review process. +- Create pull requests (PRs) as needed: from your fork to the main branch of this repository, and from your fork's branch to your fork's master branch. + +> Note: Detailed documentation is crucial to ensure that your Helm deployment and hooks function as expected. Engage with the bonus tasks to further enhance your understanding and application deployment skills. diff --git a/lab11.md b/lab11.md new file mode 100644 index 0000000000..ba0c5efda5 --- /dev/null +++ b/lab11.md @@ -0,0 +1,91 @@ +# Lab 11: Kubernetes Secrets and Hashicorp Vault + +## Overview + +In this lab, you will learn how to manage sensitive data, such as passwords, tokens, or keys, within Kubernetes. Additionally, you will configure CPU and memory limits for your application. + +## Task 1: Kubernetes Secrets and Resource Management + +**6 Points:** + +1. Create a Secret Using `kubectl`: + - Learn about Kubernetes Secrets and create a secret using the `kubectl` command: + - [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) + - [Managing Secrets with kubectl](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret) + +2. Verify and Decode Your Secret: + - Confirm and decode the secret, then create an `11.md` file within the `k8s` folder. Provide the output of the necessary commands inside this file. + +3. Manage Secrets with Helm: + - Use Helm to manage your secrets. + - Create a `secrets.yaml` file in the `templates` folder. + - Define a `secret` object within this YAML file. + - Add an `env` field to your `Deployment`. The path to update is: `spec.template.spec.containers.env`. + + > Refer to this [Helm Secrets Video](https://www.youtube.com/watch?v=hRSlKRvYe1A) for guidance. + + - Update your Helm deployment as instructed in the video. + - Retrieve the list of pods using the command `kubectl get po`. 
Use the name of the pod as proof of your success within the report. + - Verify your secret inside the pod, for example: `kubectl exec demo-5f898f5f4c-2gpnd -- printenv | grep MY_PASS`. Share this output in `11.md`. + +4. Create a Pull Request: + - Generate a PR to the main branch of the forked repository. + +5. Create a Pull Request in Your Own Repository: + - Create a PR in your repository from the lab11 branch to the main one. This will facilitate the grading process. + +## Task 2: Vault Secret Management System + +**4 Points:** + +1. Install Vault Using Helm Chart: + - Install Vault using a Helm chart. Follow the steps provided in this guide: + - [Vault Installation Guide](https://developer.hashicorp.com/vault/tutorials/kubernetes/kubernetes-sidecar#install-the-vault-helm-chart) + +2. Follow the Tutorial with Your Helm Chart: + - Adapt the tutorial to work with your Helm chart, including the following steps: + - [Set a Secret in Vault](https://developer.hashicorp.com/vault/tutorials/kubernetes/kubernetes-sidecar#set-a-secret-in-vault) + - [Configure Kubernetes Authentication](https://developer.hashicorp.com/vault/tutorials/kubernetes/kubernetes-sidecar#configure-kubernetes-authentication) + - Be cautious with the service account. If you used `helm create ...`, it will be created automatically. In the guide, they create it manually. + - [Manually Define a Kubernetes Service Account](https://developer.hashicorp.com/vault/tutorials/kubernetes/kubernetes-sidecar#define-a-kubernetes-service-account) + +3. Implement Vault Secrets in Your Helm Chart: + - Use the steps from the guide as an example for your Helm chart: + - [Update values.yaml](https://developer.hashicorp.com/vault/tutorials/kubernetes/kubernetes-sidecar#launch-an-application) + - [Add Labels](https://developer.hashicorp.com/vault/tutorials/kubernetes/kubernetes-sidecar#inject-secrets-into-the-pod) + - Test to ensure your credentials are injected successfully. 
Use the `kubectl exec -it <pod_name> -- bash` command to access the container. Verify the injected secrets using `cat /path/to/your/secret` and `df -h`. Share the output in the `11.md` report. + - Apply a template as described in the guide. Test the updates as you did in the previous step and provide the outputs in `11.md`. + +**List of Requirements:** + +- Proof of work with a secret in `11.md` for the Task 1 - steps 2 and 3. +- `secrets.yaml` file. +- Resource requests and limits for CPU and memory. +- Vault configuration implemented, with proofs in `11.md`. + +## Bonus Task: Resource Management and Environment Variables + +**2.5 Points:** + +1. Read About Resource Management: + - Familiarize yourself with resource management in Kubernetes: + - [Resource Management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) + +2. Set Up Requests and Limits for CPU and Memory for Both Helm Charts: + - Configure resource requests and limits for CPU and memory for your application. + - Test to ensure these configurations work correctly. + +3. Add Environment Variables for Your Containers for Both Helm Charts: + - Read about Kubernetes environment variables: + - [Kubernetes Environment Variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/) + - Update your Helm chart with several environment variables using named templates. Move these variables to the `_helpers.tpl` file: + - [Helm Named Templates](https://helm.sh/docs/chart_template_guide/named_templates/) + +### Guidelines + +- Ensure that your documentation is clear and organized. +- Include all the necessary components. +- Follow appropriate file and folder naming conventions. +- Create pull requests (PRs) as needed: from your fork to the main branch of this repository, and from your fork's branch to your fork's master branch. 
+ +> Note: Thorough documentation is essential to demonstrate your success in managing secrets and resource allocation in Kubernetes. Explore the bonus tasks to enhance your skills further. diff --git a/lab12.md b/lab12.md new file mode 100644 index 0000000000..79d1171972 --- /dev/null +++ b/lab12.md @@ -0,0 +1,74 @@ +# Lab 12: Kubernetes ConfigMaps + +## Overview + +In this lab, you'll delve into Kubernetes ConfigMaps, focusing on managing non-confidential data and upgrading your application for persistence. ConfigMaps provide a way to decouple configuration artifacts from image content, allowing you to manage configuration data separately from the application. + +## Task 1: Upgrade Application for Persistence + +**6 Points:** + +In this task, you'll enhance your application to persist data and explore ConfigMaps in Kubernetes. + +1. Upgrade Your Application: + - Modify your application to: + - Implement a counter logic in your application to keep track of the number of times it's accessed. + - Save the counter number in the `visits` file. + - Introduce a new endpoint `/visits` to display the recorded visits. + - Test the changes: + - Update your `docker-compose.yml` to include a new volume with your `visits` file. + - Verify that the enhancements work as expected, you must see the updated number in the `visits` file on the host machine. + - Update the `README.md` for your application. + +2. Create Pull Requests: + - Submit a PR to merge your changes into the main branch of the forked repository. + - Create a PR from the `lab12` branch to the main branch in your own repository. + +## Task 2: ConfigMap Implementation + +**4 Points:** + +1. Understand ConfigMaps: + - Read about ConfigMaps in Kubernetes: + - [ConfigMaps](https://kubernetes.io/docs/concepts/configuration/configmap/) + +2. Mount a Config File: + - Create a `files` folder with a `config.json` file. + - Populate `config.json` with data in JSON format. 
+ - Use Helm to mount `config.json`: + - Create a `configMap` manifest, extracting data from `config.json` using `.Files.Get`. + - Update `deployment.yaml` with `Volumes` and `VolumeMounts`. + - [Example](https://carlos.mendible.com/2019/02/10/kubernetes-mount-file-pod-with-configmap/) + - Install the updated Helm chart and verify success: + - Retrieve the list of pods: `kubectl get po`. + - Use the pod name as proof of successful deployment. + - Check the ConfigMap inside the pod, e.g., `kubectl exec demo-758cc4d7c4-cxnrn -- cat /config.json`. + +3. Documentation: + - Create `12.md` in the `k8s` folder and include the output of relevant commands. + +**List of Requirements:** + +- `config.json` in the `files` folder. +- `configMap` retrieving data from `config.json` using `.Files.Get`. +- `Volume`s and `VolumeMount`s in `deployments.yml`. +- `12.md` documenting the results of commands. + +## Bonus Task: ConfigMap via Environment Variables + +**2.5 Points:** + +1. Upgrade Bonus App: + - Implement persistence logic in your bonus app. + +2. ConfigMap via Environment Variables: + - Utilize ConfigMap via environment variables in a running container using the `envFrom` property. + - Provide proof with the output of the `env` command inside your container. + +### Guidelines + +- Maintain clear and organized documentation. +- Use appropriate naming conventions for files and folders. +- For your repository PR, ensure it's from the `lab12` branch to the main branch. + +> Note: Clear documentation is crucial to demonstrate successful data persistence and ConfigMap utilization in Kubernetes. Explore the bonus tasks to further enhance your skills. 
diff --git a/lab13.md b/lab13.md new file mode 100644 index 0000000000..bc8fd7f4cd --- /dev/null +++ b/lab13.md @@ -0,0 +1,63 @@ +# Lab 13: Kubernetes StatefulSet + +## Overview + +In this lab, you'll explore Kubernetes StatefulSets, focusing on managing stateful applications with guarantees about the ordering and uniqueness of a set of Pods. + +## Task 1: Implement StatefulSet in Helm Chart + +**6 Points:** + +1. Understand StatefulSets: + + - Read about StatefulSet objects: + - [Concept](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) + - [Tutorial](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/) +2. Update Helm Chart: + + - Rename `deployment.yml` to `statefulset.yml`. + - Create a manifest for StatefulSet following the tutorial. + - Test with command: `helm install --dry-run --debug name_of_your_chart path_to_your_chart`. + - Fix any issues and deploy it. + - Apply best practices by moving values to variables in `values.yml` meaningfully. + +## Task 2: StatefulSet Exploration and Optimization + +**4 Points:** + +1. Research and Documentation: + + - Create `13.md` report. + - Include the output of `kubectl get po,sts,svc,pvc` commands. + - Use `minikube service name_of_your_statefulset` command to access your app. + - Access the root path of your app from different tabs and modes in your browser. + - Check the content of your file in each pod, e.g., `kubectl exec pod/demo-0 -- cat visits`, and provide the output for all replicas. + - Describe and explain differences in the report. +2. Ordering Guarantee and Parallel Operations: + + - Explain why ordering guarantees are unnecessary for your app. + - Implement a way to instruct the StatefulSet controller to launch or terminate all Pods in parallel. + +**List of Requirements:** + +- Outputs of commands in `13.md`. +- Results of the "number of visits" command for each pod, with an explanation in `13.md`. +- Answers to questions in point 2 of `13.md`. 
+- Implementation of parallel launch and terminate. + +## Bonus Task: Update Strategies + +**2.5 Points:** + +1. Apply the main steps to your extra app. +2. Explore Update Strategies: + - Read about update strategies. + - Describe your understanding of kinds and differences in the report. + +**Guidelines:** + +- Maintain clear and organized documentation. +- Use appropriate naming conventions for files and folders. +- For your repository PR, ensure it's from the `lab13` branch to the main branch. + +> Note: Understanding StatefulSets and their optimization is crucial for managing stateful applications in Kubernetes. Explore the bonus tasks to further enhance your skills. diff --git a/lab14.md b/lab14.md new file mode 100644 index 0000000000..add503d42f --- /dev/null +++ b/lab14.md @@ -0,0 +1,72 @@ +# Lab 14: Kubernetes Monitoring and Init Containers + +## Overview + +In this lab, you will explore Kubernetes cluster monitoring using Prometheus with the Kube Prometheus Stack. Additionally, you'll delve into the concept of Init Containers in Kubernetes. + +## Task 1: Kubernetes Cluster Monitoring with Prometheus + +**6 Points:** + +1. Read about `Kube Prometheus Stack`: + - [Helm chart with installation guide](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) + - [Explanation of components](https://github.com/prometheus-operator/kube-prometheus#kubeprometheus) + +2. Describe Components: + - Create `14.md` and detail the components of the Kube Prometheus Stack, explaining their roles and functions. Avoid direct copy-pasting; provide a personal understanding. + +3. Install Helm Charts: + - Install the Kube Prometheus Stack to your Kubernetes cluster. + - Install your app's Helm chart. + - Provide the output of the `kubectl get po,sts,svc,pvc,cm` command in the report and explain each part. + +4. Utilize Grafana Dashboards: + - Access Grafana using `minikube service monitoring-grafana`. 
+ - Explore existing dashboards to find information about your cluster: + 1. Check CPU and Memory consumption of your StatefulSet. + 2. Identify Pods with higher and lower CPU usage in the default namespace. + 3. Monitor node memory usage in percentage and megabytes. + 4. Count the number of pods and containers managed by the Kubelet service. + 5. Evaluate network usage of Pods in the default namespace. + 6. Determine the number of active alerts; also check the Web UI with `minikube service monitoring-kube-prometheus-alertmanager`. + - Provide answers to all these points in the report. + +## Task 2: Init Containers + +**4 Points:** + +1. Read about `Init Containers`: + - [Concept](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) + - [Tutorial](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container) + +2. Implement Init Container: + - Create a new Volume. + - Implement an Init container to download any file using `wget` (you can use a site from the example). + - Provide proof of success, e.g., `kubectl exec pod/demo-0 -- cat /test.html`. + +**List of Requirements:** + +- Detailed explanation of monitoring stack components in `14.md`. +- Output and explanation of `kubectl get po,sts,svc,pvc,cm`. +- Answers to all 6 questions from point 4 in `14.md`. +- Implementation of Init Container. +- Proof of Init Container downloading a file. + +## Bonus Task: App Metrics & Multiple Init Containers + +**2.5 Points:** + +1. App Metrics: + - Fetch metrics from your app and provide proof. + +2. Init Container Queue: + - Create a queue of three Init containers, with any logic like adding new lines to the same file. + - Provide proof using the `cat` tool. + +### Guidelines + +- Ensure clear and organized documentation. +- Use appropriate naming conventions for files and folders. +- For your repository PR, ensure it's from the `lab14` branch to the main branch. 
+ +> Note: Demonstrate successful implementation and understanding of Kubernetes monitoring and Init Containers. Take your time to explore the bonus tasks for additional learning opportunities. diff --git a/lab4.md b/lab4.md index d5eefd0808..93ea4795bc 100644 --- a/lab4.md +++ b/lab4.md @@ -52,7 +52,7 @@ In this lab assignment, you will explore Infrastructure as Code (IAC) using Terr - Avoid placing your token as a variable in the code; instead, use an environment variable. 2. Import Existing Repository: - - Use the `terraform import` command to import your existing GitHub repository into your Terraform configuration. Example: `terraform import "github_repository.core-course-labs" "core-course-labs"`. + - Use the `terraform import` command to import your current GitHub repository into your Terraform configuration. No need to create a new one. Example: `terraform import "github_repository.core-course-labs" "core-course-labs"`. 3. Apply Terraform Changes: - Apply changes from your Terraform configuration to your GitHub repository. @@ -65,6 +65,7 @@ In this lab assignment, you will explore Infrastructure as Code (IAC) using Terr **2.5 Points:** 1. GitHub Teams Using Terraform: + - You need to upgrade your account to organization. - Extend your Terraform configuration to add several teams to your GitHub repository, each with different levels of access. - Apply the changes and ensure they take effect in your GitHub repository. diff --git a/lab5.md b/lab5.md index 63e961624d..a0ef405031 100644 --- a/lab5.md +++ b/lab5.md @@ -64,10 +64,9 @@ In this lab, you will get acquainted with Ansible, a powerful configuration mana 1. Create Your Custom Docker Role: - Develop a custom Ansible role for Docker with the following tasks: - 1. Install pip. - 2. Install Docker using apt and Docker Compose using pip. - 3. Update your playbook to utilize this custom role. [Tricks and Tips](https://docs.ansible.com/ansible/latest/user_guide/playbooks_best_practices.html). - 4. 
Test your playbook with the custom role to ensure successful deployment. + 1. Install Docker and Docker Compose. + 2. Update your playbook to utilize this custom role. [Tricks and Tips](https://docs.ansible.com/ansible/latest/user_guide/playbooks_best_practices.html). + 3. Test your playbook with the custom role to ensure successful deployment. 2. Documentation: - Develop an `ANSIBLE.md` file in the `ansible` folder to document your Ansible-related work. @@ -75,7 +74,7 @@ In this lab, you will get acquainted with Ansible, a powerful configuration mana - Use a Markdown template to describe your Docker role, its requirements and usage. 3. Deployment Output: - - Execute your playbook to deploy the Docker role (either custom or existing). + - Execute your playbook to deploy the Docker role. - Provide the last 50 lines of the output from your deployment command in the `ANSIBLE.md` file. Example command: diff --git a/lab6.md b/lab6.md index ce6e446a6c..cffba2206e 100644 --- a/lab6.md +++ b/lab6.md @@ -69,7 +69,7 @@ In this lab, you will utilize Ansible to set up a Continuous Deployment (CD) pro 1. Create an Extra Playbook: - Develop an additional Ansible playbook specifically for your bonus application. - - Reuse the existing Ansible role you created for your primary application. + - You can reuse the existing Ansible role you created for your primary application or create a new one. - Suggested structure: ```sh diff --git a/lab7.md b/lab7.md new file mode 100644 index 0000000000..48e65eb202 --- /dev/null +++ b/lab7.md @@ -0,0 +1,59 @@ +# Lab 7: Monitoring and Logging + +## Overview + +In this lab, you will become familiar with a logging stack that includes Promtail, Loki, and Grafana. Your goal is to create a Docker Compose configuration and configuration files to set up this logging stack. + +## Task 1: Logging Stack Setup + +**6 Points:** + +1. 
Study the Logging Stack: + - Begin by researching the components of the logging stack: + - [Grafana Webinar: Loki Getting Started](https://grafana.com/go/webinar/loki-getting-started/) + - [Loki Overview](https://grafana.com/docs/loki/latest/overview/) + - [Loki GitHub Repository](https://github.com/grafana/loki) + +2. Create a Monitoring Folder: + - Start by creating a new folder named `monitoring` in your project directory. + +3. Docker Compose Configuration: + - Inside the `monitoring` folder, prepare a `docker-compose.yml` file that defines the entire logging stack along with your application. + - To assist you in this task, refer to these resources for sample Docker Compose configurations: + - [Example Docker Compose Configuration from Loki Repository](https://github.com/grafana/loki/blob/main/production/docker-compose.yaml) + - [Promtail Configuration Example](https://github.com/black-rosary/loki-nginx/blob/master/promtail/promtail.yml) (Adapt it as needed) + +4. Testing: + - Verify that the configured logging stack and your application work as expected. + +## Task 2: Documentation and Reporting + +**4 Points:** + +1. Logging Stack Report: + - Create a new file named `LOGGING.md` to document how the logging stack you've set up functions. + - Provide detailed explanations of each component's role within the stack. + +2. Screenshots: + - Capture screenshots that demonstrate the successful operation of your logging stack. + - Include these screenshots in your `LOGGING.md` report for reference. + +## Bonus Task: Additional Configuration + +**2.5 Points:** + +1. Integrating Your Extra App: + - Extend the `docker-compose.yml` configuration to include your additional application. + +2. Configure Stack for Comprehensive Logging: + - Modify the logging stack's configuration to collect logs from all containers defined in the `docker-compose.yml`. + - Include screenshots in your `LOGGING.md` report to demonstrate your success. 
+ +### Guidelines + +- Ensure that your documentation in `LOGGING.md` is well-structured and comprehensible. +- Follow proper naming conventions for files and folders. +- Use code blocks and Markdown formatting where appropriate. +- Create pull requests (PRs) as needed: from your fork to the main branch of this repository, and from your fork's branch to your fork's master branch. + +> Note: Thoroughly document your work, and ensure the logging stack functions correctly. Utilize the bonus points opportunity to enhance your understanding and the completeness of your setup. diff --git a/lab8.md b/lab8.md new file mode 100644 index 0000000000..8eb0752ec7 --- /dev/null +++ b/lab8.md @@ -0,0 +1,71 @@ +# Lab 8: Monitoring with Prometheus + +## Overview + +In this lab, you will become acquainted with Prometheus, set it up, and configure applications to collect metrics. + +## Task 1: Prometheus Setup + +**6 Points:** + +1. Learn About Prometheus: + - Begin by reading about Prometheus and its fundamental concepts: + - [Prometheus Overview](https://prometheus.io/docs/introduction/overview/) + - [Prometheus Naming Best Practices](https://prometheus.io/docs/practices/naming/) + +2. Integration with Docker Compose: + - Expand your existing `docker-compose.yml` file from the previous lab to include Prometheus. + +3. Prometheus Configuration: + - Configure Prometheus to collect metrics from both Loki and Prometheus containers. + +4. Verify Prometheus Targets: + - Access `http://localhost:9090/targets` to ensure that Prometheus is correctly scraping metrics. + - Capture screenshots that confirm the successful setup and place them in a file named `METRICS.md` within the monitoring folder. + +## Task 2: Dashboard and Configuration Enhancements + +**4 Points:** + +1. Grafana Dashboards: + - Set up dashboards in Grafana for both Loki and Prometheus. 
+ - You can use examples as references: + - [Example Dashboard for Loki](https://grafana.com/grafana/dashboards/13407) + - [Example Dashboard for Prometheus](https://grafana.com/grafana/dashboards/3662) + - Capture screenshots displaying your successful dashboard configurations and include them in `METRICS.md`. + +2. Service Configuration Updates: + - Enhance the configuration of all services in the `docker-compose.yml` file: + - Add log rotation mechanisms. + - Specify memory limits for containers. + - Ensure these changes are documented within your `METRICS.md` file. + +3. Metrics Gathering: + - Extend Prometheus to gather metrics from all services defined in the `docker-compose.yml` file. + +## Bonus Task: Metrics and Health Checks + +**To Earn 2.5 Additional Points:** + +1. Application Metrics: + - Integrate metrics into your applications. You can refer to Python examples like: + - [Monitoring a Synchronous Python Web Application](https://dzone.com/articles/monitoring-your-synchronous-python-web-application) + - [Metrics Monitoring in Python](https://opensource.com/article/18/4/metrics-monitoring-and-python) + +2. Obtain Application Metrics: + - Configure your applications to export metrics. + +3. METRICS.md Update: + - Document your progress with the bonus tasks, including screenshots, in the `METRICS.md` file. + +4. Health Checks: + - Further enhance the `docker-compose.yml` file's service configurations by adding health checks for the containers. + +### Guidelines + +- Maintain a well-structured and comprehensible `METRICS.md` document. +- Adhere to file and folder naming conventions. +- Utilize code blocks and Markdown formatting where appropriate. +- Create pull requests (PRs) as needed: from your fork to the main branch of this repository, and from your fork's branch to your fork's master branch. + +> Note: Ensure thorough documentation of your work, and guarantee that Prometheus correctly collects metrics. 
Take advantage of the bonus tasks to deepen your understanding and enhance the completeness of your setup. diff --git a/lab9.md b/lab9.md new file mode 100644 index 0000000000..5493f042a6 --- /dev/null +++ b/lab9.md @@ -0,0 +1,76 @@ +# Lab 9: Introduction to Kubernetes + +## Overview + +In this lab, you will explore Kubernetes, set up a local development environment, and create manifests for your application. + +## Task 1: Kubernetes Setup and Basic Deployment + +**6 Points:** + +1. Learn About Kubernetes: + - Begin by studying the fundamentals of Kubernetes: + - [What is Kubernetes](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/) + - [Kubernetes Components](https://kubernetes.io/docs/concepts/overview/components/) + +2. Install Kubernetes Tools: + - Install `kubectl` and `minikube`, essential tools for managing Kubernetes. + - [Kubernetes Tools](https://kubernetes.io/docs/tasks/tools/) + +3. Deploy Your Application: + - Deploy your application within the Minikube cluster using the `kubectl create` command. Create a `Deployment` resource for your app. + - [Example of Creating a Deployment](https://kubernetes.io/docs/tutorials/hello-minikube/#create-a-deployment) + - [Deployment Overview](https://kubernetes.io/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro/) + +4. Access Your Application: + - Make your application accessible from outside the Kubernetes virtual network. Achieve this by creating a `Service` resource. + - [Example of Creating a Service](https://kubernetes.io/docs/tutorials/hello-minikube/#create-a-service) + - [Service Overview](https://kubernetes.io/docs/tutorials/kubernetes-basics/expose/expose-intro/) + +5. Create a Kubernetes Folder: + - Establish a `k8s` folder within your repository. + - Create a `README.md` report within this folder and include the output of the `kubectl get pods,svc` command. + +6. 
Cleanup: + - Remove the `Deployment` and `Service` resources that you created, maintaining a tidy Kubernetes environment. + +## Task 2: Declarative Kubernetes Manifests + +**4 Points:** + +1. Manifest Files for Your Application: + - As a more efficient and structured approach, employ configuration files to deploy your application. + - Create a `deployment.yml` manifest file that describes your app's deployment, specifying at least 3 replicas. + - [Kubernetes Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) + - [Declarative Management of Kubernetes Objects Using Configuration Files](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/declarative-config/) + +2. Service Manifest: + - Develop a `service.yml` manifest file for your application. + +3. Manifest Files in `k8s` Folder: + - Store these manifest files in the `k8s` folder of your repository. + - Additionally, provide the output of the `kubectl get pods,svc` command in the `README.md` report. + - Include the output of the `minikube service --all` command and the result from your browser, with a screenshot demonstrating that the IP matches the output of `minikube service --all`. + +## Bonus Task: Additional Configuration and Ingress + +**To Earn 2.5 Additional Points:** + +1. Manifests for Extra App: + - Create `deployment` and `service` manifests for an additional application. + +2. Ingress Manifests: + - Construct [Ingress manifests](https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/) for your applications. + +3. Application Availability Check: + - Utilize `curl` or a similar tool to verify the availability of your applications. Include the output in the report. + +**Guidelines:** + +- Maintain a clear and well-structured `README.md` document. +- Ensure that all required components are included. +- Adhere to file and folder naming conventions. +- Create and participate in PRs to facilitate the peer review process. 
+- Create pull requests (PRs) as needed: from your fork to the main branch of this repository, and from your fork's branch to your fork's master branch. + +> Note: Detailed documentation is crucial to ensure that your Kubernetes deployment is fully functional and accessible. Engage with the bonus tasks to further enhance your understanding and application deployment skills. diff --git a/monitoring/LOGGING.md b/monitoring/LOGGING.md new file mode 100644 index 0000000000..3d7d7e21f7 --- /dev/null +++ b/monitoring/LOGGING.md @@ -0,0 +1,39 @@ +# Logging Stack Report + +## Components + +1. **Promtail** : + + * **Purpose** : Promtail is a log shipper used to tail log files and send them to Loki for storage and analysis. + * **Functionality** : It collects logs from various sources, such as log files or Docker container output, and labels them with metadata before forwarding them to Loki. + * **Interaction** : Promtail interacts with Loki by sending log entries over HTTP using the Loki Push API. + +2. **Loki** : + + * **Purpose** : Loki is a horizontally scalable, highly available log aggregation system designed for cloud-native environments. + * **Functionality** : It stores logs in a manner optimized for efficient querying and retrieval, using labels for indexing and compression for storage. + * **Interaction** : Loki accepts log entries from Promtail and other clients via HTTP requests, indexes them based on labels, and stores them in object storage or a similar backend. + +3. **Grafana** : + + * **Purpose** : Grafana is a visualization and analytics platform used to visualize logs stored in Loki and create dashboards for log exploration. + * **Functionality** : It provides a user-friendly interface for querying and visualizing log data, enabling users to create custom dashboards and alerts. + * **Interaction** : Grafana connects to Loki as a data source, allowing users to query log data and display it in various formats, such as tables, graphs, and histograms. 
+ +## results + +### web_app + +![app_python_monitoring](./pics/app_python_monitoring.png) + +### grafana + +![grafana](./pics/grafana.png) + +### loki + +![loki](./pics/loki.png) + +### promtail + +![promtail](./pics/promtail.png) diff --git a/monitoring/METRICS.md b/monitoring/METRICS.md new file mode 100644 index 0000000000..370e644d07 --- /dev/null +++ b/monitoring/METRICS.md @@ -0,0 +1,53 @@ +# Task1 + +![prometheus_setup](./pics/prometheus.png) + +# Task2 + +## Loki dashboard + +![loki_dashboard](./pics/lokiDashboard.png) + +## Prometheus dashboard + +![prometheus_dashboard](./pics/prometheusDashboard.png) + +## Log rotation mechanisms and memory limit + +```docker-compose +logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" +deploy: + resources: + limits: + memory: 128M +``` + +For logging, I've specified that logs should be stored in JSON format using the "json-file" driver. This ensures that logs are structured and easier to parse for analysis. Additionally, I've set up log rotation, which means that when a log file reaches a size of 10 megabytes, it will be rotated. I've also configured log rotation to retain a maximum of 3 log files (the oldest will be removed if there are more than 3) + +Regarding deployment, I've set a memory limit for each container to 128 megabytes. This restricts the amount of memory that each container can use, helping to prevent one container from consuming too many resources. + +## Gatering information from all services + +![prometheus](./pics/prometheusSetupForTask2.png) + +# Bonus Task + +## metrics integration + +![metrics_integration](./pics/app_python_metrics.png) +I've integrated metrics with the app_python. I used prometheus client library. The image above shows the response of `localhost:5000/metrics` where the app is deployed. 
+ +## health checks +```docker-compose +healthcheck: + test: [ "CMD", "curl", "-f", "app_python:5000/health"] + interval: 30s + timeout: 30s + retries: 3 +``` +I've add `/health` endpoint to the python app. +![health_checks](./pics/healthChecks.png) diff --git a/monitoring/docker-compose.yml b/monitoring/docker-compose.yml new file mode 100644 index 0000000000..390e5b8c93 --- /dev/null +++ b/monitoring/docker-compose.yml @@ -0,0 +1,140 @@ +version: '3.8' + +networks: + monitoring: + name: "monitoring" + +services: + app_python: + image: vectorsmaster/flask-app:latest + container_name: app_python + ports: + - 5000:5000 + labels: + logging: "promtail" + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 256M + networks: + - monitoring + healthcheck: + test: ["CMD", "curl", "-f", "app_python:5000/health"] + interval: 30s + timeout: 10s + retries: 3 + + + grafana: + image: grafana/grafana:latest + container_name: grafana + ports: + - 3000:3000 + volumes: + - ./grafana/grafana.yml:/etc/grafana/provisioning/datasources/datasources.yml + environment: + - GF_AUTH_ANONYMOUS_ENABLED=false + - GF_AUTH_DISABLE_LOGIN_FORM=false + labels: + logging: "promtail" + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 128M + networks: + - monitoring + healthcheck: + test: ["CMD", "curl", "-f", "grafana:3000/api/health"] + interval: 30s + timeout: 15s + retries: 3 + + loki: + image: grafana/loki:latest + container_name: loki + ports: + - 3100:3100 + command: -config.file=/etc/loki/local-config.yaml + labels: + logging: "promtail" + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 128M + networks: + - monitoring + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "loki:3100/ready"] + interval: 30s + timeout: 15s + retries: 3 + + promtail: + image: 
grafana/promtail:latest + container_name: promtail + ports: + - 9080:9080 + volumes: + - ./promtail/promtail.yml:/etc/promtail/promtail.yml + - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/run/docker.sock:/var/run/docker.sock + command: -config.file=/etc/promtail/promtail.yml + labels: + logging: "promtail" + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 128M + networks: + - monitoring + healthcheck: + test: [ "CMD-SHELL", "bash -c ':> /dev/tcp/localhost/9080'" ] + interval: 30s + timeout: 15s + retries: 3 + depends_on: + - loki + + prometheus: + image: prom/prometheus:latest + container_name: prometheus + volumes: + - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml + ports: + - "9090:9090" + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 128M + networks: + - monitoring + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "prometheus:9090/status"] + interval: 30s + timeout: 15s + retries: 3 \ No newline at end of file diff --git a/monitoring/grafana/grafana.yml b/monitoring/grafana/grafana.yml new file mode 100644 index 0000000000..0fa3bb022d --- /dev/null +++ b/monitoring/grafana/grafana.yml @@ -0,0 +1,17 @@ +apiVersion: 1 + +datasources: + - name: Loki + type: loki + access: proxy + url: http://loki:3100 + version: 1 + editable: false + isDefault: true + + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + editable: false + isDefault: false \ No newline at end of file diff --git a/monitoring/pics/app_python_metrics.png b/monitoring/pics/app_python_metrics.png new file mode 100644 index 0000000000..c04b771d03 Binary files /dev/null and b/monitoring/pics/app_python_metrics.png differ diff --git a/monitoring/pics/app_python_monitoring.png b/monitoring/pics/app_python_monitoring.png new file mode 100644 index 0000000000..e5ede31507 
Binary files /dev/null and b/monitoring/pics/app_python_monitoring.png differ diff --git a/monitoring/pics/grafana.png b/monitoring/pics/grafana.png new file mode 100644 index 0000000000..95c87bf3de Binary files /dev/null and b/monitoring/pics/grafana.png differ diff --git a/monitoring/pics/healthChecks.png b/monitoring/pics/healthChecks.png new file mode 100644 index 0000000000..02f03521e9 Binary files /dev/null and b/monitoring/pics/healthChecks.png differ diff --git a/monitoring/pics/logging_rotation&limits.png b/monitoring/pics/logging_rotation&limits.png new file mode 100644 index 0000000000..abc5033486 Binary files /dev/null and b/monitoring/pics/logging_rotation&limits.png differ diff --git a/monitoring/pics/loki.png b/monitoring/pics/loki.png new file mode 100644 index 0000000000..8335d3e06d Binary files /dev/null and b/monitoring/pics/loki.png differ diff --git a/monitoring/pics/lokiDashboard.png b/monitoring/pics/lokiDashboard.png new file mode 100644 index 0000000000..3b381e74ab Binary files /dev/null and b/monitoring/pics/lokiDashboard.png differ diff --git a/monitoring/pics/prometheus.png b/monitoring/pics/prometheus.png new file mode 100644 index 0000000000..b403b68d98 Binary files /dev/null and b/monitoring/pics/prometheus.png differ diff --git a/monitoring/pics/prometheusDashboard.png b/monitoring/pics/prometheusDashboard.png new file mode 100644 index 0000000000..82303f2b02 Binary files /dev/null and b/monitoring/pics/prometheusDashboard.png differ diff --git a/monitoring/pics/prometheusSetupForTask2.png b/monitoring/pics/prometheusSetupForTask2.png new file mode 100644 index 0000000000..ed0a2b6d85 Binary files /dev/null and b/monitoring/pics/prometheusSetupForTask2.png differ diff --git a/monitoring/pics/promtail.png b/monitoring/pics/promtail.png new file mode 100644 index 0000000000..19a4c2b355 Binary files /dev/null and b/monitoring/pics/promtail.png differ diff --git a/monitoring/prometheus/prometheus.yml b/monitoring/prometheus/prometheus.yml 
new file mode 100644 index 0000000000..665ef4907c --- /dev/null +++ b/monitoring/prometheus/prometheus.yml @@ -0,0 +1,23 @@ +global: + scrape_interval: 15s + +scrape_configs: + - job_name: 'loki' + static_configs: + - targets: ['loki:3100'] + + - job_name: 'prometheus' + static_configs: + - targets: ['prometheus:9090'] + + - job_name: 'app_python' + static_configs: + - targets: ['app_python:5000'] + + - job_name: 'grafana' + static_configs: + - targets: ['grafana:3000'] + + - job_name: 'promtail' + static_configs: + - targets: ['promtail:9080'] diff --git a/monitoring/promtail/promtail.yml b/monitoring/promtail/promtail.yml new file mode 100644 index 0000000000..bbe065c10d --- /dev/null +++ b/monitoring/promtail/promtail.yml @@ -0,0 +1,23 @@ +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + - job_name: flog_scrape + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + filters: + - name: label + values: ["logging=promtail"] + relabel_configs: + - source_labels: ['__meta_docker_container_name'] + regex: '/(.*)' + target_label: 'container' + \ No newline at end of file diff --git a/terraform/TF.md b/terraform/TF.md new file mode 100644 index 0000000000..c7f69f33ff --- /dev/null +++ b/terraform/TF.md @@ -0,0 +1,330 @@ +## Docker +1. **`terraform state list`** + ``` + docker_container.nginx + docker_image.nginx + ``` + +2. 
**`terraform state show docker_container.nginx`** + ``` + resource "docker_container" "nginx" { + attach = false + command = [ + "nginx", + "-g", + "daemon off;", + ] + container_read_refresh_timeout_milliseconds = 15000 + cpu_shares = 0 + entrypoint = [ + "/docker-entrypoint.sh", + ] + env = [] + hostname = "2c3401053cc5" + id = "2c3401053cc5a9f9dfcaf3d2c07c187b0bbe5c84c070881872ee936959bec312" + image = "sha256:e4720093a3c1381245b53a5a51b417963b3c4472d3f47fc301930a4f3b17666a" + init = false + ipc_mode = "private" + log_driver = "json-file" + logs = false + max_retry_count = 0 + memory = 0 + memory_swap = 0 + must_run = true + name = "tutorial" + network_data = [ + { + gateway = "172.17.0.1" + global_ipv6_address = "" + global_ipv6_prefix_length = 0 + ip_address = "172.17.0.2" + ip_prefix_length = 16 + ipv6_gateway = "" + mac_address = "02:42:ac:11:00:02" + network_name = "bridge" + }, + ] + network_mode = "default" + privileged = false + publish_all_ports = false + read_only = false + remove_volumes = true + restart = "no" + rm = false + runtime = "runc" + security_opts = [] + shm_size = 64 + start = true + stdin_open = false + stop_signal = "SIGQUIT" + stop_timeout = 0 + tty = false + wait = false + wait_timeout = 60 + + ports { + external = 8000 + internal = 80 + ip = "0.0.0.0" + protocol = "tcp" + } + } + ``` + +3. **`terraform state show docker_image.nginx`** + ``` + resource "docker_image" "nginx" { + id = "sha256:e4720093a3c1381245b53a5a51b417963b3c4472d3f47fc301930a4f3b17666anginx:latest" + image_id = "sha256:e4720093a3c1381245b53a5a51b417963b3c4472d3f47fc301930a4f3b17666a" + keep_locally = false + name = "nginx:latest" + repo_digest = "nginx@sha256:c26ae7472d624ba1fafd296e73cecc4f93f853088e6a9c13c0d52f6ca5865107" + } + ``` + +4. 
**`terraform output`** + ``` + container_id = "3154af8b3156a3c55149041e6b001a63a530131c81e7c8b88d64e7a3087d5ee3" + image_id = "sha256:e4720093a3c1381245b53a5a51b417963b3c4472d3f47fc301930a4f3b17666anginx:latest" + ``` + +## Yandex + +1. **`terraform state list`** + ``` + yandex_compute_disk.boot-disk-1 + yandex_compute_disk.boot-disk-2 + yandex_compute_instance.vm-1 + yandex_compute_instance.vm-2 + yandex_vpc_network.network-1 + yandex_vpc_subnet.subnet-1 + ``` + +2. **`terraform state show yandex_compute_disk.boot-disk-1`** + ``` + # yandex_compute_disk.boot-disk-1: + resource "yandex_compute_disk" "boot-disk-1" { + block_size = 4096 + created_at = "2024-03-04T13:33:16Z" + folder_id = "b1gdfa5g164ijsjslt2f" + id = "epdfujo4qo2oc9a2g8q5" + image_id = "fd8adntm80abl0lh2pa8" + name = "boot-disk-1" + product_ids = [ + "f2emovtn3j6rb7e1vfg5", + ] + size = 20 + status = "ready" + type = "network-hdd" + zone = "ru-central1-b" + + disk_placement_policy {} + } + ``` + +3. **`terraform state show yandex_compute_disk.boot-disk-2`** + ``` + # yandex_compute_disk.boot-disk-2: + resource "yandex_compute_disk" "boot-disk-2" { + block_size = 4096 + created_at = "2024-03-04T13:33:16Z" + folder_id = "b1gdfa5g164ijsjslt2f" + id = "epdvmrp6b4jaj963m5m6" + image_id = "fd8adntm80abl0lh2pa8" + name = "boot-disk-2" + product_ids = [ + "f2emovtn3j6rb7e1vfg5", + ] + size = 20 + status = "ready" + type = "network-hdd" + zone = "ru-central1-b" + + disk_placement_policy {} + } + ``` + +4. 
**`terraform state show yandex_compute_instance.vm-1`** + ``` + # yandex_compute_instance.vm-1: + resource "yandex_compute_instance" "vm-1" { + created_at = "2024-03-04T13:33:26Z" + folder_id = "b1gdfa5g164ijsjslt2f" + fqdn = "epdtvbocd08ep105lmit.auto.internal" + id = "epdtvbocd08ep105lmit" + metadata = { + "ssh-keys" = <<-EOT + ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICrrApeAaCsWZVLgYst1TqSeIHs63hWVgkD4jv7+wjPT sarhan@sarhan-HP + EOT + } + name = "terraform1" + network_acceleration_type = "standard" + platform_id = "standard-v1" + status = "running" + zone = "ru-central1-b" + + boot_disk { + auto_delete = true + device_name = "epdfujo4qo2oc9a2g8q5" + disk_id = "epdfujo4qo2oc9a2g8q5" + mode = "READ_WRITE" + + initialize_params { + block_size = 4096 + image_id = "fd8adntm80abl0lh2pa8" + name = "boot-disk-1" + size = 20 + type = "network-hdd" + } + } + + metadata_options { + aws_v1_http_endpoint = 1 + aws_v1_http_token = 2 + gce_http_endpoint = 1 + gce_http_token = 1 + } + + network_interface { + index = 0 + ip_address = "192.168.10.21" + ipv4 = true + ipv6 = false + mac_address = "d0:0d:1d:fa:f0:c6" + nat = true + nat_ip_address = "84.201.140.230" + nat_ip_version = "IPV4" + security_group_ids = [] + subnet_id = "e2l9h9iskuprghf4574k" + } + + placement_policy { + host_affinity_rules = [] + placement_group_partition = 0 + } + + resources { + core_fraction = 100 + cores = 2 + gpus = 0 + memory = 2 + } + + scheduling_policy { + preemptible = false + } + } + ``` + +5. 
**`terraform state show yandex_compute_instance.vm-2`** + ``` + # yandex_compute_instance.vm-2: + resource "yandex_compute_instance" "vm-2" { + created_at = "2024-03-04T13:33:25Z" + folder_id = "b1gdfa5g164ijsjslt2f" + fqdn = "epdjseb162u4badp1vj4.auto.internal" + id = "epdjseb162u4badp1vj4" + metadata = { + "ssh-keys" = <<-EOT + ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICrrApeAaCsWZVLgYst1TqSeIHs63hWVgkD4jv7+wjPT sarhan@sarhan-HP + EOT + } + name = "terraform2" + network_acceleration_type = "standard" + platform_id = "standard-v1" + status = "running" + zone = "ru-central1-b" + + boot_disk { + auto_delete = true + device_name = "epdvmrp6b4jaj963m5m6" + disk_id = "epdvmrp6b4jaj963m5m6" + mode = "READ_WRITE" + + initialize_params { + block_size = 4096 + image_id = "fd8adntm80abl0lh2pa8" + name = "boot-disk-2" + size = 20 + type = "network-hdd" + } + } + + metadata_options { + aws_v1_http_endpoint = 1 + aws_v1_http_token = 2 + gce_http_endpoint = 1 + gce_http_token = 1 + } + + network_interface { + index = 0 + ip_address = "192.168.10.15" + ipv4 = true + ipv6 = false + mac_address = "d0:0d:13:e3:96:13" + nat = true + nat_ip_address = "84.201.153.200" + nat_ip_version = "IPV4" + security_group_ids = [] + subnet_id = "e2l9h9iskuprghf4574k" + } + + placement_policy { + host_affinity_rules = [] + placement_group_partition = 0 + } + + resources { + core_fraction = 100 + cores = 2 + gpus = 0 + memory = 2 + } + + scheduling_policy { + preemptible = false + } + } + ``` + +7. **`terraform state show yandex_vpc_network.network-1`** + ``` + # yandex_vpc_network.network-1: + resource "yandex_vpc_network" "network-1" { + created_at = "2024-03-04T13:28:10Z" + default_security_group_id = "enpah66ruo6b9ggmi62b" + folder_id = "b1gdfa5g164ijsjslt2f" + id = "enp9o7dfbur6qatnoo3k" + labels = {} + name = "network1" + subnet_ids = [ + "e9bvpeq4k5bf5718ffd2", + ] + } + ``` +8. 
**`terraform state show yandex_vpc_subnet.subnet-1`** + ``` + # yandex_vpc_subnet.subnet-1: + resource "yandex_vpc_subnet" "subnet-1" { + created_at = "2024-03-04T13:32:58Z" + folder_id = "b1gdfa5g164ijsjslt2f" + id = "e2l9h9iskuprghf4574k" + labels = {} + name = "subnet1" + network_id = "enp9o7dfbur6qatnoo3k" + v4_cidr_blocks = [ + "192.168.10.0/24", + ] + v6_cidr_blocks = [] + zone = "ru-central1-b" + } + ``` + +9. **`terraform output`** + ``` + external_ip_address_vm_1 = "84.201.140.230" + external_ip_address_vm_2 = "84.201.153.200" + internal_ip_address_vm_1 = "192.168.10.21" + internal_ip_address_vm_2 = "192.168.10.15" + ``` \ No newline at end of file diff --git a/terraform/docker/main.tf b/terraform/docker/main.tf new file mode 100644 index 0000000000..2846afed19 --- /dev/null +++ b/terraform/docker/main.tf @@ -0,0 +1,25 @@ +terraform { + required_providers { + docker = { + source = "kreuzwerker/docker" + version = "~> 3.0.1" + } + } +} + +provider "docker" {} + +resource "docker_image" "nginx" { + name = "nginx:latest" + keep_locally = false +} + +resource "docker_container" "nginx" { + image = docker_image.nginx.image_id + name = var.container_name + ports { + internal = 80 + external = 8080 + } +} + diff --git a/terraform/docker/outputs.tf b/terraform/docker/outputs.tf new file mode 100644 index 0000000000..96eb698a95 --- /dev/null +++ b/terraform/docker/outputs.tf @@ -0,0 +1,9 @@ +output "container_id" { + description = "ID of the Docker container" + value = docker_container.nginx.id +} + +output "image_id" { + description = "ID of the Docker image" + value = docker_image.nginx.id +} diff --git a/terraform/docker/variable.tf b/terraform/docker/variable.tf new file mode 100644 index 0000000000..a1e71ccf41 --- /dev/null +++ b/terraform/docker/variable.tf @@ -0,0 +1,5 @@ +variable "container_name" { + description = "Value of the name for the Docker container" + type = string + default = "ExampleNginxContainer" +} diff --git a/terraform/github/main.tf 
b/terraform/github/main.tf new file mode 100644 index 0000000000..47f5c6cbd5 --- /dev/null +++ b/terraform/github/main.tf @@ -0,0 +1,43 @@ +### Main.tf ### + +terraform { + required_version = "~> 1.7.4" + required_providers { + github = { + source = "integrations/github" + version = "~> 4.0" + } + } +} + +provider "github" { + token = var.token # or `GITHUB_TOKEN` +} + +#Create and initialise a public GitHub Repository with MIT license and a Visual Studio .gitignore file (incl. issues and wiki) +resource "github_repository" "repo" { + name = "Devops" + description = "Lab4" + visibility = "public" + has_issues = true + has_wiki = true + auto_init = true +} + +#Set default branch 'master' +resource "github_branch_default" "master" { + repository = github_repository.repo.name + branch = "main" +} + +#Create branch protection rule to protect the default branch. (Use "github_branch_protection_v3" resource for Organisation rules) +resource "github_branch_protection" "default" { + repository_id = github_repository.repo.id + pattern = github_branch_default.master.branch + require_conversation_resolution = true + enforce_admins = true + + required_pull_request_reviews { + required_approving_review_count = 1 + } +} diff --git a/terraform/github/variable.tf b/terraform/github/variable.tf new file mode 100644 index 0000000000..b04e46c7c3 --- /dev/null +++ b/terraform/github/variable.tf @@ -0,0 +1,7 @@ + ### Variables.tf ### + +variable "token" { + type = string + description = "Specifies the GitHub PAT token or `GITHUB_TOKEN`" + sensitive = true +} diff --git a/terraform/yandex/main.tf b/terraform/yandex/main.tf new file mode 100644 index 0000000000..77e1aa4dd5 --- /dev/null +++ b/terraform/yandex/main.tf @@ -0,0 +1,85 @@ +terraform { + required_providers { + yandex = { + source = "yandex-cloud/yandex" + } + } + required_version = ">= 0.13" +} + +provider "yandex" { + zone = var.time_zone +} + +resource "yandex_compute_disk" "boot-disk-1" { + name = "boot-disk-1" + type = 
"network-hdd" + zone = var.time_zone + size = "20" + image_id = "fd8anitv6eua45627i0e" +} + +resource "yandex_compute_disk" "boot-disk-2" { + name = "boot-disk-2" + type = "network-hdd" + zone = var.time_zone + size = "20" + image_id = "fd8anitv6eua45627i0e" +} + +resource "yandex_compute_instance" "vm-1" { + name = "terraform1" + + resources { + cores = 2 + memory = 2 + } + + boot_disk { + disk_id = yandex_compute_disk.boot-disk-1.id + } + + network_interface { + subnet_id = yandex_vpc_subnet.subnet-1.id + nat = true + } + + metadata = { + ssh-keys = "ubuntu:${file("~/.ssh/id_ed25519.pub")}" + } +} + +resource "yandex_compute_instance" "vm-2" { + name = "terraform2" + + resources { + cores = 2 + memory = 2 + } + + boot_disk { + disk_id = yandex_compute_disk.boot-disk-2.id + } + + network_interface { + subnet_id = yandex_vpc_subnet.subnet-1.id + nat = true + } + + metadata = { + ssh-keys = "ubuntu:${file("~/.ssh/id_ed25519.pub")}" + } +} + +resource "yandex_vpc_network" "network-1" { + name = "network1" +} + +resource "yandex_vpc_subnet" "subnet-1" { + name = "subnet1" + zone = var.time_zone + network_id = yandex_vpc_network.network-1.id + v4_cidr_blocks = ["192.168.10.0/24"] +} + + diff --git a/terraform/yandex/outputs.tf b/terraform/yandex/outputs.tf new file mode 100644 index 0000000000..f73340783e --- /dev/null +++ b/terraform/yandex/outputs.tf @@ -0,0 +1,15 @@ +output "internal_ip_address_vm_1" { + value = yandex_compute_instance.vm-1.network_interface.0.ip_address +} + +output "internal_ip_address_vm_2" { + value = yandex_compute_instance.vm-2.network_interface.0.ip_address +} + +output "external_ip_address_vm_1" { + value = yandex_compute_instance.vm-1.network_interface.0.nat_ip_address +} + +output "external_ip_address_vm_2" { + value = yandex_compute_instance.vm-2.network_interface.0.nat_ip_address +} \ No newline at end of file diff --git a/terraform/yandex/variable.tf b/terraform/yandex/variable.tf new file mode 100644 index 0000000000..6323ccc367 --- 
/dev/null +++ b/terraform/yandex/variable.tf @@ -0,0 +1,5 @@ +variable "time_zone" { + description = "Yandex timezone" + type = string + default = "ru-central1-b" +} \ No newline at end of file