diff --git a/.github/workflows/app_python.yml b/.github/workflows/app_python.yml new file mode 100644 index 0000000000..768d8a4cd3 --- /dev/null +++ b/.github/workflows/app_python.yml @@ -0,0 +1,127 @@ +name: CI for app_python + +on: + push: + paths: + - 'app_python/**' + - '.github/workflows/app_python.yml' + pull_request: + paths: + - 'app_python/**' + - '.github/workflows/app_python.yml' + +defaults: + run: + working-directory: app_python + +jobs: + lint_and_format: + timeout-minutes: 2 + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.13' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pylint + + - name: Run code formatter (black) + uses: psf/black@stable + with: + options: "--check --diff" + src: "./app_python" + + - name: Run linter (pylint) + run: pylint app.py --disable=R,C + + test: + timeout-minutes: 2 + runs-on: ubuntu-22.04 + needs: lint_and_format + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.13' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pytest requests + + - name: Run tests + run: pytest test_app.py + + security_scan: + timeout-minutes: 5 + runs-on: ubuntu-22.04 + needs: [lint_and_format, test] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Snyk to check for vulnerabilities + uses: snyk/actions/python-3.10@master + with: + args: --skip-unresolved app_python/ + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + + + docker_build_and_push: + timeout-minutes: 10 + runs-on: ubuntu-22.04 + needs: [lint_and_format, test, security_scan] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get short 
commit hash + id: commit + run: echo "SHORT_COMMIT_HASH=$(echo ${GITHUB_SHA} | cut -c1-7)" >> $GITHUB_ENV + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + platforms: linux/amd64,linux/arm64,linux/arm/v7 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + registry: ghcr.io + + - name: Log in to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: docker.io + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: "{{defaultContext}}:app_python" + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/moscow-time:${{ env.SHORT_COMMIT_HASH }} + ${{ secrets.DOCKERHUB_USERNAME }}/moscow-time:${{ env.SHORT_COMMIT_HASH }} + platforms: linux/amd64,linux/arm64,linux/arm/v7 + cache-from: type=gha,type=registry,ref=ghcr.io/${{ github.repository_owner }}/moscow-time:buildcache + cache-to: type=gha,mode=max,type=registry,ref=ghcr.io/${{ github.repository_owner }}/moscow-time:buildcache,mode=max diff --git a/.github/workflows/app_ruby.yml b/.github/workflows/app_ruby.yml new file mode 100644 index 0000000000..afe3ccdb92 --- /dev/null +++ b/.github/workflows/app_ruby.yml @@ -0,0 +1,119 @@ +name: CI for app_ruby + +on: + push: + paths: + - 'app_ruby/**' + - '.github/workflows/app_ruby.yml' + pull_request: + paths: + - 'app_ruby/**' + - '.github/workflows/app_ruby.yml' + +defaults: + run: + working-directory: app_ruby + +jobs: + lint_and_format: + timeout-minutes: 2 + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.4' + bundler-cache: true + + - name: Install dependencies + run: | + gem install bundler + bundle install + + - name: Run RuboCop + run: bundle exec 
rubocop + + test: + timeout-minutes: 2 + runs-on: ubuntu-22.04 + needs: lint_and_format + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.4' + bundler-cache: true + + - name: Install dependencies + run: | + gem install bundler + bundle install + + - name: Run tests + run: bundle exec rspec + + security_scan: + timeout-minutes: 5 + runs-on: ubuntu-22.04 + needs: [lint_and_format, test] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Snyk to check for vulnerabilities + uses: snyk/actions/ruby@master + continue-on-error: true + with: + args: --skip-unresolved app_ruby/ + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + + docker_build_and_push: + timeout-minutes: 10 + runs-on: ubuntu-22.04 + needs: [lint_and_format, test, security_scan] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get short commit hash + id: commit + run: echo "SHORT_COMMIT_HASH=$(echo ${GITHUB_SHA} | cut -c1-7)" >> $GITHUB_ENV + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + platforms: linux/amd64,linux/arm64,linux/arm/v7 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + registry: ghcr.io + + - name: Log in to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: docker.io + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: "{{defaultContext}}:app_ruby" + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/omsk-time:${{ env.SHORT_COMMIT_HASH }} + ${{ secrets.DOCKERHUB_USERNAME }}/omsk-time:${{ env.SHORT_COMMIT_HASH }} + platforms: linux/amd64,linux/arm64,linux/arm/v7 + cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner 
}}/omsk-time:buildcache + cache-to: type=gha,mode=max,type=registry,ref=ghcr.io/${{ github.repository_owner }}/omsk-time:buildcache,mode=max diff --git a/ansible/.gitignore b/ansible/.gitignore new file mode 100644 index 0000000000..cab52b5c29 --- /dev/null +++ b/ansible/.gitignore @@ -0,0 +1,4 @@ +fact_cache/ +__pycache__/ +.ansible/ +yacloud_token diff --git a/ansible/ANSIBLE.md b/ansible/ANSIBLE.md new file mode 100644 index 0000000000..15ea0201d8 --- /dev/null +++ b/ansible/ANSIBLE.md @@ -0,0 +1,252 @@ +# Ansible + +I used *Yandex Cloud Compute VM* that I created with `Terraform` as targets to run my playbooks. + +## Best practices + +- Properly structured Ansible project +- Use Dynamic Inventory for Cloud Environments +- Write Idempotent Playbooks +- Use Handlers for Service Restarts +- Write Reusable Roles +- Use `fact_caching` +- Test Playbooks Before Running on Production + - Use `ansible-lint` to check for best practices. + - Use `--check` mode to preview changes (`ansible-playbook main.yml --check`) +- Use `loop` instead of duplicating tasks + +## Execute playbook to deploy the Docker role + +```bash +ebob@laptop ansible % ansible-playbook playbooks/dev/main.yml -i inventory/yacloud_compute.yml --diff --check + +PLAY [Install and configure Docker] ********************************************************************************** + +TASK [Gathering Facts] *********************************************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker] *************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/docker/tasks/install_docker.yml for vm-1 + +TASK [docker : Update apt package index] ***************************************************************************** +changed: [vm-1] + +TASK [docker : Install required system packages] ********************************************************************* +ok: [vm-1] => 
(item=apt-transport-https) +ok: [vm-1] => (item=ca-certificates) +ok: [vm-1] => (item=curl) +ok: [vm-1] => (item=gnupg-agent) +ok: [vm-1] => (item=software-properties-common) + +TASK [docker : Add Docker's official GPG key] ************************************************************************ +ok: [vm-1] + +TASK [docker : Add Docker's official apt repository] ***************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker and dependencies] ********************************************************************** +ok: [vm-1] + +TASK [docker : Add user to docker group] ***************************************************************************** +ok: [vm-1] + +TASK [docker : Enable Docker service to start on boot] *************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker Compose] ******************************************************************************* +included: /Users/ebob/Code/devops-labs/ansible/roles/docker/tasks/install_docker_compose.yml for vm-1 + +TASK [docker : Install Docker Compose] ******************************************************************************* +ok: [vm-1] + +PLAY RECAP *********************************************************************************************************** +vm-1 : ok=11 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + +WARNING: All log messages before absl::InitializeLog() is called are written to STDERR +E0000 00:00:1739210586.456952 1547116 init.cc:232] grpc_wait_for_shutdown_with_timeout() timed out. 
+``` + +## Inventory Details + +### `ansible-inventory -i .yaml --list` + +```bash +venvebob@laptop ansible % ansible-inventory -i inventory/yacloud_compute.yml --list | tail -n 50 + + }, + "ansible_user_gecos": { + "__ansible_unsafe": "Ubuntu" + }, + "ansible_user_gid": 1000, + "ansible_user_id": { + "__ansible_unsafe": "ubuntu" + }, + "ansible_user_shell": { + "__ansible_unsafe": "/bin/bash" + }, + "ansible_user_uid": 1000, + "ansible_userspace_architecture": { + "__ansible_unsafe": "x86_64" + }, + "ansible_userspace_bits": { + "__ansible_unsafe": "64" + }, + "ansible_virtualization_role": { + "__ansible_unsafe": "NA" + }, + "ansible_virtualization_tech_guest": [], + "ansible_virtualization_tech_host": [], + "ansible_virtualization_type": { + "__ansible_unsafe": "NA" + }, + "discovered_interpreter_python": { + "__ansible_unsafe": "/usr/bin/python3.12" + }, + "gather_subset": [ + { + "__ansible_unsafe": "all" + } + ], + "module_setup": true + } + } + }, + "all": { + "children": [ + "ungrouped", + "yacloud" + ] + }, + "yacloud": { + "hosts": [ + "vm-1" + ] + } +} +``` + +### `ansible-inventory -i .yaml --graph` + +```bash +ebob@laptop ansible % ansible-inventory -i inventory/yacloud_compute.yml --graph +@all: + |--@ungrouped: + |--@yacloud: + | |--vm-1 +``` + +## Application Deployment + +### Deploy `app_python` + +`ansible-playbook playbooks/dev/app_python/main.yml -i inventory/yacloud_compute.yml` + +```shell +ebob@laptop ansible % ansible-playbook playbooks/dev/app_python/main.yml -i inventory/yacloud_compute.yml + +PLAY [Deploy app_python] ********************************************************************************************************************************************** + +TASK [Gathering Facts] ************************************************************************************************************************************************ +ok: [vm-1] + +TASK [docker : Install Docker] 
**************************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/docker/tasks/install_docker.yml for vm-1 + +TASK [docker : Update apt package index] ****************************************************************************************************************************** +changed: [vm-1] + +TASK [docker : Install required system packages] ********************************************************************************************************************** +ok: [vm-1] => (item=apt-transport-https) +ok: [vm-1] => (item=ca-certificates) +ok: [vm-1] => (item=curl) +ok: [vm-1] => (item=gnupg-agent) +ok: [vm-1] => (item=software-properties-common) + +TASK [docker : Add Docker's official GPG key] ************************************************************************************************************************* +ok: [vm-1] + +TASK [docker : Add Docker's official apt repository] ****************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker and dependencies] *********************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Add user to docker group] ****************************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Configure Docker security settings] ******************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Enable Docker service to start on boot] **************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker Compose] 
******************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/docker/tasks/install_docker_compose.yml for vm-1 + +TASK [docker : Install Docker Compose] ******************************************************************************************************************************** +ok: [vm-1] + +TASK [web_app : Full wipe] ******************************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/web_app/tasks/0-wipe.yml for vm-1 + +TASK [web_app : Ensure web_app_dir exists] **************************************************************************************************************************** +changed: [vm-1] + +TASK [web_app : Check if docker-compose.yml exists] ******************************************************************************************************************* +ok: [vm-1] + +TASK [web_app : Wipe images] ****************************************************************************************************************************************** +skipping: [vm-1] + +TASK [web_app : Remove app directory] ********************************************************************************************************************************* +changed: [vm-1] + +TASK [web_app : Deploy dockerized app] ******************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/web_app/tasks/1-deploy.yml for vm-1 + +TASK [web_app : Create app directory] ********************************************************************************************************************************* +changed: [vm-1] + +TASK [web_app : Copy Docker Compose template] 
************************************************************************************************************************* +changed: [vm-1] + +TASK [web_app : Ensure docker service is OK] ************************************************************************************************************************** +ok: [vm-1] + +TASK [web_app : Create and start the services] ************************************************************************************************************************ +changed: [vm-1] + +PLAY RECAP ************************************************************************************************************************************************************ +vm-1 : ok=21 changed=6 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 +``` + +### Wipe `app_python` + +`ansible-playbook playbooks/dev/app_python/main.yml -i inventory/yacloud_compute.yml --tags=wipe` + +```shell +ebob@laptop ansible % ansible-playbook playbooks/dev/app_python/main.yml -i inventory/yacloud_compute.yml --tags=wipe + +PLAY [Deploy app_python] ********************************************************************************************************************************************** + +TASK [Gathering Facts] ************************************************************************************************************************************************ +ok: [vm-1] + +TASK [web_app : Full wipe] ******************************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/web_app/tasks/0-wipe.yml for vm-1 + +TASK [web_app : Ensure web_app_dir exists] **************************************************************************************************************************** +ok: [vm-1] + +TASK [web_app : Check if docker-compose.yml exists] ******************************************************************************************************************* +ok: [vm-1] 
+ +TASK [web_app : Wipe images] ****************************************************************************************************************************************** +changed: [vm-1] + +TASK [web_app : Remove app directory] ********************************************************************************************************************************* +changed: [vm-1] + +PLAY RECAP ************************************************************************************************************************************************************ +vm-1 : ok=6 changed=2 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..65b71a1522 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,13 @@ +[defaults] +inventory = inventory +inventory_plugins = ./inventory_plugins +remote_user = ubuntu +playbook_dir = ./playbooks/ +roles_path = ./roles/ +host_key_checking = False +fact_caching = jsonfile +fact_caching_connection = ./fact_cache +fact_caching_timeout = 86400 + +[inventory] +enable_plugins = yacloud_compute diff --git a/ansible/inventory/yacloud_compute.yml b/ansible/inventory/yacloud_compute.yml new file mode 100644 index 0000000000..5d718a65bb --- /dev/null +++ b/ansible/inventory/yacloud_compute.yml @@ -0,0 +1,4 @@ +plugin: yacloud_compute +yacloud_clouds: cloud-ebob +yacloud_folders: default +yacloud_token_file: ./inventory/yacloud_token diff --git a/ansible/inventory_plugins/yacloud_compute.py b/ansible/inventory_plugins/yacloud_compute.py new file mode 100644 index 0000000000..fb105c61b6 --- /dev/null +++ b/ansible/inventory_plugins/yacloud_compute.py @@ -0,0 +1,171 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ + name: yacloud_compute + plugin_type: inventory + short_description: Yandex.Cloud compute inventory 
source + requirements: + - yandexcloud + extends_documentation_fragment: + - inventory_cache + - constructed + description: + - Get inventory hosts from Yandex Cloud + - Uses a YAML configuration file that ends with C(yacloud_compute.(yml|yaml)). + options: + plugin: + description: Token that ensures this is a source file for the plugin. + required: True + choices: ['yacloud_compute'] + yacloud_token: + description: Oauth token for yacloud connection + yacloud_token_file: + description: File with oauth token for yacloud connection + yacloud_clouds: + description: Names of clouds to get hosts from + type: list + default: [] + yacloud_folders: + description: Names of folders to get hosts from + type: list + default: [] + yacloud_group_label: + description: VM's label used for group assignment + type: string + default: "" +""" + +EXAMPLES = """ +""" + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, Constructable +from ansible.utils.display import Display + +try: + import yandexcloud + from google.protobuf.json_format import MessageToDict + from yandex.cloud.compute.v1.instance_service_pb2 import ListInstancesRequest + from yandex.cloud.compute.v1.instance_service_pb2_grpc import InstanceServiceStub + from yandex.cloud.resourcemanager.v1.cloud_service_pb2 import ListCloudsRequest + from yandex.cloud.resourcemanager.v1.cloud_service_pb2_grpc import CloudServiceStub + from yandex.cloud.resourcemanager.v1.folder_service_pb2 import ListFoldersRequest + from yandex.cloud.resourcemanager.v1.folder_service_pb2_grpc import ( + FolderServiceStub, + ) +except ImportError: + raise AnsibleError("The yacloud dynamic inventory plugin requires yandexcloud") + +display = Display() + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + NAME = "yacloud_compute" + + def verify_file(self, path): + if super(InventoryModule, self).verify_file(path): + if 
path.endswith(("yacloud_compute.yml", "yacloud_compute.yaml")): + return True + display.debug( + "yacloud_compute inventory filename must end with 'yacloud_compute.yml' or 'yacloud_compute.yaml'" + ) + return False + + def _get_ip_for_instance(self, instance): + interfaces = instance["networkInterfaces"] + for interface in interfaces: + address = interface["primaryV4Address"] + if address: + if address.get("oneToOneNat"): + return address["oneToOneNat"]["address"] + else: + return address["address"] + return None + + def _get_clouds(self): + all_clouds = MessageToDict(self.cloud_service.List(ListCloudsRequest()))[ + "clouds" + ] + if self.get_option("yacloud_clouds"): + all_clouds[:] = [ + x for x in all_clouds if x["name"] in self.get_option("yacloud_clouds") + ] + self.clouds = all_clouds + + def _get_folders(self): + all_folders = [] + for cloud in self.clouds: + all_folders += MessageToDict( + self.folder_service.List(ListFoldersRequest(cloud_id=cloud["id"])) + )["folders"] + + if self.get_option("yacloud_folders"): + all_folders[:] = [ + x + for x in all_folders + if x["name"] in self.get_option("yacloud_folders") + ] + + self.folders = all_folders + + def _get_all_hosts(self): + self.hosts = [] + for folder in self.folders: + hosts = self.instance_service.List( + ListInstancesRequest(folder_id=folder["id"]) + ) + dict_ = MessageToDict(hosts) + + if dict_: + self.hosts += dict_["instances"] + + def _init_client(self): + file = self.get_option("yacloud_token_file") + if file is not None: + token = open(file).read().strip() + else: + token = self.get_option("yacloud_token") + if not token: + raise AnsibleError( + "token is empty. 
provide either `yacloud_token_file` or `yacloud_token`" + ) + sdk = yandexcloud.SDK(token=token) + + self.instance_service = sdk.client(InstanceServiceStub) + self.folder_service = sdk.client(FolderServiceStub) + self.cloud_service = sdk.client(CloudServiceStub) + + def _process_hosts(self): + group_label = str(self.get_option("yacloud_group_label")) + + for instance in self.hosts: + if group_label and group_label in instance["labels"]: + group = instance["labels"][group_label] + else: + group = "yacloud" + + self.inventory.add_group(group=group) + if instance["status"] == "RUNNING": + ip = self._get_ip_for_instance(instance) + if ip: + self.inventory.add_host(instance["name"], group=group) + self.inventory.set_variable( + instance["name"], "ansible_host", to_native(ip) + ) + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + self._init_client() + + self._get_clouds() + self._get_folders() + + self._get_all_hosts() + self._process_hosts() diff --git a/ansible/playbooks/dev/app_python/main.yml b/ansible/playbooks/dev/app_python/main.yml new file mode 100644 index 0000000000..5891fb352c --- /dev/null +++ b/ansible/playbooks/dev/app_python/main.yml @@ -0,0 +1,10 @@ +- name: Deploy app_python + hosts: all + become: true + roles: + - web_app + vars: + web_app_name: moscow-time + web_app_internal_port: 8080 + web_app_external_port: 8080 + web_app_full_wipe: true diff --git a/ansible/playbooks/dev/app_ruby/main.yml b/ansible/playbooks/dev/app_ruby/main.yml new file mode 100644 index 0000000000..4c7d4ee35d --- /dev/null +++ b/ansible/playbooks/dev/app_ruby/main.yml @@ -0,0 +1,10 @@ +- name: Deploy app_ruby + hosts: all + become: true + roles: + - web_app + vars: + web_app_name: omsk-time + web_app_internal_port: 4567 + web_app_external_port: 8081 + web_app_full_wipe: true diff --git a/ansible/roles/docker/README.md b/ansible/roles/docker/README.md new file mode 100644 
index 0000000000..8392a19749 --- /dev/null +++ b/ansible/roles/docker/README.md @@ -0,0 +1,18 @@ +# Docker Role + +This role installs and configures Docker and Docker Compose. + +## Requirements + +- Ansible 2.18+ +- Ubuntu 24.04 LTS + +## Usage + +```yaml +- name: Install and configure Docker + hosts: all + roles: + - role: docker + become: true +``` diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..a8a0bf67c7 --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,4 @@ +- name: Restart Docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/install_docker.yml b/ansible/roles/docker/tasks/install_docker.yml new file mode 100644 index 0000000000..fd8e831fff --- /dev/null +++ b/ansible/roles/docker/tasks/install_docker.yml @@ -0,0 +1,59 @@ +- name: Update apt package index + ansible.builtin.apt: + update_cache: true + +- name: Install required system packages + ansible.builtin.apt: + name: "{{ item }}" + state: present + loop: + - apt-transport-https + - ca-certificates + - curl + - gnupg-agent + - software-properties-common + +- name: Add Docker's official GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + +- name: Add Docker's official apt repository + ansible.builtin.apt_repository: + repo: deb https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable + state: present + +- name: Install Docker and dependencies + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + state: present + update_cache: true + +- name: Add user to docker group + ansible.builtin.user: + name: "{{ ansible_user }}" + 
groups: docker + append: true + when: ansible_user != 'root' + notify: Restart Docker + +- name: Configure Docker security settings + ansible.builtin.copy: + dest: /etc/docker/daemon.json + content: | + { + "userns-remap": "default" + } + owner: root + group: root + mode: '0644' + notify: Restart Docker + +- name: Enable Docker service to start on boot + ansible.builtin.systemd: + name: docker + enabled: true + state: started diff --git a/ansible/roles/docker/tasks/install_docker_compose.yml b/ansible/roles/docker/tasks/install_docker_compose.yml new file mode 100644 index 0000000000..066e1b70e9 --- /dev/null +++ b/ansible/roles/docker/tasks/install_docker_compose.yml @@ -0,0 +1,4 @@ +- name: Install Docker Compose + ansible.builtin.package: + name: docker-compose + state: present diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..545706f87f --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,5 @@ +- name: Install Docker + ansible.builtin.include_tasks: install_docker.yml + +- name: Install Docker Compose + ansible.builtin.include_tasks: install_docker_compose.yml diff --git a/ansible/roles/web_app/README.md b/ansible/roles/web_app/README.md new file mode 100644 index 0000000000..182c0f6119 --- /dev/null +++ b/ansible/roles/web_app/README.md @@ -0,0 +1,50 @@ +# Docker Role + +This role deploys application in Docker container using docker compose + +## Requirements + +- Ansible 2.18+ +- Ubuntu 24.04 LTS +- docker role (`../docker/`) + +## Variables + +| Variable Name | Description | Example | +|------------------------|-------------------------------------------------------------------------------------------------------|------------------------| +| web_app_name | The name of the web application. | "web_app" | +| web_app_dir | The directory where the web application is installed, using the value of web_app_name. 
| "/opt/{{ web_app_name }}/ " | +| web_app_docker_registry | The Docker registry where the web application image is hosted. | "docker.io" | +| web_app_docker_username | The username for accessing the Docker registry. | "ebob" | +| web_app_full_wipe | Determines whether a full wipe of the web application is required. | false | +| web_app_image | The full name of the web application image, including the registry, username, and application name. | "{{ web_app_docker_registry }}/{{ web_app_docker_username }}/{{ web_app_name }}" | +| web_app_image_tag | The tag for the web application image. | "latest" | +| web_app_internal_port | The internal port on which the web application operates within the container. | 80 | +| web_app_external_port | The external port on which the web application is accessible outside the container. | 8080 | + +This table provides a clear and organized documentation for each variable, including their descriptions and examples. + +## Tags + +We support tags for wipe only and deploy only, just add them at the end of `ansible-playbook` command: + +```bash +--tags=wipe + +--tags=deploy +``` + +## Usage + +```yaml +- name: Deploy web_app + hosts: all + become: true + roles: + - web_app + vars: + web_app_name: web_app + web_app_internal_port: 8080 + web_app_external_port: 8080 + web_app_full_wipe: true +``` diff --git a/ansible/roles/web_app/defaults/main.yml b/ansible/roles/web_app/defaults/main.yml new file mode 100644 index 0000000000..f186ef4d0d --- /dev/null +++ b/ansible/roles/web_app/defaults/main.yml @@ -0,0 +1,11 @@ +web_app_name: "web_app" +web_app_dir: "/opt/{{ web_app_name }}/" + +web_app_docker_registry: docker.io +web_app_docker_username: ebob + +web_app_full_wipe: false +web_app_image: "{{ web_app_docker_registry }}/{{ web_app_docker_username }}/{{ web_app_name }}" +web_app_image_tag: "v1.0" +web_app_internal_port: 8080 +web_app_external_port: 8080 diff --git a/ansible/roles/web_app/handlers/main.yml 
b/ansible/roles/web_app/handlers/main.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/ansible/roles/web_app/handlers/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/web_app/meta/main.yml b/ansible/roles/web_app/meta/main.yml new file mode 100644 index 0000000000..6ad37f8159 --- /dev/null +++ b/ansible/roles/web_app/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: docker diff --git a/ansible/roles/web_app/tasks/0-wipe.yml b/ansible/roles/web_app/tasks/0-wipe.yml new file mode 100644 index 0000000000..aa63489446 --- /dev/null +++ b/ansible/roles/web_app/tasks/0-wipe.yml @@ -0,0 +1,25 @@ +- name: Ensure web_app_dir exists + ansible.builtin.file: + path: "{{ web_app_dir }}" + state: directory + mode: '0755' + when: web_app_full_wipe + +- name: Check if docker-compose.yml exists + ansible.builtin.stat: + path: "{{ web_app_dir }}/docker-compose.yml" + register: compose_file + +- name: Wipe images + community.docker.docker_compose_v2: + project_src: "{{ web_app_dir }}" + remove_orphans: true + remove_volumes: true + remove_images: all + state: absent + when: compose_file.stat.exists + +- name: Remove app directory + ansible.builtin.file: + path: "{{ web_app_dir }}" + state: absent diff --git a/ansible/roles/web_app/tasks/1-deploy.yml b/ansible/roles/web_app/tasks/1-deploy.yml new file mode 100644 index 0000000000..23dede38a4 --- /dev/null +++ b/ansible/roles/web_app/tasks/1-deploy.yml @@ -0,0 +1,28 @@ +- name: Setup configuration files + block: + - name: Create app directory + ansible.builtin.file: + path: "{{ web_app_dir }}" + state: directory + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: "0755" + + - name: Copy Docker Compose template + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ web_app_dir }}/docker-compose.yml" + mode: "0644" + +- name: Start dockerized app + block: + - name: Ensure docker service is OK + ansible.builtin.service: + name: docker + enabled: true + state: started + - 
name: Create and start the services + community.docker.docker_compose_v2: + project_src: "{{ web_app_dir }}" + remove_orphans: true + state: present diff --git a/ansible/roles/web_app/tasks/main.yml b/ansible/roles/web_app/tasks/main.yml new file mode 100644 index 0000000000..8aa0af5364 --- /dev/null +++ b/ansible/roles/web_app/tasks/main.yml @@ -0,0 +1,14 @@ +- name: Full wipe + when: web_app_full_wipe is defined and web_app_full_wipe + tags: wipe + ansible.builtin.include_tasks: + file: "0-wipe.yml" + apply: + tags: wipe + +- name: Deploy dockerized app + tags: deploy + ansible.builtin.include_tasks: + file: "1-deploy.yml" + apply: + tags: deploy diff --git a/ansible/roles/web_app/templates/docker-compose.yml.j2 b/ansible/roles/web_app/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..b4124f1262 --- /dev/null +++ b/ansible/roles/web_app/templates/docker-compose.yml.j2 @@ -0,0 +1,6 @@ +services: + app: + image: "{{ web_app_image }}:{{ web_app_image_tag }}" + ports: + - target: "{{ web_app_internal_port }}" + published: "{{ web_app_external_port }}" diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..93403c3ebc --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,33 @@ +# Ignore Python bytecode and cache files +__pycache__/ +*.mypy_cache/ +*.pytest_cache/ +*.pyc +*.pyo + +# Ignore virtual environments +venv/ +env/ + +# Ignore editor-specific files +*.idea/ +*.vscode/ +*.DS_Store + +# Ignore documentation files +*.md + +# Ignore Docker-related files +Dockerfile +.dockerignore + +# Ignore testing files +test_app.py + +# Ignore git related files +.git/ +.gitignore +.gitattributes +.gitmodules +.gitkeep +.pre-commit-config.yaml diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..b2e1518ce2 --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,7 @@ +# Cache +__pycache__/ +.mypy_cache/ +.pytest_cache/ + +# Virtual Environment +venv/ diff --git 
a/app_python/.pre-commit-config.yaml b/app_python/.pre-commit-config.yaml new file mode 100644 index 0000000000..2b18280562 --- /dev/null +++ b/app_python/.pre-commit-config.yaml @@ -0,0 +1,49 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-added-large-files + + + - repo: https://github.com/psf/black + rev: 24.10.0 + hooks: + - id: black + + - repo: https://github.com/PyCQA/flake8 + rev: 7.1.1 + hooks: + - id: flake8 + args: ["--max-line-length=88"] + + - repo: https://github.com/pre-commit/mirrors-isort + rev: v5.10.1 + hooks: + - id: isort + + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.14.1 + hooks: + - id: mypy + additional_dependencies: + - bottle==0.13.2 + - pytest==8.3.4 + - requests==2.32.3 + - "types-requests" + + - repo: https://github.com/pycqa/bandit + rev: 1.8.2 + hooks: + - id: bandit + args: ["--severity-level", "high"] + + - repo: https://github.com/PyCQA/pylint + rev: v3.3.3 + hooks: + - id: pylint + additional_dependencies: + - bottle==0.13.2 + - pytest==8.3.4 + - requests==2.32.3 diff --git a/app_python/CI.md b/app_python/CI.md new file mode 100644 index 0000000000..22ebb6527e --- /dev/null +++ b/app_python/CI.md @@ -0,0 +1,30 @@ +# CI Workflow for `app_python` + +[![CI for app_python](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_python.yml/badge.svg?branch=lab3)](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_python.yml) + +This project uses a CI workflow to automatically lint, test, check security, and build/push Docker images whenever there are changes to the application. Below are the key steps that the workflow performs: + +## 1. **Code Quality Check** + +- **Linting**: The code is checked using **Pylint** to ensure it follows best practices. +- **Code Formatting**: **Black** is used to automatically check that the code is properly formatted. + +## 2. 
**Testing** + +- **Pytest** is used to run tests to verify that the code behaves as expected. + +## 3. **Security Scan** + +- **Snyk** is used to scan the dependencies for known vulnerabilities and security risks. + +## 4. **Docker Build & Push** + +- The application is built into a Docker image using **Docker Buildx**. +- The image is then pushed to **GitHub Container Registry** and **DockerHub**. + +## Key Features of This CI Workflow + +- **Caching**: We cache dependencies and Docker layers to speed up the workflow. +- **Security**: Credentials (like tokens and passwords) are stored securely using GitHub Secrets. +- **Fast Feedback**: The workflow runs fast by stopping early on errors and continuing with other checks. +- **Docker Integration**: The app is automatically built into a Docker image and pushed to registries. diff --git a/app_python/DOCKER.md b/app_python/DOCKER.md new file mode 100644 index 0000000000..d9adc1322f --- /dev/null +++ b/app_python/DOCKER.md @@ -0,0 +1,42 @@ +# Docker best practices + +## **Use of an Official Base Image** + +I used lightweight Alpine-based official Python image `python:3.13.1-alpine3.21`, which significantly reduces the image size with pinned version tag to ensure consistency across builds and avoids unexpected issues due to updates in the base image. + +## **Non-Root User** + +I create user with limited permissions and use it in the container. This mitigates the risk of privilege escalation in case of an exploit. + +## **Optimized build process with layer caching** + +This layer will only be rebuilt if `requirements.txt` changes, leveraging Docker’s layer caching and reducing build times. + +## **.dockerignore** + +I keep the `.dockerignore` file clean and avoid using the `COPY . .` in the Dockerfile, to make sure that only the necessary files will be added to the image. 
+ +## **Use Haskell Dockerfile Linter** + +[Hadolint](https://github.com/hadolint/hadolint) is a smarter Dockerfile linter that helps to build best practice Docker images. + +## **Docker Scout** + +I analyze the image with Docker Scout to find out possible vulnerabilities and fix them. + +Docker Scout + +## **DockerHub** + +Image is available on [DockerHub](https://hub.docker.com/repository/docker/ebob/moscow-time/tags/v1.0/sha256-963767cb63ad8759727d0507f84fa4891bffe760742a9509bd899a49a7873757) + +## **Distroless Image** + +Additionally, I build a distroless image. I didn't create an additional user because I used a container with a `nonroot` tag. The distroless image appeared to be 20 MB larger than the original one. I think this is because we don't compile the python program into a binary file, so reducing the size is not an advantage. But the distroless container is a very good solution in terms of security: it does not contain a shell or other utilities, which reduces the attack surface. + +Here is an image size comparison: + +![image](https://github.com/user-attachments/assets/37cbc610-a7a2-4da1-bcab-34a81515347b) + + +I uploaded it to [DockerHub](https://hub.docker.com/repository/docker/ebob/moscow-time/tags/v1.0-distroless/sha256-cee4db447ea129aca4c6a05e045e3de5758d01343a68345abbdd93b6affae59d) too diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..224d20cc8e --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,29 @@ +# Use an official Python image as the base +FROM python:3.13.1-alpine3.21 + +# Set environment variables to make the image more secure +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 + +# Create non-root user +RUN addgroup -S appgroup && \ + adduser -S -G appgroup appuser + +# Set the working directory in the container +WORKDIR /app + +# Copy the requirements file and install dependencies +COPY requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt + +# Copy the rest of the application code into the container +COPY app.py . + +# Switch to non-root user +USER appuser + +# Expose the port that the app will run on +EXPOSE 8080 + +# Command to run the app using Gunicorn +CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:8080", "app:app", "--access-logfile", "-", "--error-logfile", "-"] diff --git a/app_python/PYTHON.md b/app_python/PYTHON.md new file mode 100644 index 0000000000..fcc82266d3 --- /dev/null +++ b/app_python/PYTHON.md @@ -0,0 +1,63 @@ +# Python Web Application + +## Framework choice + +To develop this web application, I chose the [**bottle**](https://bottlepy.org/docs/dev/) web framework for Python. Bottle is a lightweight Python framework, making it ideal for small, straightforward applications like this one. + +## Best practices + +I implemented the following Python best practices: + +* Follow PEP 8 Code Style + +* Documenting code with Docstrings and Comments + +* Using pre-commit hooks with the following tools: + + * [`black`](https://github.com/psf/black) code formatter + + * [`flake8`](https://github.com/PyCQA/flake8) code checker + + * [`isort`](https://github.com/PyCQA/isort) library sorter + + * [`mypy`](https://github.com/python/mypy) static type checker + + * [`bandit`](https://github.com/PyCQA/bandit) security analyzer + + * [`pylint`](https://github.com/pylint-dev/pylint) static code analyser + +* Maintain a clean `.gitignore` file + +* Manage Dependencies with `requirements.txt` + +* Using the [gunicorn](https://gunicorn.org/) WSGI server instead of bottle's default one + +## Testing code + +For automatic code testing I use the [`pytest`](https://docs.pytest.org/) framework. + +Here are a few code testing best practices I've learned and applied: + +1. Use a Clear Testing Strategy + * Apply **unit tests** for individual functions or components. + * Use **integration tests** to verify how different parts interact. 
+ * Implement **end-to-end tests** to ensure the whole application behaves correctly. + +2. Isolate Unit Tests + * Unit tests should not depend on external systems (e.g., databases, APIs, file systems). + * Mock external dependencies where needed. + +3. Test Edge Cases + * Validate boundary conditions (e.g., leap years). + +### How to run tests? + +I use `pytest 8.3.4`, so first of all we need to install it: + +* ``` pip install pytest==8.3.4 ``` + +Also for testing we need `requests` + +* ``` pip install requests==2.32.3 ``` + +* ``` pytest test_app.py ``` diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..179315fa1e --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,153 @@ +# Moscow Time Web Application + +[![CI for app_python](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_python.yml/badge.svg?branch=lab3)](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_python.yml) + +## Overview + +This is a web app that shows current date and time in Moscow. It is written in Python with Bottle framework. + +- Endpoint `/visits` displays the recorded visits. +- Endpoint `/metrics` displays Prometheus metrics. + +## Requirements + +* Python 3.12 + +## Installation + +Clone this repository: + +```bash +git clone https://github.com/cuprum-acid/devops-labs.git -b lab1 +``` + +Open directory: + +```bash +cd devops-labs/app_python +``` + +Install virtual environment and dependencies: + +```bash +python -m venv venv +``` + +```bash +source venv/bin/activate # Linux/Mac +``` + +```bash +venv\Scripts\activate # Windows +``` + +```bash +pip install -r requirements.txt +``` + +Run the app: + +```bash +python app.py +``` + +Now it is available on `localhost:8080` in browser. 
Or you can run in terminal: + +```bash +curl localhost:8080 +``` + +## Run tests + +If you want to run automatic tests, then you need to install additional packages: + +```bash +pip install pytest==8.3.4 +``` + +```bash +pip install requests==2.32.3 +``` + +They were not included in `requirements.txt` because they are not required to run application + +After that run: + +```bash +pytest test_app.py +``` + +## Docker + +### Build + +```bash +cd devops-labs/app_python +``` + +```bash +docker build -t ebob/moscow-time:v1.2 . +``` + +### Build for multi-arch + +```bash +docker buildx create --use +``` + +```bash +docker buildx build \ + --push \ + --platform linux/arm/v7,linux/arm64/v8,linux/amd64 \ + --tag ebob/moscow-time:v1.2 \ + . +``` + +### Pull and Run + +```bash +docker pull ebob/moscow-time:v1.2 +``` + +```bash +docker run -d --name msk -p 8080:8080 -v $(pwd)/data:/tmp ebob/moscow-time:v1.2 +``` + +Now it is available on `localhost:8080` + +## Docker Compose + +```bash +docker compose up --build +``` + +## Distroless Docker Image + +### Build + +```bash +docker build -t ebob/moscow-time:v1.0-distroless -f distroless.Dockerfile . +``` + +### Pull and Run + +```bash +docker pull ebob/moscow-time:v1.0-distroless +``` + +```bash +docker run -d --name msk-distroless -p 8081:8080 ebob/moscow-time:v1.0-distroless +``` + +Now it is available on `localhost:8081` + +## Continuous Integration + +This repository contains a CI pipeline configuration for the python application. The CI pipeline is managed with `GitHub Actions` and includes multiple jobs to ensure the code quality, functionality, security, and successful deployment of the application. + +The pipeline consists of these main jobs: + +1. Lint and Format: Ensures the code follows linting and formatting standards. +2. Test: Runs tests to verify the correctness of the application. +3. Security Scan: Checks for security vulnerabilities in the codebase using `Snyk` tool. +4. 
Docker Build and Push: Builds and pushes a Docker image to the DockerHub and ghcr. diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..4089f9fe4e --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,77 @@ +"""Bottle web application that shows the current time and date in Moscow. +Author: Evgeny B. +""" + +import os +from datetime import datetime, timedelta, timezone +from bottle import Bottle, response, run +from prometheus_client import CONTENT_TYPE_LATEST, Counter, generate_latest + +# Create a Bottle app instance +app = Bottle() +m_requests = Counter("http_requests_total", "Total HTTP Requests") + +# Define the MSK timezone (UTC+3) +MSK_TIMEZONE = timezone(timedelta(hours=3)) + +VISITS_FILE = "/tmp/visits" + + +def get_visits(): + """Read the visits count from file.""" + if not os.path.exists(VISITS_FILE): + return 0 + with open(VISITS_FILE, "r", encoding="utf-8") as visits_file: + try: + return int(visits_file.read().strip()) + except ValueError: + return 0 + + +def update_visits(): + """Increment the visits count and save to file.""" + os.makedirs(os.path.dirname(VISITS_FILE), exist_ok=True) + visit_count = get_visits() + 1 + with open(VISITS_FILE, "w", encoding="utf-8") as visits_file: + visits_file.write(str(visit_count)) + return visit_count + + +@app.route("/metrics") +def metrics(): + """Expose Prometheus metrics.""" + response.content_type = CONTENT_TYPE_LATEST + return generate_latest() + + +@app.route("/") +def show_time(): + """Show the current time and date in Moscow.""" + m_requests.inc() + visit_count = update_visits() + # Get the current time in Moscow + now = datetime.now(MSK_TIMEZONE) + formatted_time = now.strftime("%H:%M:%S") + formatted_date = now.strftime("%d.%m.%Y") + + # Set the response content type to HTML + response.content_type = "text/html; charset=utf-8" + return ( + f"

Current time and date in Moscow

" + f"

Time: {formatted_time}

" + f"

Date: {formatted_date}

" + f"

Visits: {visit_count}

" + ) + + +@app.route("/visits") +def visits_page(): + """Display the number of visits.""" + response.content_type = "text/plain; charset=utf-8" + return f"Visits: {get_visits()}\n" + + +# Run the Bottle app +if __name__ == "__main__": + # Run the Bottle app on the server + run(app, host="0.0.0.0", port=8080, debug=False, reloader=False) diff --git a/app_python/data/visits b/app_python/data/visits new file mode 100644 index 0000000000..e69de29bb2 diff --git a/app_python/distroless.Dockerfile b/app_python/distroless.Dockerfile new file mode 100644 index 0000000000..e0147de79a --- /dev/null +++ b/app_python/distroless.Dockerfile @@ -0,0 +1,31 @@ +# Use an official Python image as the base for building the app +FROM python:3.13.1-alpine3.21 AS builder + +# Set environment variables to make the image more secure +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 + +# Set the working directory in the container +WORKDIR /app + +# Copy the requirements file and install dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt && cp "$(which gunicorn)" /app + +# Copy the rest of the application code into the container +COPY app.py . + +# Use Distroless as the runtime image +FROM gcr.io/distroless/python3-debian12:nonroot AS runtime + +# Copy application files from builder +WORKDIR /app +COPY --from=builder /app /app +COPY --from=builder /usr/local/lib/python3.13/site-packages /usr/local/lib/python3.11/site-packages +ENV PYTHONPATH=/usr/local/lib/python3.11/site-packages + +# Expose the port that the app will run on +EXPOSE 8080 + +# Command to run the app using Gunicorn +CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:8080", "app:app"] diff --git a/app_python/docker-compose.yml b/app_python/docker-compose.yml new file mode 100644 index 0000000000..d6e1767729 --- /dev/null +++ b/app_python/docker-compose.yml @@ -0,0 +1,7 @@ +services: + app: + build: . 
+ ports: + - "8080:8080" + volumes: + - ./data:/tmp diff --git a/app_python/requirements.txt b/app_python/requirements.txt new file mode 100644 index 0000000000..c6614da4cc --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,3 @@ +bottle==0.13.2 +gunicorn==23.0.0 +prometheus-client==0.21.1 diff --git a/app_python/test_app.py b/app_python/test_app.py new file mode 100644 index 0000000000..076fc1596f --- /dev/null +++ b/app_python/test_app.py @@ -0,0 +1,114 @@ +"""Tests for the Bottle application.""" + +import re +import threading +import time +import unittest +from datetime import datetime, timedelta +from subprocess import Popen + +import pytest +import requests +from app import MSK_TIMEZONE, app + +BASE_URL = "http://127.0.0.1:8080/" + + +def wait_for_server(url, timeout=5): + """Utility function to wait for the server to be up.""" + start_time = time.time() + while time.time() - start_time < timeout: + try: + response = requests.get(url, timeout=1) + if response.status_code == 200: + return True + except requests.exceptions.ConnectionError: + time.sleep(0.5) + raise RuntimeError("Server did not start in time") + + +class TestAppUnit(unittest.TestCase): + """Unit tests for the Bottle application.""" + + def test_msk_timezone(self): + """Test if the Moscow timezone is correctly set.""" + self.assertEqual(MSK_TIMEZONE.utcoffset(None), timedelta(hours=3)) + + def test_show_time_format(self): + """Test if the time is formatted correctly.""" + now = datetime(2025, 1, 1, 15, 30, 45, tzinfo=MSK_TIMEZONE) + formatted_time = now.strftime("%H:%M:%S") + self.assertEqual(formatted_time, "15:30:45") + + def test_show_date_format(self): + """Test if the date is formatted correctly.""" + now = datetime(2025, 1, 1, tzinfo=MSK_TIMEZONE) + formatted_date = now.strftime("%d.%m.%Y") + self.assertEqual(formatted_date, "01.01.2025") + + def test_leap_year(self): + """Test if the application handles leap years correctly.""" + leap_date = datetime(2024, 2, 29, tzinfo=MSK_TIMEZONE) 
+ formatted_date = leap_date.strftime("%d.%m.%Y") + self.assertEqual(formatted_date, "29.02.2024") + + def test_midnight_rollover(self): + """Test if the application correctly handles midnight rollover.""" + before_midnight = datetime(2025, 1, 1, 23, 59, 59, tzinfo=MSK_TIMEZONE) + after_midnight = before_midnight + timedelta(seconds=1) + self.assertEqual(after_midnight.strftime("%H:%M:%S"), "00:00:00") + + +@pytest.fixture(scope="module", autouse=True) +def start_server(): + """Fixture to start the Bottle server before tests and stop after.""" + + with Popen(["python", "app.py"]) as process: + wait_for_server(BASE_URL, timeout=5) + yield + process.terminate() + + +def test_root_route(): + """Test if the root route returns the correct HTML response.""" + response = requests.get(BASE_URL, timeout=5) + + # Verify response status and headers + assert response.status_code == 200 + assert response.headers["Content-Type"].startswith("text/html") + + # Verify response content + assert "Current time and date in Moscow" in response.text + assert re.search(r"

Time: \d{2}:\d{2}:\d{2}

", response.text) + assert re.search(r"

Date: \d{2}\.\d{2}\.\d{4}

", response.text) + + +class TestAppE2E(unittest.TestCase): + """End-to-end tests for the Bottle application.""" + + @classmethod + def setUpClass(cls): + """Start the server in a separate thread.""" + cls.server_thread = threading.Thread( + target=lambda: app.run( + host="127.0.0.1", port=8080, debug=False, quiet=True + ), + daemon=True, + ) + cls.server_thread.start() + wait_for_server(BASE_URL, timeout=5) + + @classmethod + def tearDownClass(cls): + """Stop the server.""" + cls.server_thread.join(0) + + def test_root_endpoint(self): + """Test if the '/' endpoint returns the correct response.""" + response = requests.get(BASE_URL, timeout=5) + self.assertEqual(response.status_code, 200) + self.assertIn("Current time and date in Moscow", response.text) + self.assertIn("Time:", response.text) + self.assertIn("Date:", response.text) + self.assertRegex(response.text, r"

Time: \d{2}:\d{2}:\d{2}

") + self.assertRegex(response.text, r"

Date: \d{2}\.\d{2}\.\d{4}

" diff --git a/app_ruby/.dockerignore b/app_ruby/.dockerignore new file mode 100644 index 0000000000..66125b81ce --- /dev/null +++ b/app_ruby/.dockerignore @@ -0,0 +1,25 @@ +# Ignore editor-specific files +*.idea/ +*.vscode/ +*.DS_Store + +# Ignore documentation files +*.md + +# Ignore Docker-related files +Dockerfile +.dockerignore + +# Ignore testing files +spec/ + +# Ignore git related files +.git/ +.gitignore +.gitattributes +.gitmodules +.gitkeep + +# Ignore configuration files +.pre-commit-config.yaml +.rubocop.yml diff --git a/app_ruby/.gitignore b/app_ruby/.gitignore new file mode 100644 index 0000000000..e43b0f9889 --- /dev/null +++ b/app_ruby/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/app_ruby/.pre-commit-config.yaml b/app_ruby/.pre-commit-config.yaml new file mode 100644 index 0000000000..c91233f5f6 --- /dev/null +++ b/app_ruby/.pre-commit-config.yaml @@ -0,0 +1,23 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-added-large-files + + - repo: local + hooks: + - id: rubocop + name: RuboCop + entry: rubocop --auto-correct + language: system + files: \.rb$ + + - repo: local + hooks: + - id: rails_best_practices + name: Rails Best Practices + entry: rails_best_practices + language: system + files: ^app_ruby/ diff --git a/app_ruby/.reek.yml b/app_ruby/.reek.yml new file mode 100644 index 0000000000..25c30c83da --- /dev/null +++ b/app_ruby/.reek.yml @@ -0,0 +1,3 @@ +detectors: + UtilityFunction: + enabled: false diff --git a/app_ruby/.rubocop.yml b/app_ruby/.rubocop.yml new file mode 100644 index 0000000000..747f8af4ea --- /dev/null +++ b/app_ruby/.rubocop.yml @@ -0,0 +1,9 @@ +AllCops: + NewCops: enable + TargetRubyVersion: 3.4 + +Metrics/ClassLength: + Max: 100 + +Style/Documentation: + Enabled: false diff --git a/app_ruby/CI.md b/app_ruby/CI.md new file mode 100644 index 0000000000..8b0d84e08e --- /dev/null +++ b/app_ruby/CI.md @@ -0,0 +1,30
@@ +# CI Workflow for `app_ruby` + +[![CI for app_ruby](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_ruby.yml/badge.svg)](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_ruby.yml) + +This project uses a CI workflow to automatically lint, test, check security, and build/push Docker images whenever there are changes to the application. Below are the key steps that the workflow performs: + +## 1. **Code Quality Check** + +- **Linting**: The code is checked using **RuboCop** to enforce Ruby style and best practices. +- **Code Formatting**: **RuboCop** also ensures that the code is properly formatted. + +## 2. **Testing** + +- **RSpec** is used to run tests and verify that the application works as expected. + +## 3. **Security Scan** + +- **Snyk** is used to scan the dependencies for known vulnerabilities and security risks. + +## 4. **Docker Build & Push** + +- The application is built into a Docker image using **Docker Buildx**. +- The image is then pushed to **GitHub Container Registry** and **DockerHub**. + +## Key Features of This CI Workflow + +- **Caching**: We cache dependencies and Docker layers to speed up the workflow. +- **Security**: Credentials (like tokens and passwords) are stored securely using GitHub Secrets. +- **Fast Feedback**: The workflow runs fast by stopping early on errors and continuing with other checks. +- **Docker Integration**: The app is automatically built into a Docker image and pushed to registries. diff --git a/app_ruby/DOCKER.md b/app_ruby/DOCKER.md new file mode 100644 index 0000000000..c2189f28d4 --- /dev/null +++ b/app_ruby/DOCKER.md @@ -0,0 +1,41 @@ +# Docker best practices + +## **Use of an Official Base Image** + +I used lightweight Alpine-based official Ruby image `ruby:3.4.1-alpine3.21`, which significantly reduces the image size with pinned version tag to ensure consistency across builds and avoids unexpected issues due to updates in the base image. 
+ +## **Non-Root User** + +I create a user with limited permissions and use it in the container. This mitigates the risk of privilege escalation in case of an exploit. + +## **Optimized build process with layer caching** + +This layer will only be rebuilt if `Gemfile` changes, leveraging Docker’s layer caching and reducing build times. + +## **.dockerignore** + +I keep the `.dockerignore` file clean and avoid using the `COPY . .` in the Dockerfile, to make sure that only the necessary files will be added to the image. + +## **Use Haskell Dockerfile Linter** + +[Hadolint](https://github.com/hadolint/hadolint) is a smarter Dockerfile linter that helps to build best practice Docker images. + +## **Docker Scout** + +I analyze the image with Docker Scout to find out possible vulnerabilities and fix them. + +Docker Scout + +## **DockerHub** + +Image is available on [DockerHub](https://hub.docker.com/repository/docker/ebob/omsk-time/tags/v1.0/sha256-0d436c0125cf7307f573fa7f7cf3b7ab2671ba3fe1455babeb08ee45f213ec11) + +## **Distroless Image** + +Additionally, I build a distroless image. I didn't create an additional user because I used a container with a `nonroot` tag. Distroless image appeared to be 70 MB larger than the original one. I think this is because we don't compile the ruby program into a binary file, so reducing the size is not an advantage. Also `alpine` base image did not work in distroless environment, so I used `slim` which is a little bit larger. But the distroless container is a very good solution in terms of security: it does not contain a shell or other utilities, which reduces the attack surface. 
+ +Here is image size comparison: + +Comparison Size + +I upload it on [DockerHub](https://hub.docker.com/repository/docker/ebob/omsk-time/tags/v1.0-distroless/sha256-f7e2aba76f6b08839e08129c95aa371841a48f780116d12e9e8f66840b20c3f8) too diff --git a/app_ruby/Dockerfile b/app_ruby/Dockerfile new file mode 100644 index 0000000000..5f54959801 --- /dev/null +++ b/app_ruby/Dockerfile @@ -0,0 +1,31 @@ +FROM ruby:3.4.1-alpine3.21 + +ENV BUNDLE_WITHOUT=development:test \ + LANG=C.UTF-8 + +# Install system dependencies +RUN apk add --no-cache \ + gcc=14.2.0-r4 \ + musl-dev=1.2.5-r9 \ + tzdata=2025a-r0 \ + make=4.4.1-r2 + +RUN addgroup -S appgroup && \ + adduser -S -G appgroup appuser + +WORKDIR /app + +RUN gem install nio4r:2.7.4 -- --use-system-libraries && \ + gem install bundler:2.6.3 + +COPY Gemfile Gemfile.lock ./ + +RUN bundle install + +COPY public/styles.css views/index.erb app.rb config.ru ./ + +USER appuser + +EXPOSE 4567 + +CMD ["ruby", "app.rb"] diff --git a/app_ruby/Gemfile b/app_ruby/Gemfile new file mode 100644 index 0000000000..1c207317f0 --- /dev/null +++ b/app_ruby/Gemfile @@ -0,0 +1,19 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +# Gems for running the app (production) +gem 'prometheus-client', '~> 4.2' +gem 'puma', '~> 6.5' +gem 'rack', '~> 3.1', '>= 3.1.10' +gem 'rackup', '~> 2.2' +gem 'sinatra', '~> 4.1' +gem 'time', '~> 0.4' +gem 'tzinfo', '~> 2.0' + +# Gems for development and testing +group :development, :test do + gem 'rack-test', '~> 2.2' + gem 'rspec' + gem 'rubocop', '~> 1.71', require: false +end diff --git a/app_ruby/Gemfile.lock b/app_ruby/Gemfile.lock new file mode 100644 index 0000000000..cdb2b91ac7 --- /dev/null +++ b/app_ruby/Gemfile.lock @@ -0,0 +1,98 @@ +GEM + remote: https://rubygems.org/ + specs: + ast (2.4.2) + base64 (0.2.0) + concurrent-ruby (1.3.5) + date (3.4.1) + diff-lcs (1.5.1) + json (2.9.1) + language_server-protocol (3.17.0.3) + logger (1.6.5) + mustermann (3.0.3) + ruby2_keywords (~> 0.0.1) + nio4r 
(2.7.4) + parallel (1.26.3) + parser (3.3.7.0) + ast (~> 2.4.1) + racc + prometheus-client (4.2.4) + base64 + puma (6.5.0) + nio4r (~> 2.0) + racc (1.8.1) + rack (3.1.10) + rack-protection (4.1.1) + base64 (>= 0.1.0) + logger (>= 1.6.0) + rack (>= 3.0.0, < 4) + rack-session (2.1.0) + base64 (>= 0.1.0) + rack (>= 3.0.0) + rack-test (2.2.0) + rack (>= 1.3) + rackup (2.2.1) + rack (>= 3) + rainbow (3.1.1) + regexp_parser (2.10.0) + rspec (3.13.0) + rspec-core (~> 3.13.0) + rspec-expectations (~> 3.13.0) + rspec-mocks (~> 3.13.0) + rspec-core (3.13.2) + rspec-support (~> 3.13.0) + rspec-expectations (3.13.3) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.13.0) + rspec-mocks (3.13.2) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.13.0) + rspec-support (3.13.2) + rubocop (1.71.0) + json (~> 2.3) + language_server-protocol (>= 3.17.0) + parallel (~> 1.10) + parser (>= 3.3.0.2) + rainbow (>= 2.2.2, < 4.0) + regexp_parser (>= 2.9.3, < 3.0) + rubocop-ast (>= 1.36.2, < 2.0) + ruby-progressbar (~> 1.7) + unicode-display_width (>= 2.4.0, < 4.0) + rubocop-ast (1.37.0) + parser (>= 3.3.1.0) + ruby-progressbar (1.13.0) + ruby2_keywords (0.0.5) + sinatra (4.1.1) + logger (>= 1.6.0) + mustermann (~> 3.0) + rack (>= 3.0.0, < 4) + rack-protection (= 4.1.1) + rack-session (>= 2.0.0, < 3) + tilt (~> 2.0) + tilt (2.6.0) + time (0.4.1) + date + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + unicode-display_width (3.1.4) + unicode-emoji (~> 4.0, >= 4.0.4) + unicode-emoji (4.0.4) + +PLATFORMS + arm64-darwin-23 + ruby + +DEPENDENCIES + prometheus-client (~> 4.2) + puma (~> 6.5) + rack (~> 3.1, >= 3.1.10) + rack-test (~> 2.2) + rackup (~> 2.2) + rspec + rubocop (~> 1.71) + sinatra (~> 4.1) + time (~> 0.4) + tzinfo (~> 2.0) + +BUNDLED WITH + 2.6.3 diff --git a/app_ruby/README.md b/app_ruby/README.md new file mode 100644 index 0000000000..4cbd95cf7e --- /dev/null +++ b/app_ruby/README.md @@ -0,0 +1,112 @@ +# Ruby Web Application + +[![CI for 
app_ruby](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_ruby.yml/badge.svg)](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_ruby.yml) + +## Overview + +This application shows current time in **Omsk** + +## Requirements + +* Ruby 3.4 + +## Installation + +Clone this repository: + +```bash +git clone https://github.com/cuprum-acid/devops-labs.git -b lab1 +``` + +Open directory: + +```bash +cd devops-labs/app_ruby +``` + +Install bundler: + +```bash +gem install bundler +``` + +Install dependencies from `Gemfile`: + +```bash +bundle install +``` + +Run the app: + +```bash +ruby app.rb +``` + +Open `localhost:4567` in browser or run: + +```bash +curl localhost:4567 +``` + +## Test + +To run auto-tests: + +```bash +rspec spec/app_spec.rb +``` + +## Docker + +### Build + +```bash +cd devops-labs/app_ruby +``` + +```bash +docker build -t ebob/omsk-time:v1.0 . +``` + +### Pull and Run + +```bash +docker pull ebob/omsk-time:v1.0 +``` + +```bash +docker run -d --name omsk -p 4567:4567 ebob/omsk-time:v1.0 +``` + +Now it is available on `localhost:4567` + +## Distroless Docker Image + +### Build + +```bash +docker build -t ebob/omsk-time:v1.0-distroless -f distroless.Dockerfile . +``` + +### Pull and Run + +```bash +docker pull ebob/omsk-time:v1.0-distroless +``` + +```bash +docker run -d --name omsk-distroless -p 4568:4567 ebob/omsk-time:v1.0-distroless +``` + +Now it is available on `localhost:4568` + +## Continuous Integration + +This repository contains a CI pipeline configuration for the Ruby application. The CI pipeline is managed with `GitHub Actions` and includes multiple jobs to ensure the code quality, functionality, security, and successful deployment of the application. + +The pipeline consists of these main jobs: + +1. Lint and Format: Ensures the code follows linting and formatting standards. +2. Test: Runs tests to verify the correctness of the application. +3. 
Security Scan: Checks for security vulnerabilities in the codebase using `Snyk` tool. +4. Docker Build and Push: Builds and pushes a Docker image to the DockerHub and ghcr. diff --git a/app_ruby/RUBY.md b/app_ruby/RUBY.md new file mode 100644 index 0000000000..c8245fe38d --- /dev/null +++ b/app_ruby/RUBY.md @@ -0,0 +1,25 @@ +# Ruby Web Application + +## Framework choice + +To develop this web application, I chose the [**Sinatra**](https://sinatrarb.com/) web framework for Ruby. Sinatra is a lightweight web framework that is perfect for small applications. + +## Best practices + +I implemented the following best practices in the development of this Ruby web application: + +* MVC architectural pattern + +* Tools for Code Quality & Security + + * [`RuboCop`](https://github.com/rubocop/rubocop): A static code analyzer and style checker for Ruby + + * [`Reek`](https://github.com/troessner/reek): A code smell detector + + * [`rails_best_practices`](https://github.com/flyerhzm/rails_best_practices) : A tool that checks for best practices in Ruby on Rails applications, even if you're using Sinatra framework + +* I use `Gemfile` to manage dependencies + +## Testing code + +In addition to manually testing the application by running it and verifying the output, I have written automated tests using the `RSpec` testing framework. These tests ensure that the application behaves as expected and that future changes do not introduce bugs. diff --git a/app_ruby/app.rb b/app_ruby/app.rb new file mode 100644 index 0000000000..889f227816 --- /dev/null +++ b/app_ruby/app.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +require 'sinatra' +require 'tzinfo' +require 'prometheus/client' +require 'prometheus/client/formats/text' + +VISITS_FILE = '/tmp/visits' + +def read_visits + File.exist?(VISITS_FILE) ? 
File.read(VISITS_FILE).to_i : 0 +end + +def increment_visits + visits = read_visits + 1 + File.write(VISITS_FILE, visits) + visits +end + +# Explicitly set trusted_hosts to allow Prometheus and local connections +set :protection, except: [:json_csrf] +set :bind, '0.0.0.0' +set :environment, :production + +# Allow access from any host +use Rack::Protection::HostAuthorization, + trusted_hosts: [/^localhost$/, /^127\.0\.0\.1$/, /^.+\.monitoring$/, /^omsk-time-app$/, /^prometheus$/, /.*/, nil] + +# Initialize Prometheus registry and metrics +prometheus = Prometheus::Client.registry + +# Define metrics +http_requests_total = Prometheus::Client::Counter.new( + :http_requests_total, + docstring: 'Total number of HTTP requests', + labels: %i[method path status] +) + +http_request_duration_seconds = Prometheus::Client::Histogram.new( + :http_request_duration_seconds, + docstring: 'HTTP request duration in seconds', + labels: %i[method path] +) + +timezone_lookup_errors = Prometheus::Client::Counter.new( + :timezone_lookup_errors_total, + docstring: 'Total number of timezone lookup errors' +) + +# Register metrics +prometheus.register(http_requests_total) +prometheus.register(http_request_duration_seconds) +prometheus.register(timezone_lookup_errors) + +# Middleware to track request metrics +before do + @start_time = Time.now +end + +after do + duration = Time.now - @start_time + http_requests_total.increment(labels: { method: request.request_method, path: request.path, status: response.status }) + http_request_duration_seconds.observe(duration, labels: { method: request.request_method, path: request.path }) +end + +get '/' do + content_type 'text/plain' + visits = increment_visits + + # Set the timezone to Omsk + begin + timezone = TZInfo::Timezone.get('Asia/Omsk') + + # Get the current time in Omsk timezone + omsk_time = timezone.now.strftime('%Y-%m-%d %H:%M:%S') + + # Display the time and visit count + "Current time in Omsk: #{omsk_time}\nTotal visits: #{visits}" + rescue 
TZInfo::InvalidTimezoneIdentifier => e + timezone_lookup_errors.increment + "Error: Invalid timezone identifier - #{e.message}" + rescue StandardError => e + timezone_lookup_errors.increment + "Error: #{e.message}" + end +end + +# Endpoint to get visit count +get '/visits' do + content_type 'text/plain' + "Total visits: #{read_visits}" +end + +# Prometheus metrics endpoint with explicit host check bypass +get '/metrics' do + # Skip host protection for the metrics endpoint + env['rack.protection.host_authorization'] = true + + # Set the content type with explicit charset parameter + content_type 'text/plain; version=0.0.4; charset=utf-8' + + # Marshal the metrics + Prometheus::Client::Formats::Text.marshal(prometheus) +end diff --git a/app_ruby/config.ru b/app_ruby/config.ru new file mode 100644 index 0000000000..d7eef6c86b --- /dev/null +++ b/app_ruby/config.ru @@ -0,0 +1,9 @@ +# frozen_string_literal: true + +require './app' + +# Configure Rack directly +use Rack::Protection, except: %i[host_authorization json_csrf] + +# Run the application +run Sinatra::Application diff --git a/app_ruby/data/visits b/app_ruby/data/visits new file mode 100644 index 0000000000..e69de29bb2 diff --git a/app_ruby/distroless.Dockerfile b/app_ruby/distroless.Dockerfile new file mode 100644 index 0000000000..f1ee7c8fe1 --- /dev/null +++ b/app_ruby/distroless.Dockerfile @@ -0,0 +1,40 @@ +FROM ruby:3.4.1-slim AS builder + +ENV BUNDLE_WITHOUT=development:test \ + LANG=C.UTF-8 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc=4:12.2.0-3 \ + musl-dev=1.2.3-1 \ + tzdata=2024b-0+deb12u1 \ + make=4.3-4.1 && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +RUN gem install nio4r:2.7.4 -- --use-system-libraries && \ + gem install bundler:2.6.3 + +COPY Gemfile Gemfile.lock ./ + +RUN bundle install && cp "$(which ruby)" /app + +COPY public/styles.css views/index.erb app.rb config.ru ./ + +FROM gcr.io/distroless/base-debian12:nonroot AS runtime + 
+WORKDIR /app +COPY --from=builder /app /app +COPY --from=builder /usr/local /usr/local +COPY --from=builder /usr/lib /usr/lib +COPY --from=builder /usr/local/bin/ruby /usr/local/bin/ruby +COPY --from=builder /usr/local/lib/ruby/3.4.0 /usr/local/lib/ruby/3.4.0 +COPY --from=builder /usr/local/bundle /usr/local/bundle + +ENV PATH=/usr/local/bundle/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +ENV GEM_HOME=/usr/local/bundle + +EXPOSE 4567 + +CMD ["./ruby", "app.rb"] diff --git a/app_ruby/docker-compose.yml b/app_ruby/docker-compose.yml new file mode 100644 index 0000000000..ea6f3a06d1 --- /dev/null +++ b/app_ruby/docker-compose.yml @@ -0,0 +1,7 @@ +services: + app: + build: . + ports: + - "4567:4567" + volumes: + - ./data:/tmp diff --git a/app_ruby/public/styles.css b/app_ruby/public/styles.css new file mode 100644 index 0000000000..8116798834 --- /dev/null +++ b/app_ruby/public/styles.css @@ -0,0 +1,13 @@ +body { + font-family: Arial, sans-serif; + margin: 0 auto; + max-width: 600px; + } + + h1 { + text-align: center; + } + + form { + margin-bottom: 10px; + } diff --git a/app_ruby/spec/app_spec.rb b/app_ruby/spec/app_spec.rb new file mode 100644 index 0000000000..1ead1ad0c6 --- /dev/null +++ b/app_ruby/spec/app_spec.rb @@ -0,0 +1,74 @@ +# frozen_string_literal: true + +require 'rack/test' +require './app' + +describe 'Omsk Time Web App - Basic Responses' do + include Rack::Test::Methods + + def app + Sinatra::Application + end + + describe 'GET /' do + it 'responds successfully' do + header 'Host', 'localhost' + get '/' + expect(last_response).to be_ok + end + + it 'displays the current time in Omsk' do + header 'Host', 'localhost' + get '/' + expect(last_response.body).to match(/Current time in Omsk: \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}/) + end + + it 'returns text/plain content type' do + header 'Host', 'localhost' + get '/' + expect(last_response.headers['Content-Type']).to include('text/plain') + end + end +end + +describe 'Omsk Time Web App - 
Error Handling' do + include Rack::Test::Methods + + def app + Sinatra::Application + end + + it 'handles invalid timezone errors gracefully' do + allow(TZInfo::Timezone).to receive(:get).and_raise(TZInfo::InvalidTimezoneIdentifier, 'Invalid timezone') + + header 'Host', 'localhost' + get '/' + + expect(last_response).to be_ok + expect(last_response.body).to include('Error: Invalid timezone identifier - Invalid timezone') + end + + it 'handles unexpected errors gracefully' do + allow(TZInfo::Timezone).to receive(:get).and_raise(StandardError, 'Unexpected error') + + header 'Host', 'localhost' + get '/' + + expect(last_response).to be_ok + expect(last_response.body).to include('Error: Unexpected error') + end +end + +describe 'Omsk Time Web App - Time Format' do + include Rack::Test::Methods + + def app + Sinatra::Application + end + + it 'displays time in YYYY-MM-DD HH:MM:SS format' do + header 'Host', 'localhost' + get '/' + expect(last_response.body).to match(/\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}/) + end +end diff --git a/app_ruby/views/index.erb b/app_ruby/views/index.erb new file mode 100644 index 0000000000..3a0b84b503 --- /dev/null +++ b/app_ruby/views/index.erb @@ -0,0 +1,31 @@ + + + + + Omsk Current Time + + + +
+

Current Time in Omsk

+

<%= @current_time.strftime("%Y-%m-%d %H:%M:%S %Z") %>

+
+ + diff --git a/k8s/11.md b/k8s/11.md new file mode 100644 index 0000000000..87fc828ccb --- /dev/null +++ b/k8s/11.md @@ -0,0 +1,353 @@ +# Kubernetes Secrets and Hashicorp Vault + +## Task 1: Kubernetes Secrets and Resource Management + +### Create a Secret + +Use `kubectl create secret`: + +```bash +kubectl create secret generic my-secret --from-literal=username=username --from-literal=password=password +``` + +Example: + +```bash +ebob@laptop devops-labs % kubectl create secret generic eugengold-secret --from-literal=username=eugengold --from-literal=password=amogus123 +secret/eugengold-secret created +``` + +### Verify the Secret + +```bash +kubectl get secrets +``` + +Example: + +```bash +ebob@laptop devops-labs % kubectl get secrets +NAME TYPE DATA AGE +eugengold-secret Opaque 2 91s +nginx-ingress-ingress-nginx-admission Opaque 3 4d22h +``` + +View details: + +```bash +kubectl describe secret eugengold-secret +``` + +Example: + +```bash +ebob@laptop devops-labs % kubectl describe secret eugengold-secret +Name: eugengold-secret +Namespace: default +Labels: +Annotations: + +Type: Opaque + +Data +==== +username: 9 bytes +password: 9 bytes +``` + +### Decode the Secret + +Kubernetes stores secret data in Base64 encoding. To decode it, use: + +```bash +kubectl get secret eugengold-secret -o jsonpath="{.data.username}" | base64 --decode +echo +kubectl get secret eugengold-secret -o jsonpath="{.data.password}" | base64 --decode +echo +``` + +Example: + +```bash +ebob@laptop devops-labs % kubectl get secret eugengold-secret -o jsonpath="{.data.username}" | base64 --decode +echo +kubectl get secret eugengold-secret -o jsonpath="{.data.password}" | base64 --decode +echo +eugengold +amogus123 +``` + +### Manage Secrets with Helm + +I followed steps from the [video](https://www.youtube.com/watch?v=hRSlKRvYe1A) and used GPG key to encode my secret. I can store it directly in the repository. 
+ +Installation of the chart is performed via `helm secrets install moscow-time ./moscow-time -n default -f ./moscow-time/secrets.yaml`: + +```bash +ebob@laptop k8s % helm secrets install moscow-time ./moscow-time -n default -f ./moscow-time/secrets.yaml +[helm-secrets] Decrypt: ./moscow-time/secrets.yaml +NAME: moscow-time +LAST DEPLOYED: Wed Mar 5 00:16:20 2025 +NAMESPACE: default +STATUS: deployed +REVISION: 1 +[helm-secrets] Removed: ./moscow-time/secrets.yaml.dec +ebob@laptop k8s % helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +moscow-time default 1 2025-03-05 00:16:20.928616 +0300 MSK deployed moscow-time-0.1.0 1.1 +``` + +```bash +ebob@laptop k8s % kubectl get po +NAME READY STATUS RESTARTS AGE +moscow-time-67946d5f79-2xbh4 1/1 Running 0 27m +moscow-time-67946d5f79-bk9zb 1/1 Running 0 27m +moscow-time-67946d5f79-kr9wl 1/1 Running 0 27m +ebob@laptop k8s % kubectl exec moscow-time-67946d5f79-2xbh4 -- printenv | grep MY_PASS +MY_PASS=amogus123 +``` + +## Task 2: Vault Secret Management System + +### Install Vault Using Helm Chart: + +```bash +ebob@laptop k8s % helm repo add hashicorp https://helm.releases.hashicorp.com +"hashicorp" has been added to your repositories +``` + +```bash +ebob@laptop k8s % helm repo update +Hang tight while we grab the latest from your chart repositories... +...Successfully got an update from the "hashicorp" chart repository +...Successfully got an update from the "ingress-nginx" chart repository +...Successfully got an update from the "external-secrets" chart repository +...Successfully got an update from the "prometheus-community" chart repository +...Successfully got an update from the "grafana" chart repository +Update Complete. ⎈Happy Helming!⎈ +``` + +```bash +ebob@laptop k8s % helm install vault hashicorp/vault --set "server.dev.enabled=true" +NAME: vault +LAST DEPLOYED: Wed Mar 5 00:54:06 2025 +NAMESPACE: default +STATUS: deployed +REVISION: 1 +NOTES: +Thank you for installing HashiCorp Vault! 
+ +Now that you have deployed Vault, you should look over the docs on using +Vault with Kubernetes available here: + +https://developer.hashicorp.com/vault/docs + + +Your release is named vault. To learn more about the release, try: + + $ helm status vault + $ helm get manifest vault +``` + +```bash +ebob@laptop k8s % kubectl get pods +NAME READY STATUS RESTARTS AGE +moscow-time-67946d5f79-2xbh4 1/1 Running 0 46m +moscow-time-67946d5f79-bk9zb 1/1 Running 0 46m +moscow-time-67946d5f79-kr9wl 1/1 Running 0 46m +vault-0 1/1 Running 0 9m3s +vault-agent-injector-66f45b5fd5-hnvjp 1/1 Running 0 9m4s +``` + +### Set a secret in Vault + +Follow [Vault Installation Guide](https://developer.hashicorp.com/vault/tutorials/kubernetes/kubernetes-sidecar#install-the-vault-helm-chart): + +```bash +ebob@laptop k8s % kubectl exec -it vault-0 -- /bin/sh +/ $ vault secrets enable -path=internal kv-v2 +Success! Enabled the kv-v2 secrets engine at: internal/ +/ $ vault kv put internal/database/config username="db-readonly-username" password="db-secret-password" +======== Secret Path ======== +internal/data/database/config + +======= Metadata ======= +Key Value +--- ----- +created_time 2025-03-04T22:09:50.090774888Z +custom_metadata +deletion_time n/a +destroyed false +version 1 +/ $ vault kv get internal/database/config +======== Secret Path ======== +internal/data/database/config + +======= Metadata ======= +Key Value +--- ----- +created_time 2025-03-04T22:09:50.090774888Z +custom_metadata +deletion_time n/a +destroyed false +version 1 + +====== Data ====== +Key Value +--- ----- +password db-secret-password +username db-readonly-username +``` + +Follow guide, patch deployment: + +```bash +kubectl patch deployment moscow-time --patch-file=patch.yaml +``` + +Verify result: + +```bash +ebob@laptop moscow-time % kubectl exec -it moscow-time-5c56f64c59-47kdr -- sh +Defaulted container "moscow-time" out of: moscow-time, vault-agent, vault-agent-init (init) +/app $ ls +app.py requirements.txt 
+/app $ cd .. +/ $ ls +app bin dev etc home lib media mnt opt proc root run sbin srv sys tmp usr var vault +/ $ cd vault/secrets/ +/vault/secrets $ ls +database-config.txt +/vault/secrets $ cat database-config.txt +postgresql://db-readonly-username:db-secret-password@postgres:5432/wizard/vault/secrets $ +/vault/secrets $ df -h +Filesystem Size Used Available Use% Mounted on +overlay 58.4G 48.5G 6.9G 88% / +tmpfs 64.0M 0 64.0M 0% /dev +shm 64.0M 0 64.0M 0% /dev/shm +tmpfs 3.8G 4.0K 3.8G 0% /vault/secrets +/dev/vda1 58.4G 48.5G 6.9G 88% /dev/termination-log +/dev/vda1 58.4G 48.5G 6.9G 88% /etc/resolv.conf +/dev/vda1 58.4G 48.5G 6.9G 88% /etc/hostname +/dev/vda1 58.4G 48.5G 6.9G 88% /etc/hosts +tmpfs 3.8G 12.0K 3.8G 0% /run/secrets/kubernetes.io/serviceaccount +tmpfs 64.0M 0 64.0M 0% /proc/kcore +tmpfs 64.0M 0 64.0M 0% /proc/keys +tmpfs 64.0M 0 64.0M 0% /proc/timer_list +tmpfs 1.9G 0 1.9G 0% /proc/scsi +tmpfs 1.9G 0 1.9G 0% /sys/firmware +``` + +## Resources + +Resources are configured in `values.yaml` + +App after apply changs: + +```bash +ebob@laptop moscow-time % kubectl describe deployments.apps moscow-time +Name: moscow-time +Namespace: default +CreationTimestamp: Wed, 05 Mar 2025 00:16:50 +0300 +Labels: app.kubernetes.io/instance=moscow-time + app.kubernetes.io/managed-by=Helm + app.kubernetes.io/name=moscow-time + app.kubernetes.io/version=1.1 + helm.sh/chart=moscow-time-0.1.0 +Annotations: deployment.kubernetes.io/revision: 4 + meta.helm.sh/release-name: moscow-time + meta.helm.sh/release-namespace: default +Selector: app.kubernetes.io/instance=moscow-time,app.kubernetes.io/name=moscow-time +Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable +StrategyType: RollingUpdate +MinReadySeconds: 0 +RollingUpdateStrategy: 25% max unavailable, 25% max surge +Pod Template: + Labels: app.kubernetes.io/instance=moscow-time + app.kubernetes.io/name=moscow-time + Annotations: vault.hashicorp.com/agent-inject: true + 
vault.hashicorp.com/agent-inject-secret-database-config.txt: internal/data/database/config + vault.hashicorp.com/agent-inject-status: update + vault.hashicorp.com/agent-inject-template-database-config.txt: + {{- with secret "internal/data/database/config" -}} + postgresql://{{ .Data.data.username }}:{{ .Data.data.password }}@postgres:5432/wizard + {{- end -}} + vault.hashicorp.com/role: internal-app + Service Account: internal-app + Containers: + moscow-time: + Image: ebob/moscow-time:v1.1 + Port: 80/TCP + Host Port: 0/TCP + Environment: + ENVIRONMENT: stage + Mounts: + Volumes: + Node-Selectors: + Tolerations: +Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable +OldReplicaSets: moscow-time-67946d5f79 (0/0 replicas created), moscow-time-8db9dcb6c (0/0 replicas created), moscow-time-5c56f64c59 (0/0 replicas created) +NewReplicaSet: moscow-time-58dc48f497 (3/3 replicas created) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 52m deployment-controller Scaled up replica set moscow-time-8db9dcb6c from 0 to 1 +``` + +## Environmental variables + +```bash +ebob@laptop moscow-time % kubectl exec -it moscow-time-58dc48f497-4h2bz -- env +Defaulted container "moscow-time" out of: moscow-time, vault-agent, vault-agent-init (init) +PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +HOSTNAME=moscow-time-58dc48f497-4h2bz +TERM=xterm +ENVIRONMENT=stage +KUBERNETES_PORT_443_TCP_PROTO=tcp +KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1 +MOSCOW_TIME_SERVICE_HOST=10.99.90.244 +VAULT_PORT_8200_TCP_PROTO=tcp +VAULT_PORT_8200_TCP_ADDR=10.98.88.189 +VAULT_AGENT_INJECTOR_SVC_SERVICE_PORT=443 +VAULT_AGENT_INJECTOR_SVC_PORT_443_TCP_PROTO=tcp +KUBERNETES_SERVICE_PORT=443 +VAULT_SERVICE_HOST=10.98.88.189 +VAULT_SERVICE_PORT=8200 +VAULT_PORT_8201_TCP_PROTO=tcp +VAULT_PORT_8201_TCP_ADDR=10.98.88.189 
+VAULT_AGENT_INJECTOR_SVC_PORT_443_TCP_PORT=443 +KUBERNETES_SERVICE_HOST=10.96.0.1 +KUBERNETES_PORT_443_TCP_PORT=443 +MOSCOW_TIME_SERVICE_PORT_HTTP=80 +VAULT_SERVICE_PORT_HTTPS_INTERNAL=8201 +VAULT_PORT_8200_TCP_PORT=8200 +KUBERNETES_SERVICE_PORT_HTTPS=443 +KUBERNETES_PORT=tcp://10.96.0.1:443 +MOSCOW_TIME_SERVICE_PORT=80 +MOSCOW_TIME_PORT=tcp://10.99.90.244:80 +VAULT_PORT_8201_TCP=tcp://10.98.88.189:8201 +VAULT_AGENT_INJECTOR_SVC_PORT_443_TCP=tcp://10.102.73.9:443 +VAULT_SERVICE_PORT_HTTP=8200 +VAULT_PORT=tcp://10.98.88.189:8200 +MOSCOW_TIME_PORT_80_TCP_ADDR=10.99.90.244 +VAULT_PORT_8201_TCP_PORT=8201 +VAULT_AGENT_INJECTOR_SVC_PORT=tcp://10.102.73.9:443 +VAULT_AGENT_INJECTOR_SVC_PORT_443_TCP_ADDR=10.102.73.9 +MOSCOW_TIME_PORT_80_TCP_PORT=80 +VAULT_AGENT_INJECTOR_SVC_SERVICE_PORT_HTTPS=443 +KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443 +MOSCOW_TIME_PORT_80_TCP_PROTO=tcp +VAULT_AGENT_INJECTOR_SVC_SERVICE_HOST=10.102.73.9 +MOSCOW_TIME_PORT_80_TCP=tcp://10.99.90.244:80 +VAULT_PORT_8200_TCP=tcp://10.98.88.189:8200 +GPG_KEY=7169605F62C751356D054A26A821E680E5FA6305 +PYTHON_VERSION=3.13.1 +PYTHON_SHA256=9cf9427bee9e2242e3877dd0f6b641c1853ca461f39d6503ce260a59c80bf0d9 +PYTHONDONTWRITEBYTECODE=1 +PYTHONUNBUFFERED=1 +HOME=/home/appuser +``` diff --git a/k8s/12.md b/k8s/12.md new file mode 100644 index 0000000000..2657f9b895 --- /dev/null +++ b/k8s/12.md @@ -0,0 +1,92 @@ +# ConfigMap Implementation + +I add directory `files` with `config.json`. Update `deployment.yaml` and `configmap.yaml`. + +Install the updated Helm chart: + +```bash +helm upgrade --install moscow-time . +``` + +```bash +ebob@laptop moscow-time % helm upgrade --install moscow-time . +Release "moscow-time" has been upgraded. Happy Helming! 
+NAME: moscow-time +LAST DEPLOYED: Sun Mar 16 16:38:02 2025 +NAMESPACE: default +STATUS: deployed +REVISION: 6 +ebob@laptop moscow-time % kubectl get configmap +NAME DATA AGE +kube-root-ca.crt 1 16d +moscow-time-config 1 12s +ebob@laptop moscow-time % kubectl describe configmap moscow-time-config +Name: moscow-time-config +Namespace: default +Labels: app.kubernetes.io/managed-by=Helm +Annotations: meta.helm.sh/release-name: moscow-time + meta.helm.sh/release-namespace: default + +Data +==== +config.json: +---- +{ + "name": "Evgeny", + "surname": "Bobkunov" +} + + +BinaryData +==== + +Events: + +ebob@laptop moscow-time % kubectl get po +NAME READY STATUS RESTARTS AGE +moscow-time-58dc48f497-4h2bz 0/2 Completed 0 11d +moscow-time-58dc48f497-7bjbd 0/2 Completed 0 11d +moscow-time-58dc48f497-jjqjc 0/2 Completed 0 11d +moscow-time-74cc99dbdd-kwrgj 2/2 Running 0 32s +omsk-time-6c7f4d9765-mr2j4 1/1 Running 1 (9d ago) 11d +omsk-time-6c7f4d9765-sf75t 1/1 Running 1 (9d ago) 11d +omsk-time-6c7f4d9765-xsmfp 1/1 Running 1 (9d ago) 11d +vault-0 1/1 Running 1 (70m ago) 11d +vault-agent-injector-66f45b5fd5-hnvjp 1/1 Running 1 (9d ago) 11d + +ebob@laptop moscow-time % kubectl exec -it moscow-time-74cc99dbdd-kwrgj --container moscow-time -- /bin/sh +/app $ cd .. +/ $ ls -a +. .dockerenv bin dev home media opt root sbin sys usr vault +.. app data etc lib mnt proc run srv tmp var +/ $ cd data/ +/data $ ls -a +. .. config.json +/data $ cat config.json +{ + "name": "Evgeny", + "surname": "Bobkunov" +} +/data $ exit + +``` + +## Bonus + +I implement same logic in Ruby app. 
+ +```bash +ebob@laptop omsk-time % kubectl get pods + +NAME READY STATUS RESTARTS AGE +omsk-time-f79757f5c-5zdqg 1/1 Running 0 9s +omsk-time-f79757f5c-rsgrq 1/1 Running 0 8s +omsk-time-f79757f5c-sqwmf 1/1 Running 0 11s +vault-0 1/1 Running 1 (106m ago) 11d +vault-agent-injector-66f45b5fd5-hnvjp 1/1 Running 1 (9d ago) 11d + +ebob@laptop omsk-time % kubectl exec -it omsk-time-f79757f5c-5zdqg -- env | grep TIMEZONE +TIMEZONE=Asia/Omsk +ebob@laptop omsk-time % kubectl exec -it omsk-time-f79757f5c-5zdqg -- env | grep APP_MODE +APP_MODE=production +``` diff --git a/k8s/HELM.md b/k8s/HELM.md new file mode 100644 index 0000000000..e3077b706e --- /dev/null +++ b/k8s/HELM.md @@ -0,0 +1,500 @@ +# Helm + +Install Helm, verify the Workloads page in the Minikube dashboard. + +Снимок экрана 2025-02-27 в 00 41 40 + +## Deploy moscow-time app using Helm + +Create helm chart for python app: + +```bash +helm create moscow-time +``` + +Change `values.yaml` and install helm chart: + +```bash +ebob@laptop k8s % helm install moscow-time ./moscow-time +NAME: moscow-time +LAST DEPLOYED: Sun Feb 23 03:58:30 2025 +NAMESPACE: default +STATUS: deployed +REVISION: 1 +``` + +```bash +ebob@laptop ~ % kubectl get pods,svc +NAME READY STATUS RESTARTS AGE +pod/moscow-time-7bcf4d744f-4hndm 1/1 Running 0 13h +pod/moscow-time-7bcf4d744f-84pq6 1/1 Running 0 13h +pod/moscow-time-7bcf4d744f-lwbg9 1/1 Running 0 13h + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kubernetes ClusterIP 10.96.0.1 443/TCP 15h +service/moscow-time ClusterIP 10.110.30.20 80/TCP 13h +``` + +```bash +ebob@laptop ~ % minikube service --all +|-----------|------------|-------------|--------------| +| NAMESPACE | NAME | TARGET PORT | URL | +|-----------|------------|-------------|--------------| +| default | kubernetes | | No node port | +|-----------|------------|-------------|--------------| +😿 service default/kubernetes has no node port +|-----------|-------------|-------------|--------------| +| NAMESPACE | NAME | TARGET 
PORT | URL | +|-----------|-------------|-------------|--------------| +| default | moscow-time | | No node port | +|-----------|-------------|-------------|--------------| +😿 service default/moscow-time has no node port +❗ Services [default/kubernetes default/moscow-time] have type "ClusterIP" not meant to be exposed, however for local development minikube allows you to access this ! +🏃 Starting tunnel for service kubernetes. +🏃 Starting tunnel for service moscow-time. +|-----------|-------------|-------------|------------------------| +| NAMESPACE | NAME | TARGET PORT | URL | +|-----------|-------------|-------------|------------------------| +| default | kubernetes | | http://127.0.0.1:52535 | +| default | moscow-time | | http://127.0.0.1:52536 | +|-----------|-------------|-------------|------------------------| +🎉 Opening service default/kubernetes in default browser... +🎉 Opening service default/moscow-time in default browser... +❗ Because you are using a Docker driver on darwin, the terminal needs to be open to run it. 
+``` + +## Helm Chart Hooks + +### `helm lint` + +```bash +ebob@laptop k8s % helm lint moscow-time +==> Linting moscow-time +[INFO] Chart.yaml: icon is recommended + +1 chart(s) linted, 0 chart(s) failed +``` + +### `helm install --dry-run helm-hooks` + +```bash +ebob@laptop k8s % helm install --dry-run helm-hooks moscow-time +NAME: helm-hooks +LAST DEPLOYED: Sun Feb 23 18:23:42 2025 +NAMESPACE: default +STATUS: pending-install +REVISION: 1 +HOOKS: +--- +# Source: moscow-time/templates/post-install-hook.yaml +apiVersion: v1 +kind: Pod +metadata: + name: helm-hooks-moscow-time-post-install + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "5" + #"helm.sh/hook-delete-policy": hook-succeeded +spec: + restartPolicy: Never + containers: + - name: post-install-job + image: busybox + command: ['sh', '-c', 'echo "Starting post-install hook"; sleep 20; echo "Post-install hook completed"'] +--- +# Source: moscow-time/templates/pre-install-hook.yaml +apiVersion: v1 +kind: Pod +metadata: + name: helm-hooks-moscow-time-pre-install + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "-5" + #"helm.sh/hook-delete-policy": hook-succeeded +spec: + restartPolicy: Never + containers: + - name: pre-install-job + image: busybox + command: ['sh', '-c', 'echo "Starting pre-install hook"; sleep 20; echo "Pre-install hook completed"'] +--- +# Source: moscow-time/templates/tests/test-connection.yaml +apiVersion: v1 +kind: Pod +metadata: + name: "helm-hooks-moscow-time-test-connection" + labels: + helm.sh/chart: moscow-time-0.1.0 + app.kubernetes.io/name: moscow-time + app.kubernetes.io/instance: helm-hooks + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['helm-hooks-moscow-time:80'] + restartPolicy: Never +MANIFEST: +--- +# Source: moscow-time/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + 
name: helm-hooks-moscow-time + labels: + helm.sh/chart: moscow-time-0.1.0 + app.kubernetes.io/name: moscow-time + app.kubernetes.io/instance: helm-hooks + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + app.kubernetes.io/name: moscow-time + app.kubernetes.io/instance: helm-hooks +--- +# Source: moscow-time/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helm-hooks-moscow-time + labels: + helm.sh/chart: moscow-time-0.1.0 + app.kubernetes.io/name: moscow-time + app.kubernetes.io/instance: helm-hooks + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: moscow-time + app.kubernetes.io/instance: helm-hooks + template: + metadata: + labels: + app.kubernetes.io/name: moscow-time + app.kubernetes.io/instance: helm-hooks + spec: + containers: + - name: moscow-time + image: "ebob/moscow-time:v1.1" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 250m + memory: 64Mi +--- +# Source: moscow-time/templates/ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: helm-hooks-moscow-time + labels: + helm.sh/chart: moscow-time-0.1.0 + app.kubernetes.io/name: moscow-time + app.kubernetes.io/instance: helm-hooks + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + ingressClassName: nginx + rules: + - host: "moscow-time.local" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: helm-hooks-moscow-time + port: + number: 80 +``` + +### `kubectl get po` + +```bash +ebob@laptop ~ % kubectl get po +NAME READY STATUS RESTARTS AGE +moscow-time-7bcf4d744f-fhbmt 1/1 Running 0 
3m51s +moscow-time-7bcf4d744f-j7hqx 1/1 Running 0 3m51s +moscow-time-7bcf4d744f-lmzxq 1/1 Running 0 3m51s +moscow-time-post-install 0/1 Completed 0 3m51s +moscow-time-pre-install 0/1 Completed 0 4m18s +``` + +### `kubectl describe po moscow-time-pre-install` + +```bash +ebob@laptop ~ % kubectl describe po moscow-time-pre-install +Name: moscow-time-pre-install +Namespace: default +Priority: 0 +Service Account: default +Node: minikube/192.168.49.2 +Start Time: Sun, 23 Feb 2025 18:16:54 +0300 +Labels: +Annotations: helm.sh/hook: pre-install + helm.sh/hook-weight: -5 +Status: Succeeded +IP: 10.244.0.18 +IPs: + IP: 10.244.0.18 +Containers: + pre-install-job: + Container ID: docker://a9eb3677f57c50a03d5a4878da65dcf345cc382460e9e0a62d5abd44f837765e + Image: busybox + Image ID: docker-pullable://busybox@sha256:498a000f370d8c37927118ed80afe8adc38d1edcbfc071627d17b25c88efcab0 + Port: + Host Port: + Command: + sh + -c + echo "Starting pre-install hook"; sleep 20; echo "Pre-install hook completed" + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 23 Feb 2025 18:16:59 +0300 + Finished: Sun, 23 Feb 2025 18:17:19 +0300 + Ready: False + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-5vjhm (ro) +Conditions: + Type Status + PodReadyToStartContainers False + Initialized True + Ready False + ContainersReady False + PodScheduled True +Volumes: + kube-api-access-5vjhm: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + ConfigMapOptional: + DownwardAPI: true +QoS Class: BestEffort +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 5m6s default-scheduler Successfully assigned default/moscow-time-pre-install to minikube + 
Normal Pulling 5m6s kubelet Pulling image "busybox" + Normal Pulled 5m1s kubelet Successfully pulled image "busybox" in 4.799s (4.799s including waiting). Image size: 4042190 bytes. + Normal Created 5m1s kubelet Created container: pre-install-job + Normal Started 5m1s kubelet Started container pre-install-job +``` + +### `kubectl describe po moscow-time-post-install` + +```bash +ebob@laptop ~ % kubectl describe po moscow-time-post-install +Name: moscow-time-post-install +Namespace: default +Priority: 0 +Service Account: default +Node: minikube/192.168.49.2 +Start Time: Sun, 23 Feb 2025 18:17:21 +0300 +Labels: +Annotations: helm.sh/hook: post-install + helm.sh/hook-weight: 5 +Status: Succeeded +IP: 10.244.0.22 +IPs: + IP: 10.244.0.22 +Containers: + post-install-job: + Container ID: docker://5a02bdf5adb5d33e4e4f2baea2ea8444cbc0f704ec58162eef23e0cc5874aa45 + Image: busybox + Image ID: docker-pullable://busybox@sha256:498a000f370d8c37927118ed80afe8adc38d1edcbfc071627d17b25c88efcab0 + Port: + Host Port: + Command: + sh + -c + echo "Starting post-install hook"; sleep 20; echo "Post-install hook completed" + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 23 Feb 2025 18:17:24 +0300 + Finished: Sun, 23 Feb 2025 18:17:44 +0300 + Ready: False + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jgt76 (ro) +Conditions: + Type Status + PodReadyToStartContainers False + Initialized True + Ready False + ContainersReady False + PodScheduled True +Volumes: + kube-api-access-jgt76: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + ConfigMapOptional: + DownwardAPI: true +QoS Class: BestEffort +Node-Selectors: +Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s +Events: + Type Reason Age From Message + ---- ------ ---- ---- 
------- + Normal Scheduled 5m26s default-scheduler Successfully assigned default/moscow-time-post-install to minikube + Normal Pulling 5m25s kubelet Pulling image "busybox" + Normal Pulled 5m23s kubelet Successfully pulled image "busybox" in 1.49s (1.49s including waiting). Image size: 4042190 bytes. + Normal Created 5m23s kubelet Created container: post-install-job + Normal Started 5m23s kubelet Started container post-install-job +``` + +`kubectl get pods` + +```bash +kubectl get pods +NAME READY STATUS RESTARTS AGE +moscow-time-7bcf4d744f-fhbmt 1/1 Running 0 46h +moscow-time-7bcf4d744f-j7hqx 1/1 Running 0 46h +moscow-time-7bcf4d744f-lmzxq 1/1 Running 0 46h +moscow-time-post-install 0/1 Completed 0 46h +moscow-time-pre-install 0/1 Completed 0 46h +``` + +### Hook Delete Policy + +Add this to `hook.yaml`: + +```bash +"helm.sh/hook-delete-policy": hook-succeeded +``` + +## Helm Library Chart + +Create lib directory: + +```bash +helm create common-lib +``` + +```bash +cd moscow-time +helm dependency build +``` + +```bash +helm install moscow-time . 
--set labels.environment=production +``` + +Deploy second app: + +```bash +kubectl get pods +NAME READY STATUS RESTARTS AGE +moscow-time-74498bff5b-6ctjb 1/1 Running 0 108s +moscow-time-74498bff5b-jrgz6 1/1 Running 0 108s +moscow-time-74498bff5b-wdg6s 1/1 Running 0 108s +omsk-time-9474b67c6-qjpns 1/1 Running 0 3m14s +omsk-time-9474b67c6-rnw5s 1/1 Running 0 3m14s +omsk-time-9474b67c6-vnjp8 1/1 Running 0 3m14s + +kubectl get deployment moscow-time -o yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + deployment.kubernetes.io/revision: "1" + meta.helm.sh/release-name: moscow-time + meta.helm.sh/release-namespace: default + creationTimestamp: "2025-02-25T14:41:49Z" + generation: 1 + labels: + app.kubernetes.io/instance: moscow-time + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: moscow-time + app.kubernetes.io/version: "1.1" + helm.sh/chart: moscow-time-0.1.0 + name: moscow-time + namespace: default + resourceVersion: "112911" + uid: 8f4e3b68-224b-4d38-81eb-400a6c9bf8d9 +spec: + progressDeadlineSeconds: 600 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/instance: moscow-time + app.kubernetes.io/name: moscow-time + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/instance: moscow-time + app.kubernetes.io/name: moscow-time + spec: + containers: + - image: ebob/moscow-time:v1.1 + imagePullPolicy: IfNotPresent + name: moscow-time + ports: + - containerPort: 80 + name: http + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 +status: + availableReplicas: 3 + conditions: + - lastTransitionTime: "2025-02-25T14:41:52Z" + lastUpdateTime: "2025-02-25T14:41:52Z" + message: Deployment has 
minimum availability. + reason: MinimumReplicasAvailable + status: "True" + type: Available + - lastTransitionTime: "2025-02-25T14:41:49Z" + lastUpdateTime: "2025-02-25T14:41:52Z" + message: ReplicaSet "moscow-time-74498bff5b" has successfully progressed. + reason: NewReplicaSetAvailable + status: "True" + type: Progressing + observedGeneration: 1 + readyReplicas: 3 + replicas: 3 + updatedReplicas: 3 +``` diff --git a/k8s/README.md b/k8s/README.md new file mode 100644 index 0000000000..29499e08de --- /dev/null +++ b/k8s/README.md @@ -0,0 +1,163 @@ +# Kubernetes ☸ + +## Setup and Basic Deployment + +Use `minikube` for local cluster: + +```bash +minikube start +``` + +Create a deployment and expose: + +```bash +ebob@laptop ~ % kubectl create deployment moscow-time --image=ebob/moscow-time:v1.1 +deployment.apps/moscow-time created +ebob@laptop ~ % kubectl expose deployment moscow-time --type=LoadBalancer --port=8080 +service/moscow-time exposed +ebob@laptop ~ % kubectl get pods,svc +NAME READY STATUS RESTARTS AGE +pod/moscow-time-849cb46c68-nvmcc 1/1 Running 0 27s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kubernetes ClusterIP 10.96.0.1 443/TCP 72m +service/moscow-time LoadBalancer 10.101.142.76 127.0.0.1 8080:32533/TCP 6s +``` + +Verify availability: + +```bash +ebob@laptop ~ % curl 127.0.0.1:8080 +

Current time and date in Moscow

Time: 02:52:55

Date: 23.02.2025

+``` + +Cleanup: + +```bash +ebob@laptop ~ % kubectl delete deployment moscow-time +deployment.apps "moscow-time" deleted +ebob@laptop ~ % kubectl delete svc moscow-time +service "moscow-time" deleted +ebob@laptop ~ % kubectl get pods,svc +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kubernetes ClusterIP 10.96.0.1 443/TCP 81m +``` + +## Declarative Kubernetes Manifests + +To apply manifests, run: + +```bash +kubectl apply -f deployment.yml +``` + +### `kubectl get pods,svc` + +```bash +ebob@laptop ~ % kubectl get pods,svc +NAME READY STATUS RESTARTS AGE +pod/moscow-time-db47bdf76-jnv9q 1/1 Running 0 5m27s +pod/moscow-time-db47bdf76-khrbk 1/1 Running 0 5m27s +pod/moscow-time-db47bdf76-sp4xc 1/1 Running 0 5m27s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kubernetes ClusterIP 10.96.0.1 443/TCP 37m +service/moscow-time-service ClusterIP 10.110.66.123 80/TCP 5m22s +``` + +### `minikube service --all` + +```bash +ebob@laptop ~ % minikube service --all +|-----------|------------|-------------|--------------| +| NAMESPACE | NAME | TARGET PORT | URL | +|-----------|------------|-------------|--------------| +| default | kubernetes | | No node port | +|-----------|------------|-------------|--------------| +😿 service default/kubernetes has no node port +|-----------|---------------------|-------------|--------------| +| NAMESPACE | NAME | TARGET PORT | URL | +|-----------|---------------------|-------------|--------------| +| default | moscow-time-service | | No node port | +|-----------|---------------------|-------------|--------------| +😿 service default/moscow-time-service has no node port +❗ Services [default/kubernetes default/moscow-time-service] have type "ClusterIP" not meant to be exposed, however for local development minikube allows you to access this ! +🏃 Starting tunnel for service kubernetes. +🏃 Starting tunnel for service moscow-time-service. 
+|-----------|---------------------|-------------|------------------------| +| NAMESPACE | NAME | TARGET PORT | URL | +|-----------|---------------------|-------------|------------------------| +| default | kubernetes | | http://127.0.0.1:61906 | +| default | moscow-time-service | | http://127.0.0.1:61907 | +|-----------|---------------------|-------------|------------------------| +🎉 Opening service default/kubernetes in default browser... +🎉 Opening service default/moscow-time-service in default browser... +❗ Because you are using a Docker driver on darwin, the terminal needs to be open to run it. +``` + +Screenshot from browser: + +screenshot + +## Ruby App and Ingress + +Apply manifests for Ruby app: + +```bash +ebob@laptop ~ % kubectl get pods,svc +NAME READY STATUS RESTARTS AGE +pod/moscow-time-db47bdf76-jnv9q 1/1 Running 0 22m +pod/moscow-time-db47bdf76-khrbk 1/1 Running 0 22m +pod/moscow-time-db47bdf76-sp4xc 1/1 Running 0 22m +pod/omsk-time-6f8d56f4c4-2cnbt 1/1 Running 0 6m11s +pod/omsk-time-6f8d56f4c4-t9vmq 1/1 Running 0 6m11s +pod/omsk-time-6f8d56f4c4-z6426 1/1 Running 0 6m11s + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/kubernetes ClusterIP 10.96.0.1 443/TCP 53m +service/moscow-time-service ClusterIP 10.110.66.123 80/TCP 21m +service/omsk-time-service ClusterIP 10.97.58.80 80/TCP 6m3s +``` + +Set up Nginx Ingress Controller: + +Run: + +```bash +minikube addons enable ingress +``` + +Verify: + +```bash +ebob@laptop ~ % kubectl get deployments -n ingress-nginx +NAME READY UP-TO-DATE AVAILABLE AGE +ingress-nginx-controller 1/1 1 1 59m +``` + +Apply `ingress.yml` manifest: + +```bash +ebob@laptop ~ % kubectl get ingress +NAME CLASS HOSTS ADDRESS PORTS AGE +application-ingress nginx moscow-time.local,omsk-time.local 192.168.49.2 80 13m +``` + +Add these lines to `/etc/hosts`: + +```bash +127.0.0.1 moscow-time.local +127.0.0.1 omsk-time.local +``` + +Then run `curl`: + +```bash +ebob@laptop ~ % curl http://moscow-time.local/ +

Current time and date in Moscow

Time: 02:31:36

Date: 23.02.2025

+ +ebob@laptop ~ % curl http://omsk-time.local/ +Current time in Omsk: 2025-02-23 05:32:22 +``` + +ingress-screenshot diff --git a/k8s/app_python/deployment.yml b/k8s/app_python/deployment.yml new file mode 100644 index 0000000000..b1070776c3 --- /dev/null +++ b/k8s/app_python/deployment.yml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: moscow-time + labels: + app: moscow-time +spec: + replicas: 3 + selector: + matchLabels: + app: moscow-time + template: + metadata: + labels: + app: moscow-time + spec: + containers: + - name: moscow-time + image: ebob/moscow-time:v1.1 + ports: + - containerPort: 8080 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" diff --git a/k8s/app_python/service.yml b/k8s/app_python/service.yml new file mode 100644 index 0000000000..b311bb71a3 --- /dev/null +++ b/k8s/app_python/service.yml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: moscow-time-service +spec: + selector: + app: moscow-time + ports: + - protocol: TCP + port: 80 + targetPort: 8080 + type: ClusterIP diff --git a/k8s/app_ruby/deployment.yml b/k8s/app_ruby/deployment.yml new file mode 100644 index 0000000000..9552e0ced6 --- /dev/null +++ b/k8s/app_ruby/deployment.yml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: omsk-time + labels: + app: omsk-time +spec: + replicas: 3 + selector: + matchLabels: + app: omsk-time + template: + metadata: + labels: + app: omsk-time + spec: + containers: + - name: omsk-time + image: ebob/omsk-time:v1.1 + ports: + - containerPort: 4567 + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" diff --git a/k8s/app_ruby/service.yml b/k8s/app_ruby/service.yml new file mode 100644 index 0000000000..1d880c99a3 --- /dev/null +++ b/k8s/app_ruby/service.yml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: omsk-time-service +spec: + selector: + app: omsk-time + ports: + - 
protocol: TCP + port: 80 + targetPort: 4567 + type: ClusterIP diff --git a/k8s/common-lib/.helmignore b/k8s/common-lib/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/k8s/common-lib/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/k8s/common-lib/Chart.yaml b/k8s/common-lib/Chart.yaml new file mode 100644 index 0000000000..a55d1447cc --- /dev/null +++ b/k8s/common-lib/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: common-lib +description: A Helm library chart for common templates +type: library +version: 0.1.0 diff --git a/k8s/common-lib/templates/NOTES.txt b/k8s/common-lib/templates/NOTES.txt new file mode 100644 index 0000000000..e4b54f944c --- /dev/null +++ b/k8s/common-lib/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common-lib.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "common-lib.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common-lib.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "common-lib.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/k8s/common-lib/templates/_helpers.tpl b/k8s/common-lib/templates/_helpers.tpl new file mode 100644 index 0000000000..4bf947b40e --- /dev/null +++ b/k8s/common-lib/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "common-lib.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "common-lib.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common-lib.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "common-lib.labels" -}} +helm.sh/chart: {{ include "common-lib.chart" . }} +{{ include "common-lib.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "common-lib.selectorLabels" -}} +app.kubernetes.io/name: {{ include "common-lib.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "common-lib.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "common-lib.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/k8s/common-lib/templates/_labels.tpl b/k8s/common-lib/templates/_labels.tpl new file mode 100644 index 0000000000..e72bddeded --- /dev/null +++ b/k8s/common-lib/templates/_labels.tpl @@ -0,0 +1,15 @@ +{{- define "common-lib.labels" -}} +app.kubernetes.io/name: {{ .Release.Name }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} +{{- if .Values.labels }} +{{ toYaml .Values.labels }} +{{- end }} +{{- end }} + +{{- define "common-lib.selectorLabels" -}} +app.kubernetes.io/name: {{ .Release.Name }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/k8s/common-lib/templates/deployment.yaml b/k8s/common-lib/templates/deployment.yaml new file mode 100644 index 0000000000..0b6cd1178a --- /dev/null +++ b/k8s/common-lib/templates/deployment.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "common-lib.fullname" . }} + labels: + {{- include "common-lib.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "common-lib.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "common-lib.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "common-lib.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/k8s/common-lib/templates/hpa.yaml b/k8s/common-lib/templates/hpa.yaml new file mode 100644 index 0000000000..df76b1807d --- /dev/null +++ b/k8s/common-lib/templates/hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "common-lib.fullname" . }} + labels: + {{- include "common-lib.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "common-lib.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/k8s/common-lib/templates/ingress.yaml b/k8s/common-lib/templates/ingress.yaml new file mode 100644 index 0000000000..21c24eb16e --- /dev/null +++ b/k8s/common-lib/templates/ingress.yaml @@ -0,0 +1,43 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "common-lib.fullname" . }} + labels: + {{- include "common-lib.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- with .Values.ingress.className }} + ingressClassName: {{ . }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- with .pathType }} + pathType: {{ . 
}} + {{- end }} + backend: + service: + name: {{ include "common-lib.fullname" $ }} + port: + number: {{ $.Values.service.port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/k8s/common-lib/templates/service.yaml b/k8s/common-lib/templates/service.yaml new file mode 100644 index 0000000000..5a562a6cc5 --- /dev/null +++ b/k8s/common-lib/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common-lib.fullname" . }} + labels: + {{- include "common-lib.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "common-lib.selectorLabels" . | nindent 4 }} diff --git a/k8s/common-lib/templates/serviceaccount.yaml b/k8s/common-lib/templates/serviceaccount.yaml new file mode 100644 index 0000000000..c898ed2cce --- /dev/null +++ b/k8s/common-lib/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "common-lib.serviceAccountName" . }} + labels: + {{- include "common-lib.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/k8s/common-lib/templates/tests/test-connection.yaml b/k8s/common-lib/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..603173fa0b --- /dev/null +++ b/k8s/common-lib/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "common-lib.fullname" . }}-test-connection" + labels: + {{- include "common-lib.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "common-lib.fullname" . 
}}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/k8s/common-lib/values.yaml b/k8s/common-lib/values.yaml new file mode 100644 index 0000000000..28fd12ee22 --- /dev/null +++ b/k8s/common-lib/values.yaml @@ -0,0 +1 @@ +labels: {} diff --git a/k8s/ingress.yml b/k8s/ingress.yml new file mode 100644 index 0000000000..6b466b58d6 --- /dev/null +++ b/k8s/ingress.yml @@ -0,0 +1,29 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: application-ingress + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + ingressClassName: nginx + rules: + - host: moscow-time.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: moscow-time-service + port: + number: 80 + - host: omsk-time.local + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: omsk-time-service + port: + number: 80 diff --git a/k8s/moscow-time/.helmignore b/k8s/moscow-time/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/k8s/moscow-time/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/k8s/moscow-time/Chart.lock b/k8s/moscow-time/Chart.lock new file mode 100644 index 0000000000..cfc298908e --- /dev/null +++ b/k8s/moscow-time/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common-lib + repository: file://../common-lib + version: 0.1.0 +digest: sha256:20073f8787800aa68dec8f48b8c4ee0c196f0d6ee2eba090164f5a9478995895 +generated: "2025-02-25T17:27:05.753535+03:00" diff --git a/k8s/moscow-time/Chart.yaml b/k8s/moscow-time/Chart.yaml new file mode 100644 index 0000000000..3a54d1b8d2 --- /dev/null +++ b/k8s/moscow-time/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: moscow-time +description: A Helm chart for Moscow Time application +type: application +version: 0.1.0 +appVersion: "1.1" +dependencies: + - name: common-lib + version: 0.1.0 + repository: file://../common-lib diff --git a/k8s/moscow-time/charts/common-lib-0.1.0.tgz b/k8s/moscow-time/charts/common-lib-0.1.0.tgz new file mode 100644 index 0000000000..d8d97feaec Binary files /dev/null and b/k8s/moscow-time/charts/common-lib-0.1.0.tgz differ diff --git a/k8s/moscow-time/files/config.json b/k8s/moscow-time/files/config.json new file mode 100644 index 0000000000..9fc24573c5 --- /dev/null +++ b/k8s/moscow-time/files/config.json @@ -0,0 +1,4 @@ +{ + "name": "Evgeny", + "surname": "Bobkunov" +} diff --git a/k8s/moscow-time/patch.yaml b/k8s/moscow-time/patch.yaml new file mode 100644 index 0000000000..55997d4fb9 --- /dev/null +++ b/k8s/moscow-time/patch.yaml @@ -0,0 +1,12 @@ +spec: + template: + metadata: + annotations: + vault.hashicorp.com/agent-inject: 'true' + vault.hashicorp.com/agent-inject-status: 'update' + vault.hashicorp.com/role: 'internal-app' + vault.hashicorp.com/agent-inject-secret-database-config.txt: 'internal/data/database/config' + 
vault.hashicorp.com/agent-inject-template-database-config.txt: | + {{- with secret "internal/data/database/config" -}} + postgresql://{{ .Data.data.username }}:{{ .Data.data.password }}@postgres:5432/wizard + {{- end -}} diff --git a/k8s/moscow-time/secrets.yaml b/k8s/moscow-time/secrets.yaml new file mode 100644 index 0000000000..da51a156c4 --- /dev/null +++ b/k8s/moscow-time/secrets.yaml @@ -0,0 +1,24 @@ +password: ENC[AES256_GCM,data:MXV0R4F8nz/1,iv:b8rNpdpecTz3R+C2MUC+mAgw352Qof5rl6uXNfbYnsc=,tag:OPHxjzNnJ0o2uxkDE1Qy4Q==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: [] + lastmodified: "2025-03-04T21:04:26Z" + mac: ENC[AES256_GCM,data:11EN5IVAzkgvCPpDVVP9caBKvj9hFDSiJml7FcuvjsNtK5Y+Fu+2aoXED1DBcY251n7xjc5P/VxfeKqvG0YNFi9nqdrojT8aMLTmgwHoGLN9ncA1GjDukpCe4V74zHA/xlbsi00LA3qyYqRJJ2jXfDqg+OsiX4/nY4Y27DXOtB8=,iv:rslrj8P32i+4vf8sekhqv4VsEmgP85mjhFonnOS1K2g=,tag:EiOCeOO3lWid2o3hCzbAhA==,type:str] + pgp: + - created_at: "2025-03-04T21:03:21Z" + enc: |- + -----BEGIN PGP MESSAGE----- + + hF4DYGL4POjMwEASAQdAHeIb1FiB8B/1J+l/N+7AryL0UHpGwN1PNTOaPKRKl3Iw + 5VDcAy+pqQkWAaNDNfwaI0vkxdK5VcX3rHNrMbYMK2hKBtQI5kW/uWdRDJwgmWPJ + 1GgBCQIQPjSc51z/WGBMIgaCZ+oOdE7/4ckaZuUNmkE6wv6LoC/GzhP16j33aAH9 + eyaDqVJMOV6PkCh7tz1CsLB6NScSjfmePaRyfC9+YYg9cDRwUd+zVlPy358cpy2r + pdIzVkeuxOkXOw== + =eR1P + -----END PGP MESSAGE----- + fp: 5E122B618402A7BD8EE1DBDAE1E7C1D76CD61BA9 + unencrypted_suffix: _unencrypted + version: 3.9.4 diff --git a/k8s/moscow-time/templates/_helpers.tpl b/k8s/moscow-time/templates/_helpers.tpl new file mode 100644 index 0000000000..52197bc781 --- /dev/null +++ b/k8s/moscow-time/templates/_helpers.tpl @@ -0,0 +1,67 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "moscow-time.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "moscow-time.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "moscow-time.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "moscow-time.labels" -}} +helm.sh/chart: {{ include "moscow-time.chart" . }} +{{ include "moscow-time.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "moscow-time.selectorLabels" -}} +app.kubernetes.io/name: {{ include "moscow-time.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "moscow-time.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "moscow-time.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{- define "common.env" }} +- name: ENVIRONMENT + value: {{ .Values.env.ENVIRONMENT | quote }} +{{ end }} diff --git a/k8s/moscow-time/templates/configmap.yaml b/k8s/moscow-time/templates/configmap.yaml new file mode 100644 index 0000000000..beff9404b0 --- /dev/null +++ b/k8s/moscow-time/templates/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-config +data: + config.json: |- +{{ .Files.Get "files/config.json" | indent 4 }} diff --git a/k8s/moscow-time/templates/deployment.yaml b/k8s/moscow-time/templates/deployment.yaml new file mode 100644 index 0000000000..39e5bc0020 --- /dev/null +++ b/k8s/moscow-time/templates/deployment.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }} + labels: + {{- include "common-lib.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "common-lib.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "common-lib.selectorLabels" . | nindent 8 }} + spec: + serviceAccountName: {{ include "moscow-time.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + volumeMounts: + - name: config-volume + mountPath: /app/config + readOnly: true + env: + {{ include "common.env" . 
| nindent 12 }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + + volumes: + - name: config-volume + configMap: + name: {{ .Release.Name }}-config diff --git a/k8s/moscow-time/templates/ingress.yaml b/k8s/moscow-time/templates/ingress.yaml new file mode 100644 index 0000000000..db8f7fff51 --- /dev/null +++ b/k8s/moscow-time/templates/ingress.yaml @@ -0,0 +1,29 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "moscow-time.fullname" . }} + labels: + {{- include "moscow-time.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + ingressClassName: {{ .Values.ingress.className }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ include "moscow-time.fullname" $ }} + port: + number: {{ $.Values.service.port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/k8s/moscow-time/templates/post-install-hook.yaml b/k8s/moscow-time/templates/post-install-hook.yaml new file mode 100644 index 0000000000..bbecc071c5 --- /dev/null +++ b/k8s/moscow-time/templates/post-install-hook.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "moscow-time.fullname" . 
}}-post-install + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + restartPolicy: Never + containers: + - name: post-install-job + image: busybox + command: ['sh', '-c', 'echo "Starting post-install hook"; sleep 20; echo "Post-install hook completed"'] diff --git a/k8s/moscow-time/templates/pre-install-hook.yaml b/k8s/moscow-time/templates/pre-install-hook.yaml new file mode 100644 index 0000000000..dd76c671ec --- /dev/null +++ b/k8s/moscow-time/templates/pre-install-hook.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "moscow-time.fullname" . }}-pre-install + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + restartPolicy: Never + containers: + - name: pre-install-job + image: busybox + command: ['sh', '-c', 'echo "Starting pre-install hook"; sleep 20; echo "Pre-install hook completed"'] diff --git a/k8s/moscow-time/templates/secrets.yaml b/k8s/moscow-time/templates/secrets.yaml new file mode 100644 index 0000000000..d00c4a4a7f --- /dev/null +++ b/k8s/moscow-time/templates/secrets.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-secret +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} diff --git a/k8s/moscow-time/templates/service.yaml b/k8s/moscow-time/templates/service.yaml new file mode 100644 index 0000000000..cfa0e24d6b --- /dev/null +++ b/k8s/moscow-time/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "moscow-time.fullname" . }} + labels: + {{- include "moscow-time.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + {{- include "moscow-time.selectorLabels" . 
| nindent 4 }} diff --git a/k8s/moscow-time/templates/serviceaccount.yaml b/k8s/moscow-time/templates/serviceaccount.yaml new file mode 100644 index 0000000000..e3884c424b --- /dev/null +++ b/k8s/moscow-time/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "moscow-time.serviceAccountName" . }} + labels: + {{- include "moscow-time.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} diff --git a/k8s/moscow-time/templates/tests/test-connection.yaml b/k8s/moscow-time/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..ef927d8620 --- /dev/null +++ b/k8s/moscow-time/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "moscow-time.fullname" . }}-test-connection" + labels: + {{- include "moscow-time.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "moscow-time.fullname" . 
}}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/k8s/moscow-time/values.yaml b/k8s/moscow-time/values.yaml new file mode 100644 index 0000000000..b505d29b93 --- /dev/null +++ b/k8s/moscow-time/values.yaml @@ -0,0 +1,70 @@ +replicaCount: 3 + +image: + repository: ebob/moscow-time + tag: "v1.1" + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + create: true + automount: true + annotations: {} + name: "internal-app" + +podAnnotations: { + vault.hashicorp.com/agent-inject: 'true', + vault.hashicorp.com/role: 'internal-app', + vault.hashicorp.com/agent-inject-secret-database-config.txt: 'internal/data/database/config' +} + +podLabels: {} + +podSecurityContext: {} + +securityContext: {} + +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +ingress: + enabled: true + className: nginx + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + hosts: + - host: moscow-time.local + paths: + - path: / + pathType: Prefix + +resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 250m + memory: 64Mi + +livenessProbe: + httpGet: + path: / + port: http +readinessProbe: + httpGet: + path: / + port: http + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + +env: + ENVIRONMENT: stage diff --git a/k8s/omsk-time/.helmignore b/k8s/omsk-time/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/k8s/omsk-time/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/k8s/omsk-time/Chart.lock b/k8s/omsk-time/Chart.lock new file mode 100644 index 0000000000..b19ae141e6 --- /dev/null +++ b/k8s/omsk-time/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common-lib + repository: file://../common-lib + version: 0.1.0 +digest: sha256:20073f8787800aa68dec8f48b8c4ee0c196f0d6ee2eba090164f5a9478995895 +generated: "2025-02-25T17:34:00.331806+03:00" diff --git a/k8s/omsk-time/Chart.yaml b/k8s/omsk-time/Chart.yaml new file mode 100644 index 0000000000..1f964debf2 --- /dev/null +++ b/k8s/omsk-time/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: omsk-time +description: A Helm chart for Omsk Time application +type: application +version: 0.1.0 +appVersion: "1.1" +dependencies: + - name: common-lib + version: 0.1.0 + repository: file://../common-lib diff --git a/k8s/omsk-time/charts/common-lib-0.1.0.tgz b/k8s/omsk-time/charts/common-lib-0.1.0.tgz new file mode 100644 index 0000000000..013675364e Binary files /dev/null and b/k8s/omsk-time/charts/common-lib-0.1.0.tgz differ diff --git a/k8s/omsk-time/templates/_helpers.tpl b/k8s/omsk-time/templates/_helpers.tpl new file mode 100644 index 0000000000..31e37b951f --- /dev/null +++ b/k8s/omsk-time/templates/_helpers.tpl @@ -0,0 +1,67 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "omsk-time.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "omsk-time.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "omsk-time.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "omsk-time.labels" -}} +helm.sh/chart: {{ include "omsk-time.chart" . }} +{{ include "omsk-time.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "omsk-time.selectorLabels" -}} +app.kubernetes.io/name: {{ include "omsk-time.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "omsk-time.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "omsk-time.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{- define "common.env" }} +- name: ENVIRONMENT + value: {{ .Values.env.ENVIRONMENT | quote }} +{{ end }} diff --git a/k8s/omsk-time/templates/configmap.yaml b/k8s/omsk-time/templates/configmap.yaml new file mode 100644 index 0000000000..ba642264a9 --- /dev/null +++ b/k8s/omsk-time/templates/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "omsk-time.fullname" . 
}}-config +data: + TIMEZONE: "Asia/Omsk" + APP_MODE: "production" diff --git a/k8s/omsk-time/templates/deployment.yaml b/k8s/omsk-time/templates/deployment.yaml new file mode 100644 index 0000000000..e5243735dc --- /dev/null +++ b/k8s/omsk-time/templates/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }} + labels: + {{- include "common-lib.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "common-lib.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "common-lib.selectorLabels" . | nindent 8 }} + spec: + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + envFrom: + - configMapRef: + name: {{ include "omsk-time.fullname" . }}-config + env: + {{ include "common.env" . | nindent 12 }} + ports: + - name: http + containerPort: {{ .Values.service.port }} diff --git a/k8s/omsk-time/templates/ingress.yaml b/k8s/omsk-time/templates/ingress.yaml new file mode 100644 index 0000000000..c0cc0d10e1 --- /dev/null +++ b/k8s/omsk-time/templates/ingress.yaml @@ -0,0 +1,29 @@ +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "omsk-time.fullname" . }} + labels: + {{- include "omsk-time.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + ingressClassName: {{ .Values.ingress.className }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ include "omsk-time.fullname" $ }} + port: + number: {{ $.Values.service.port }} + {{- end }} + {{- end }} +{{- end }} diff --git a/k8s/omsk-time/templates/post-install-hook.yaml b/k8s/omsk-time/templates/post-install-hook.yaml new file mode 100644 index 0000000000..f1d24963d8 --- /dev/null +++ b/k8s/omsk-time/templates/post-install-hook.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "omsk-time.fullname" . }}-post-install + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + restartPolicy: Never + containers: + - name: post-install-job + image: busybox + command: ['sh', '-c', 'echo "Starting post-install hook"; sleep 20; echo "Post-install hook completed"'] diff --git a/k8s/omsk-time/templates/pre-install-hook.yaml b/k8s/omsk-time/templates/pre-install-hook.yaml new file mode 100644 index 0000000000..e1a044ed26 --- /dev/null +++ b/k8s/omsk-time/templates/pre-install-hook.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "omsk-time.fullname" . 
}}-pre-install + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + restartPolicy: Never + containers: + - name: pre-install-job + image: busybox + command: ['sh', '-c', 'echo "Starting pre-install hook"; sleep 20; echo "Pre-install hook completed"'] diff --git a/k8s/omsk-time/templates/service.yaml b/k8s/omsk-time/templates/service.yaml new file mode 100644 index 0000000000..f72f0f3c2a --- /dev/null +++ b/k8s/omsk-time/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "omsk-time.fullname" . }} + labels: + {{- include "omsk-time.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: http + selector: + {{- include "omsk-time.selectorLabels" . | nindent 4 }} diff --git a/k8s/omsk-time/templates/tests/test-connection.yaml b/k8s/omsk-time/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..c0dd09a5b7 --- /dev/null +++ b/k8s/omsk-time/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "omsk-time.fullname" . }}-test-connection" + labels: + {{- include "omsk-time.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "omsk-time.fullname" . 
}}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/k8s/omsk-time/values.yaml b/k8s/omsk-time/values.yaml new file mode 100644 index 0000000000..5ea83f851f --- /dev/null +++ b/k8s/omsk-time/values.yaml @@ -0,0 +1,51 @@ +replicaCount: 3 + +image: + repository: ebob/omsk-time + tag: "v1.1" + pullPolicy: IfNotPresent + +nameOverride: "" +fullnameOverride: "" + +service: + type: ClusterIP + port: 80 + targetPort: 4567 + +ingress: + enabled: true + className: nginx + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + hosts: + - host: omsk-time.local + paths: + - path: / + pathType: Prefix + +resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 250m + memory: 64Mi + +livenessProbe: + httpGet: + path: / + port: http +readinessProbe: + httpGet: + path: / + port: http + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + +env: + ENVIRONMENT: stage diff --git a/lab16/index.html b/lab16/index.html index acce39eee3..a705f20e94 100644 --- a/lab16/index.html +++ b/lab16/index.html @@ -300,4 +300,4 @@

Learning Progression

- \ No newline at end of file + diff --git a/monitoring/LOGGING.md b/monitoring/LOGGING.md new file mode 100644 index 0000000000..601e5459cb --- /dev/null +++ b/monitoring/LOGGING.md @@ -0,0 +1,62 @@ +# Logging Stack Overview + +## Components + +![image](https://github.com/user-attachments/assets/bc6da33c-deb2-4f36-ac62-452cf59e4c4a) + +### Loki image + +**Role:** Loki is a log aggregation system designed for efficiency and cost-effectiveness. It stores logs and provides a query interface for retrieving them. + +**Configuration:** + +- Runs as a Docker container on port `3100`. +- Stores log data in the `loki-data` volume. +- Uses a configuration file `/etc/loki/local-config.yaml`. + +### Promtail image + +**Role:** Promtail is a log shipper that collects logs from Docker containers and forwards them to Loki. + +**Configuration:** + +- Mounts `/var/run/docker.sock` to access Docker logs. +- Uses `promtail-config.yml` to define log scraping rules. +- Configured to auto-discover Docker containers and extract labels (`app`, `logging=true`, etc.). +- Sends logs to Loki at `http://loki:3100/loki/api/v1/push`. + +### Grafana image + +**Role:** Grafana is a visualization tool that provides a user interface for querying and analyzing logs stored in Loki. + +**Configuration:** + +- Runs on port `3000`. +- Uses `grafana-datasources.yml` to configure Loki as the default data source. +- Authentication is managed via environment variables (`admin/admin`). +- Stores dashboard configurations in `grafana-data` volume. + +### Monitored Applications + +Two applications (`moscow-time-app` and `omsk-time-app`) are running in the logging stack: + +- These applications are labeled with `logging=true` to indicate that their logs should be collected. +- Logs are scraped by Promtail and sent to Loki. + +Снимок экрана 2025-02-21 в 20 36 23 + +Снимок экрана 2025-02-21 в 20 30 13 + +## Network and Storage + +- All services communicate over the `monitoring` network (Docker bridge). 
+- Persistent volumes (`loki-data`, `grafana-data`) are used to store logs and dashboards. + +## Data Flow + +1. **Log Generation:** Applications generate logs. +2. **Log Collection:** Promtail scrapes logs from Docker containers. +3. **Log Storage:** Logs are sent to Loki for storage and indexing. +4. **Log Visualization:** Grafana queries Loki and visualizes logs in dashboards. + +This setup ensures efficient log aggregation, storage, and visualization, providing insights into application behavior and system health. diff --git a/monitoring/METRICS.md b/monitoring/METRICS.md new file mode 100644 index 0000000000..0e06064092 --- /dev/null +++ b/monitoring/METRICS.md @@ -0,0 +1,115 @@ +# Metrics + +## Prometheus Targets + +Set up targets for Loki and Prometheus containers: + +prom-targets + +## Grafana Dashboards + +Снимок экрана 2025-02-21 в 22 22 36 + +Снимок экрана 2025-02-21 в 22 23 03 + +## Log Rotation + +To prevent excessive log growth, all services now use **log rotation**: + +- Maximum log file size: **10MB** +- Maximum number of log files: **3** + +Configuration in `docker-compose.yml`: + +```yaml +logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" +``` + +## Memory Limits + +Each service now has defined **memory usage limits** to improve system stability: + +- **Loki, Grafana, Prometheus:** `512MB` +- **Promtail, Moscow-time-app, Omsk-time-app:** `256MB` + +Configuration: + +```yaml +deploy: + resources: + limits: + memory: 512M +``` + +## Prometheus Metrics Collection + +Prometheus is configured to scrape metrics from all services. 
The updated `prometheus.yml` includes: + +```yaml +global: + scrape_interval: 15s + +scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["prometheus:9090"] + + - job_name: "loki" + static_configs: + - targets: ["loki:3100"] + + - job_name: "grafana" + metrics_path: "/metrics" + static_configs: + - targets: ["grafana:3000"] + + - job_name: "moscow-time-app" + metrics_path: "/metrics" + static_configs: + - targets: ["moscow-time-app:8080"] + + - job_name: "omsk-time-app" + metrics_path: "/metrics" + static_configs: + - targets: ["omsk-time-app:4567"] +``` + +## Application Metrics + +### Python app + +Снимок экрана 2025-02-22 в 01 49 45 + +### Ruby app + +Снимок экрана 2025-02-22 в 01 50 22 + +## Targets + +![image](https://github.com/user-attachments/assets/ff368f9e-bba2-41d7-8c95-091a42d5fd33) + +## Health Checks + +```bash +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +76500886e596 grafana/grafana:latest "/run.sh" 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:3000->3000/tcp monitoring-grafana-1 +1221174d217a grafana/promtail:latest "/usr/bin/promtail -…" 2 minutes ago Up 2 minutes monitoring-promtail-1 +2601ad899374 ebob/moscow-time:v1.1 "gunicorn -w 4 -b 0.…" 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:8080->8080/tcp moscow-time-app +c88b7b114f7a ebob/omsk-time:v1.1 "ruby app.rb" 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:8081->4567/tcp omsk-time-app +452a99ba7497 prom/prometheus:latest "/bin/prometheus --c…" 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:9090->9090/tcp monitoring-prometheus-1 +818f468a006a grafana/loki:latest "/usr/bin/loki -conf…" 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:3100->3100/tcp monitoring-loki-1 +``` + +Modify docker-compose.yml file with: + +```yml +healthcheck: + test: ["CMD", "wget", "-q", "--spider", "http://0.0.0.0:8080"] + interval: 30s + timeout: 10s + retries: 3 +``` diff --git a/monitoring/docker-compose.yml b/monitoring/docker-compose.yml new file mode 100644 index 0000000000..22f123ffc9 --- 
/dev/null +++ b/monitoring/docker-compose.yml @@ -0,0 +1,162 @@ +--- +services: + loki: + image: grafana/loki:latest + ports: + - "3100:3100" + volumes: + - loki-data:/loki + command: -config.file=/etc/loki/local-config.yaml + networks: + - monitoring + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 512M + healthcheck: + test: ["CMD", "wget", "-q", "--spider", "http://localhost:3100/ready"] + interval: 30s + timeout: 10s + retries: 3 + + promtail: + image: grafana/promtail:latest + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/containers:/var/lib/docker/containers + - ./promtail-config.yml:/etc/promtail/config.yml + command: -config.file=/etc/promtail/config.yml + depends_on: + - loki + networks: + - monitoring + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 256M + + grafana: + image: grafana/grafana:latest + ports: + - "3000:3000" + environment: + - GF_SECURITY_ADMIN_USER=admin + - GF_SECURITY_ADMIN_PASSWORD=admin + - GF_USERS_ALLOW_SIGN_UP=false + volumes: + - grafana-data:/var/lib/grafana + - ./grafana-datasources.yml:/etc/grafana/provisioning/datasources/datasources.yml + depends_on: + - loki + - prometheus + networks: + - monitoring + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 512M + healthcheck: + test: ["CMD", "wget", "-q", "--spider", "http://localhost:3000/api/health"] + interval: 30s + timeout: 10s + retries: 3 + + prometheus: + image: prom/prometheus:latest + ports: + - "9090:9090" + volumes: + - ./prometheus.yml:/etc/prometheus/prometheus.yml + command: + - "--config.file=/etc/prometheus/prometheus.yml" + networks: + - monitoring + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 512M + healthcheck: + test: ["CMD", "wget", "-q", 
"--spider", "http://localhost:9090/-/healthy"] + interval: 30s + timeout: 10s + retries: 3 + + moscow-time-app: + image: ebob/moscow-time:v1.1 + ports: + - "8080:8080" + container_name: moscow-time-app + restart: unless-stopped + networks: + - monitoring + labels: + - "logging=true" + - "app=moscow-time" + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 256M + healthcheck: + test: ["CMD", "wget", "-q", "--spider", "http://0.0.0.0:8080"] + interval: 30s + timeout: 10s + retries: 3 + + omsk-time-app: + image: ebob/omsk-time:v1.1 + ports: + - "8081:4567" + container_name: omsk-time-app + restart: unless-stopped + networks: + - monitoring + labels: + - "logging=true" + - "app=omsk-time" + logging: + driver: "json-file" + options: + max-size: "10m" + max-file: "3" + deploy: + resources: + limits: + memory: 256M + healthcheck: + test: ["CMD", "wget", "-q", "--spider", "http://0.0.0.0:4567"] + interval: 30s + timeout: 10s + retries: 3 + +networks: + monitoring: + driver: bridge + +volumes: + loki-data: + grafana-data: diff --git a/monitoring/grafana-datasources.yml b/monitoring/grafana-datasources.yml new file mode 100644 index 0000000000..8942b0a77e --- /dev/null +++ b/monitoring/grafana-datasources.yml @@ -0,0 +1,18 @@ +--- + +apiVersion: 1 + +datasources: + - name: Loki + type: loki + access: proxy + url: http://loki:3100 + isDefault: false + editable: false + + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: true + editable: false diff --git a/monitoring/prometheus.yml b/monitoring/prometheus.yml new file mode 100644 index 0000000000..5f53ba5283 --- /dev/null +++ b/monitoring/prometheus.yml @@ -0,0 +1,28 @@ +--- + +global: + scrape_interval: 15s + +scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["prometheus:9090"] + + - job_name: "loki" + static_configs: + - targets: ["loki:3100"] + + - job_name: "grafana" + metrics_path: 
"/metrics" + static_configs: + - targets: ["grafana:3000"] + + - job_name: "moscow-time-app" + metrics_path: "/metrics" + static_configs: + - targets: ["moscow-time-app:8080"] + + - job_name: "omsk-time-app" + metrics_path: "/metrics" + static_configs: + - targets: ["omsk-time-app:4567"] diff --git a/monitoring/promtail-config.yml b/monitoring/promtail-config.yml new file mode 100644 index 0000000000..2bea83ff08 --- /dev/null +++ b/monitoring/promtail-config.yml @@ -0,0 +1,24 @@ +--- + +server: + http_listen_port: 9080 + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + - job_name: docker + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + relabel_configs: + - source_labels: ['__meta_docker_container_name'] + regex: '/(.*)' + target_label: 'container' + - source_labels: ['__meta_docker_container_log_stream'] + target_label: 'logstream' + - source_labels: ['__meta_docker_container_label_com_docker_compose_service'] + target_label: 'service' diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000000..ecac1dfa7b --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,5 @@ +.terraform/ +*.tfstate +*.tfstate.backup +*.tfvars +.terraform.lock.hcl diff --git a/terraform/TF.md b/terraform/TF.md new file mode 100644 index 0000000000..afb1e7ec1b --- /dev/null +++ b/terraform/TF.md @@ -0,0 +1,722 @@ +# Terraform + +## Best Practices + +* Use `.gitignore` in order not to push sensitive data from `terraform.tfstate` file (It is better to keep it in your S3 bucket). + +* Use `terraform fmt` and `terraform validate` to format and check correctness of your code.
+ +* It is better to review changes with `terraform plan` first before agreeing to `terraform apply` + +* Use `variables.tf` in order not to hardcode variables and mark the sensitive ones with the flag `sensitive = true` + +* Specify exact versions in `terraform required_providers` + +* Define variables in `.tfvars` or environment variables, but do not push them to the repo + +## Docker Infrastructure Using Terraform + +### `terraform state list` command output + +```bash +ebob@laptop docker_terraform % terraform state list + +docker_container.app_python_container +docker_container.app_ruby_container +``` + +### `terraform state show` command output + +```bash +ebob@laptop docker_terraform % terraform state show docker_container.app_python_container +# docker_container.app_python_container: +resource "docker_container" "app_python_container" { + attach = false + bridge = null + command = [ + "gunicorn", + "-w", + "4", + "-b", + "0.0.0.0:8080", + "app:app", + ] + container_read_refresh_timeout_milliseconds = 15000 + cpu_set = null + cpu_shares = 0 + domainname = null + entrypoint = [] + env = [] + hostname = "49cc30c669fa" + id = "49cc30c669fa8a39fab6ee8f89f43c269dd1a245a6f5629dafc7fa478dc69292" + image = "sha256:6dbe2f8b0f5e842457c6d2a4df1cae14e8f07dde54194a3b67fa6671be7d8d3b" + init = false + ipc_mode = "private" + log_driver = "json-file" + logs = false + max_retry_count = 0 + memory = 0 + memory_swap = 0 + must_run = true + name = "msk" + network_data = [ + { + gateway = "172.17.0.1" + global_ipv6_address = null + global_ipv6_prefix_length = 0 + ip_address = "172.17.0.2" + ip_prefix_length = 16 + ipv6_gateway = null + mac_address = "02:42:ac:11:00:02" + network_name = "bridge" + }, + ] + network_mode = "bridge" + pid_mode = null + privileged = false + publish_all_ports = false + read_only = false + remove_volumes = true + restart = "no" + rm = false + runtime = "runc" + security_opts = [] + shm_size = 64 + start = true + stdin_open = false + stop_signal = null + stop_timeout = 0 + 
tty = false + user = "appuser" + userns_mode = null + wait = false + wait_timeout = 60 + working_dir = "/app" + + ports { + external = 8080 + internal = 8080 + ip = "0.0.0.0" + protocol = "tcp" + } +} +``` + +### `terraform output` command + +```bash +ebob@laptop docker_terraform % terraform output +container_id_python = "49cc30c669fa8a39fab6ee8f89f43c269dd1a245a6f5629dafc7fa478dc69292" +container_id_ruby = "053cc71da7897e90ef78158ff045377e56e1d228b340788a16ade2c91f49460c" +container_image_python = "ebob/moscow-time:v1.0" +container_image_ruby = "ebob/omsk-time:v1.0" +container_name_python = "msk" +container_name_ruby = "omsk" +container_port_python = tolist([ + { + "external" = 8080 + "internal" = 8080 + "ip" = "0.0.0.0" + "protocol" = "tcp" + }, +]) +container_port_ruby = tolist([ + { + "external" = 8081 + "internal" = 4567 + "ip" = "0.0.0.0" + "protocol" = "tcp" + }, +]) +``` + +## Yandex Cloud Using Terraform + +### Getting started + +First of all, read [official guide from Yandex Cloud about Terraform](https://yandex.cloud/en-ru/docs/tutorials/infrastructure-management/terraform-quickstart). Then, visit [Yandex Cloud Provider on Terraform Registry](https://registry.terraform.io/providers/yandex-cloud/yandex/latest/docs). After that, we can start by creating service account and getting [IAM token](https://yandex.cloud/en-ru/docs/iam/operations/iam-token/create-for-sa). + +### `terraform plan` + +```bash +ebob@laptop yandex_cloud_terraform % terraform plan +var.cloud_id + Yandex Cloud ID + + Enter a value: + +var.folder_id + Yandex Folder ID + + Enter a value: + +var.iam_token + Enter a value: + + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # yandex_compute_disk.disk-1 will be created + + resource "yandex_compute_disk" "disk-1" { + + block_size = 4096 + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + image_id = "fd8k2ed4jspu35gfde1u" + + name = "disk-1" + + product_ids = (known after apply) + + size = 20 + + status = (known after apply) + + type = "network-hdd" + + zone = "ru-central1-b" + + + disk_placement_policy (known after apply) + + + hardware_generation (known after apply) + } + + # yandex_compute_instance.vm-1 will be created + + resource "yandex_compute_instance" "vm-1" { + + created_at = (known after apply) + + folder_id = (known after apply) + + fqdn = (known after apply) + + gpu_cluster_id = (known after apply) + + hardware_generation = (known after apply) + + hostname = "vm-1" + + id = (known after apply) + + maintenance_grace_period = (known after apply) + + maintenance_policy = (known after apply) + + metadata = { + + "ssh-keys" = (sensitive value) + } + + name = "vm-1" + + network_acceleration_type = "standard" + + platform_id = "standard-v2" + + service_account_id = (known after apply) + + status = (known after apply) + + zone = "ru-central1-b" + + + boot_disk { + + auto_delete = true + + device_name = (known after apply) + + disk_id = (known after apply) + + mode = (known after apply) + + + initialize_params (known after apply) + } + + + metadata_options (known after apply) + + + network_interface { + + index = (known after apply) + + ip_address = (known after apply) + + ipv4 = true + + ipv6 = (known after apply) + + ipv6_address = (known after apply) + + mac_address = (known after apply) + + nat = true + + nat_ip_address = (known after apply) + + nat_ip_version = (known after apply) + + security_group_ids = (known after apply) + + subnet_id = (known after apply) + } + + + placement_policy (known after 
apply) + + + resources { + + core_fraction = 20 + + cores = 2 + + memory = 2 + } + + + scheduling_policy { + + preemptible = true + } + } + + # yandex_vpc_network.network-1 will be created + + resource "yandex_vpc_network" "network-1" { + + created_at = (known after apply) + + default_security_group_id = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "network-1" + + subnet_ids = (known after apply) + } + + # yandex_vpc_subnet.subnet-1 will be created + + resource "yandex_vpc_subnet" "subnet-1" { + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "subnet-1" + + network_id = (known after apply) + + v4_cidr_blocks = [ + + "192.168.1.0/24", + ] + + v6_cidr_blocks = (known after apply) + + zone = "ru-central1-b" + } + +Plan: 4 to add, 0 to change, 0 to destroy. +``` + +### `terraform apply` + +```bash +ebob@laptop yandex_cloud_terraform % terraform apply +var.cloud_id + Yandex Cloud ID + + Enter a value: + +var.folder_id + Yandex Folder ID + + Enter a value: + +var.iam_token + Enter a value: + + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # yandex_compute_disk.disk-1 will be created + + resource "yandex_compute_disk" "disk-1" { + + block_size = 4096 + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + image_id = "fd8k2ed4jspu35gfde1u" + + name = "disk-1" + + product_ids = (known after apply) + + size = 20 + + status = (known after apply) + + type = "network-hdd" + + zone = "ru-central1-b" + + + disk_placement_policy (known after apply) + + + hardware_generation (known after apply) + } + + # yandex_compute_instance.vm-1 will be created + + resource "yandex_compute_instance" "vm-1" { + + created_at = (known after apply) + + folder_id = (known after apply) + + fqdn = (known after apply) + + gpu_cluster_id = (known after apply) + + hardware_generation = (known after apply) + + hostname = "vm-1" + + id = (known after apply) + + maintenance_grace_period = (known after apply) + + maintenance_policy = (known after apply) + + metadata = { + + "ssh-keys" = (sensitive value) + } + + name = "vm-1" + + network_acceleration_type = "standard" + + platform_id = "standard-v2" + + service_account_id = (known after apply) + + status = (known after apply) + + zone = "ru-central1-b" + + + boot_disk { + + auto_delete = true + + device_name = (known after apply) + + disk_id = (known after apply) + + mode = (known after apply) + + + initialize_params (known after apply) + } + + + metadata_options (known after apply) + + + network_interface { + + index = (known after apply) + + ip_address = (known after apply) + + ipv4 = true + + ipv6 = (known after apply) + + ipv6_address = (known after apply) + + mac_address = (known after apply) + + nat = true + + nat_ip_address = (known after apply) + + nat_ip_version = (known after apply) + + security_group_ids = (known after apply) + + subnet_id = (known after apply) + } + + + placement_policy (known after 
apply) + + + resources { + + core_fraction = 20 + + cores = 2 + + memory = 2 + } + + + scheduling_policy { + + preemptible = true + } + } + + # yandex_vpc_network.network-1 will be created + + resource "yandex_vpc_network" "network-1" { + + created_at = (known after apply) + + default_security_group_id = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "network-1" + + subnet_ids = (known after apply) + } + + # yandex_vpc_subnet.subnet-1 will be created + + resource "yandex_vpc_subnet" "subnet-1" { + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "subnet-1" + + network_id = (known after apply) + + v4_cidr_blocks = [ + + "192.168.1.0/24", + ] + + v6_cidr_blocks = (known after apply) + + zone = "ru-central1-b" + } + +Plan: 4 to add, 0 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +yandex_vpc_network.network-1: Creating... +yandex_compute_disk.disk-1: Creating... +yandex_vpc_network.network-1: Creation complete after 5s [id=enp5ntrp4t5tvdbp0052] +yandex_vpc_subnet.subnet-1: Creating... +yandex_vpc_subnet.subnet-1: Creation complete after 0s [id=e2l1hqpsrv83hq97m16t] +yandex_compute_disk.disk-1: Still creating... [10s elapsed] +yandex_compute_disk.disk-1: Creation complete after 12s [id=epdh7buhebqifudk67p4] +yandex_compute_instance.vm-1: Creating... +yandex_compute_instance.vm-1: Still creating... [10s elapsed] +yandex_compute_instance.vm-1: Still creating... [20s elapsed] +yandex_compute_instance.vm-1: Still creating... [30s elapsed] +yandex_compute_instance.vm-1: Still creating... [40s elapsed] +yandex_compute_instance.vm-1: Creation complete after 43s [id=epd6avjtflh4nqkrg2an] + +Apply complete! Resources: 4 added, 0 changed, 0 destroyed. 
+``` + +### `terraform state list` + +```bash +ebob@laptop yandex_cloud_terraform % terraform state list +yandex_compute_disk.disk-1 +yandex_compute_instance.vm-1 +yandex_vpc_network.network-1 +yandex_vpc_subnet.subnet-1 +``` + +### `terraform state show` + +#### `yandex_vpc_network.network-1` + +```bash +ebob@laptop yandex_cloud_terraform % terraform state show yandex_vpc_network.network-1 +# yandex_vpc_network.network-1: +resource "yandex_vpc_network" "network-1" { + created_at = "2025-02-03T20:35:24Z" + default_security_group_id = "enpno7pvi66b7gepf4sr" + description = null + folder_id = "b1ghr0ljvdknal1p1q6g" + id = "enp5ntrp4t5tvdbp0052" + labels = {} + name = "network-1" + subnet_ids = [] +} +``` + +#### `yandex_vpc_subnet.subnet-1` + +```bash +ebob@laptop yandex_cloud_terraform % terraform state show yandex_vpc_subnet.subnet-1 +# yandex_vpc_subnet.subnet-1: +resource "yandex_vpc_subnet" "subnet-1" { + created_at = "2025-02-03T20:35:27Z" + description = null + folder_id = "b1ghr0ljvdknal1p1q6g" + id = "e2l1hqpsrv83hq97m16t" + labels = {} + name = "subnet-1" + network_id = "enp5ntrp4t5tvdbp0052" + route_table_id = null + v4_cidr_blocks = [ + "192.168.1.0/24", + ] + v6_cidr_blocks = [] + zone = "ru-central1-b" +} +``` + +#### `yandex_compute_disk.disk-1` + +```bash +ebob@laptop yandex_cloud_terraform % terraform state show yandex_compute_disk.disk-1 +# yandex_compute_disk.disk-1: +resource "yandex_compute_disk" "disk-1" { + block_size = 4096 + created_at = "2025-02-03T20:35:24Z" + description = null + folder_id = "b1ghr0ljvdknal1p1q6g" + id = "epdh7buhebqifudk67p4" + image_id = "fd8k2ed4jspu35gfde1u" + name = "disk-1" + product_ids = [ + "f2ekpu3f32a5gg9e40kq", + ] + size = 20 + snapshot_id = null + status = "ready" + type = "network-hdd" + zone = "ru-central1-b" + + disk_placement_policy { + disk_placement_group_id = null + } + + hardware_generation { + legacy_features { + pci_topology = "PCI_TOPOLOGY_V1" + } + } +} +``` + +#### `yandex_compute_instance.vm-1` 
+ +```bash +ebob@laptop yandex_cloud_terraform % terraform state show yandex_compute_instance.vm-1 +# yandex_compute_instance.vm-1: +resource "yandex_compute_instance" "vm-1" { + created_at = "2025-02-03T20:35:35Z" + description = null + folder_id = "b1ghr0ljvdknal1p1q6g" + fqdn = "vm-1.ru-central1.internal" + gpu_cluster_id = null + hardware_generation = [ + { + generation2_features = [] + legacy_features = [ + { + pci_topology = "PCI_TOPOLOGY_V1" + }, + ] + }, + ] + hostname = "vm-1" + id = "epd6avjtflh4nqkrg2an" + maintenance_grace_period = null + metadata = { + "ssh-keys" = (sensitive value) + } + name = "vm-1" + network_acceleration_type = "standard" + platform_id = "standard-v2" + service_account_id = null + status = "running" + zone = "ru-central1-b" + + boot_disk { + auto_delete = true + device_name = "epdh7buhebqifudk67p4" + disk_id = "epdh7buhebqifudk67p4" + mode = "READ_WRITE" + + initialize_params { + block_size = 4096 + description = null + image_id = "fd8k2ed4jspu35gfde1u" + kms_key_id = null + name = "disk-1" + size = 20 + snapshot_id = null + type = "network-hdd" + } + } + + metadata_options { + aws_v1_http_endpoint = 1 + aws_v1_http_token = 2 + gce_http_endpoint = 1 + gce_http_token = 1 + } + + network_interface { + index = 0 + ip_address = "192.168.1.29" + ipv4 = true + ipv6 = false + ipv6_address = null + mac_address = "d0:0d:65:7e:7d:7d" + nat = true + nat_ip_address = "84.201.163.253" + nat_ip_version = "IPV4" + security_group_ids = [] + subnet_id = "e2l1hqpsrv83hq97m16t" + } + + placement_policy { + host_affinity_rules = [] + placement_group_id = null + placement_group_partition = 0 + } + + resources { + core_fraction = 20 + cores = 2 + gpus = 0 + memory = 2 + } + + scheduling_policy { + preemptible = true + } +} +``` + +Yandex Cloud VM + +## GitHub Terraform + +### Import with `terraform import` + +```bash +ebob@laptop github_terraform % terraform import "github_repository.repo" "devops-labs" +var.github_token + GitHub personal access token + 
+ Enter a value: + +github_repository.repo: Importing from ID "devops-labs"... +github_repository.repo: Import prepared! + Prepared github_repository for import +github_repository.repo: Refreshing state... [id=devops-labs] + +Import successful! + +The resources that were imported are shown above. These resources are now in +your Terraform state and will henceforth be managed by Terraform. +``` + +### Apply changes + +```bash +ebob@laptop github_terraform % terraform apply +var.github_token + GitHub personal access token + + Enter a value: + +github_repository.repo: Refreshing state... [id=devops-labs] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + ~ update in-place + +Terraform will perform the following actions: + + # github_branch_default.master will be created + + resource "github_branch_default" "master" { + + branch = "master" + + etag = (known after apply) + + id = (known after apply) + + rename = false + + repository = "devops-labs" + } + + # github_branch_protection.master will be created + + resource "github_branch_protection" "master" { + + allows_deletions = false + + allows_force_pushes = false + + enforce_admins = true + + id = (known after apply) + + lock_branch = false + + pattern = "master" + + repository_id = "R_kgDONuYNyA" + + require_conversation_resolution = false + + require_signed_commits = false + + required_linear_history = false + + + required_pull_request_reviews { + + dismiss_stale_reviews = true + + require_code_owner_reviews = true + + require_last_push_approval = false + + required_approving_review_count = 1 + } + + + required_status_checks { + + strict = true + } + } + + # github_repository.repo will be updated in-place + ~ resource "github_repository" "repo" { + + description = "Innopolis University DevOps Course Labs" + ~ has_projects = true -> false + ~ has_wiki = true -> false + id = "devops-labs" + name = "devops-labs" + # 
(33 unchanged attributes hidden) + + # (1 unchanged block hidden) + } + +Plan: 2 to add, 1 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +github_repository.repo: Modifying... [id=devops-labs] +github_repository.repo: Modifications complete after 2s [id=devops-labs] +github_branch_default.master: Creating... +github_branch_protection.master: Creating... +github_branch_default.master: Creation complete after 3s [id=devops-labs] +github_branch_protection.master: Creation complete after 5s [id=BPR_kwDONuYNyM4DiLyy] + +Apply complete! Resources: 2 added, 1 changed, 0 destroyed. +``` + +## GitHub Teams Terraform + +Organization: [Bobkunov](https://github.com/Bobkunov) + +Repo: [phoenix-project](https://github.com/Bobkunov/phoenix-project) + +Teams: [Developers, DevOps, QA](https://github.com/orgs/Bobkunov/teams) + +```bash +ebob@laptop github_teams_terraform % terraform state list +github_branch_default.main +github_branch_protection.repo_protection +github_repository.repo +github_team.developers +github_team.devops +github_team.qa +github_team_repository.developers_access +github_team_repository.devops_access +github_team_repository.qa_access +``` diff --git a/terraform/docker_terraform/main.tf b/terraform/docker_terraform/main.tf new file mode 100644 index 0000000000..e74aa80731 --- /dev/null +++ b/terraform/docker_terraform/main.tf @@ -0,0 +1,28 @@ +terraform { + required_providers { + docker = { + source = "kreuzwerker/docker" + version = "~> 3.0.2" + } + } +} + +provider "docker" {} + +resource "docker_container" "app_python_container" { + image = var.docker_image_python + name = var.container_name_python + ports { + internal = var.internal_port_python + external = var.external_port_python + } +} + +resource "docker_container" "app_ruby_container" { + image = var.docker_image_ruby + name = var.container_name_ruby + ports { + 
internal = var.internal_port_ruby + external = var.external_port_ruby + } +} diff --git a/terraform/docker_terraform/outputs.tf b/terraform/docker_terraform/outputs.tf new file mode 100644 index 0000000000..0a5062d427 --- /dev/null +++ b/terraform/docker_terraform/outputs.tf @@ -0,0 +1,31 @@ +output "container_name_python" { + value = docker_container.app_python_container.name +} + +output "container_id_python" { + value = docker_container.app_python_container.id +} + +output "container_image_python" { + value = docker_container.app_python_container.image +} + +output "container_port_python" { + value = docker_container.app_python_container.ports +} + +output "container_name_ruby" { + value = docker_container.app_ruby_container.name +} + +output "container_id_ruby" { + value = docker_container.app_ruby_container.id +} + +output "container_image_ruby" { + value = docker_container.app_ruby_container.image +} + +output "container_port_ruby" { + value = docker_container.app_ruby_container.ports +} diff --git a/terraform/docker_terraform/variables.tf b/terraform/docker_terraform/variables.tf new file mode 100644 index 0000000000..7f75edb92d --- /dev/null +++ b/terraform/docker_terraform/variables.tf @@ -0,0 +1,47 @@ +variable "container_name_python" { + description = "Docker container name for Python app" + type = string + default = "msk" +} + +variable "docker_image_python" { + description = "Docker image for Python app" + type = string + default = "ebob/moscow-time:v1.0" +} + +variable "internal_port_python" { + description = "Internal port for Python app" + type = number + default = 8080 +} + +variable "external_port_python" { + description = "External port for Python app" + type = number + default = 8080 +} + +variable "container_name_ruby" { + description = "Docker container name for Ruby app" + type = string + default = "omsk" +} + +variable "docker_image_ruby" { + description = "Docker image for Ruby app" + type = string + default = "ebob/omsk-time:v1.0" +} + 
+variable "internal_port_ruby" { + description = "Internal port for Ruby app" + type = number + default = 4567 +} + +variable "external_port_ruby" { + description = "External port for Ruby app" + type = number + default = 8081 +} diff --git a/terraform/github_teams_terraform/main.tf b/terraform/github_teams_terraform/main.tf new file mode 100644 index 0000000000..a3282b0441 --- /dev/null +++ b/terraform/github_teams_terraform/main.tf @@ -0,0 +1,79 @@ +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 6.5.0" + } + } +} + +provider "github" { + owner = var.github_organization + token = var.github_token +} + + +resource "github_team" "developers" { + name = "Development Team" + description = "We write code" + privacy = "closed" + parent_team_id = null +} + +resource "github_team" "devops" { + name = "DevOps Team" + description = "We deploy code" + privacy = "closed" + parent_team_id = null +} + +resource "github_team" "qa" { + name = "QA Team" + description = "We test code" + privacy = "closed" + parent_team_id = null +} + +resource "github_repository" "repo" { + name = "phoenix-project" + description = "The Phoenix Project" + visibility = "public" + has_issues = true + has_wiki = true + auto_init = true + license_template = "mit" +} + +resource "github_branch_default" "main" { + repository = github_repository.repo.name + branch = "main" +} + +resource "github_branch_protection" "repo_protection" { + repository_id = github_repository.repo.id + pattern = github_branch_default.main.branch + require_conversation_resolution = true + enforce_admins = true + + required_pull_request_reviews { + required_approving_review_count = 1 + } +} + +resource "github_team_repository" "devops_access" { + team_id = github_team.devops.id + repository = github_repository.repo.name + permission = "admin" +} + +resource "github_team_repository" "developers_access" { + team_id = github_team.developers.id + repository = github_repository.repo.name 
+ permission = "push" +} + +resource "github_team_repository" "qa_access" { + team_id = github_team.qa.id + repository = github_repository.repo.name + permission = "pull" +} diff --git a/terraform/github_teams_terraform/variables.tf b/terraform/github_teams_terraform/variables.tf new file mode 100644 index 0000000000..654e58a400 --- /dev/null +++ b/terraform/github_teams_terraform/variables.tf @@ -0,0 +1,11 @@ +variable "github_token" { + type = string + description = "GitHub token" + sensitive = true +} + +variable "github_organization" { + type = string + description = "Organization" + default = "Bobkunov" +} diff --git a/terraform/github_terraform/main.tf b/terraform/github_terraform/main.tf new file mode 100644 index 0000000000..ee61049209 --- /dev/null +++ b/terraform/github_terraform/main.tf @@ -0,0 +1,41 @@ +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 6.5.0" + } + } +} + +provider "github" { + token = var.github_token +} + +resource "github_repository" "repo" { + name = var.repository_name + description = var.repository_description + visibility = var.repository_visibility + has_downloads = var.has_downloads + has_issues = var.has_issues + has_wiki = var.has_wiki + has_projects = var.has_projects +} + +resource "github_branch_default" "master" { + repository = github_repository.repo.name + branch = var.default_branch +} + +resource "github_branch_protection" "master" { + repository_id = github_repository.repo.node_id + pattern = var.default_branch + required_status_checks { + strict = var.strict + contexts = [] + } + enforce_admins = var.enforce_admins + required_pull_request_reviews { + dismiss_stale_reviews = var.dismiss_stale_reviews + require_code_owner_reviews = var.require_code_owner_reviews + } +} diff --git a/terraform/github_terraform/variables.tf b/terraform/github_terraform/variables.tf new file mode 100644 index 0000000000..4c6970ce57 --- /dev/null +++ b/terraform/github_terraform/variables.tf 
@@ -0,0 +1,77 @@ +variable "github_token" { + description = "GitHub personal access token" + type = string + sensitive = true +} + +variable "repository_name" { + default = "devops-labs" + description = "GitHub repository name" + type = string +} + +variable "repository_description" { + default = "Innopolis University DevOps Course Labs" + description = "GitHub repository description" + type = string +} + +variable "repository_visibility" { + default = "public" + description = "GitHub repository visibility" + type = string +} + +variable "has_downloads" { + default = true + description = "Enable GitHub downloads" + type = bool +} + +variable "has_issues" { + default = false + description = "Enable GitHub issues" + type = bool +} + +variable "has_wiki" { + default = false + description = "Enable GitHub wiki" + type = bool +} + +variable "has_projects" { + default = false + description = "Enable GitHub projects" + type = bool +} + +variable "default_branch" { + default = "master" + description = "GitHub default branch" + type = string +} + +variable "strict" { + default = false + description = "Require branches to be up to date before merging" + type = bool +} + +variable "enforce_admins" { + default = false + description = "Enforce all configured restrictions for administrators" + type = bool +} + +variable "dismiss_stale_reviews" { + default = false + description = "Dismiss approved reviews when someone pushes a new commit" + type = bool +} + +variable "require_code_owner_reviews" { + default = false + description = "Require an approved review in pull requests including files with a designated code owner" + type = bool +} diff --git a/terraform/yandex_cloud_terraform/main.tf b/terraform/yandex_cloud_terraform/main.tf new file mode 100644 index 0000000000..e7eaefbc00 --- /dev/null +++ b/terraform/yandex_cloud_terraform/main.tf @@ -0,0 +1,65 @@ +terraform { + required_providers { + yandex = { + source = "yandex-cloud/yandex" + version = "0.136.0" + } + } +} + 
+provider "yandex" { + zone = var.zone + token = var.iam_token + cloud_id = var.cloud_id + folder_id = var.folder_id +} + +resource "yandex_compute_instance" "vm-1" { + name = var.vm_name + platform_id = var.platform_id + zone = var.zone + hostname = var.hostname + + resources { + cores = var.cores + core_fraction = var.core_fraction + memory = var.memory + } + + scheduling_policy { + preemptible = var.preemptible + } + + boot_disk { + disk_id = yandex_compute_disk.disk-1.id + } + + network_interface { + subnet_id = yandex_vpc_subnet.subnet-1.id + nat = var.nat + } + + metadata = { + "ssh-keys" = format("%s:%s", var.vm_username, file(var.ssh_pubkey_path)) + } + +} + +resource "yandex_compute_disk" "disk-1" { + name = var.disk_name + zone = var.zone + size = var.disk_size + type = var.disk_type + image_id = var.image_id +} + +resource "yandex_vpc_network" "network-1" { + name = var.network_name +} + +resource "yandex_vpc_subnet" "subnet-1" { + name = var.subnet_name + zone = var.zone + network_id = yandex_vpc_network.network-1.id + v4_cidr_blocks = ["192.168.1.0/24"] +} diff --git a/terraform/yandex_cloud_terraform/variables.tf b/terraform/yandex_cloud_terraform/variables.tf new file mode 100644 index 0000000000..48ec36dadc --- /dev/null +++ b/terraform/yandex_cloud_terraform/variables.tf @@ -0,0 +1,120 @@ +variable "iam_token" { + type = string + sensitive = true +} + +variable "cloud_id" { + description = "Yandex Cloud ID" + type = string + sensitive = true +} + +variable "folder_id" { + description = "Yandex Folder ID" + type = string + sensitive = true +} + +variable "zone" { + description = "Yandex Cloud availability zone" + type = string + default = "ru-central1-b" +} + +variable "image_id" { + description = "ID image" + type = string + default = "fd8k2ed4jspu35gfde1u" +} + +variable "vm_name" { + description = "Name of the virtual machine" + type = string + default = "vm-1" +} + +variable "platform_id" { + description = "ID of the platform" + type = string + 
default = "standard-v2" +} + +variable "hostname" { + description = "Hostname of the virtual machine" + type = string + default = "vm-1" +} + +variable "cores" { + description = "Number of CPU cores" + type = number + default = 2 +} + +variable "core_fraction" { + description = "CPU core fraction" + type = number + default = 20 +} + +variable "memory" { + description = "Amount of memory in GB" + type = number + default = 2 +} + +variable "preemptible" { + description = "Preemptible instance" + type = bool + default = true +} + +variable "disk_name" { + description = "Name of the disk" + type = string + default = "disk-1" +} + +variable "disk_size" { + description = "Size of the disk in GB" + type = number + default = 20 +} + +variable "disk_type" { + description = "Type of the disk" + type = string + default = "network-hdd" +} + +variable "network_name" { + description = "Name of the network" + type = string + default = "network-1" +} + +variable "subnet_name" { + description = "Name of the subnet" + type = string + default = "subnet-1" +} + +variable "nat" { + description = "Enable NAT" + type = bool + default = true +} + +variable "vm_username" { + description = "Username for SSH access" + type = string + default = "ubuntu" + sensitive = true +} + +variable "ssh_pubkey_path" { + description = "Path to the SSH public key" + type = string + default = "~/.ssh/id_rsa.pub" + sensitive = true +}