diff --git a/.github/workflows/app_python.yml b/.github/workflows/app_python.yml new file mode 100644 index 0000000000..768d8a4cd3 --- /dev/null +++ b/.github/workflows/app_python.yml @@ -0,0 +1,127 @@ +name: CI for app_python + +on: + push: + paths: + - 'app_python/**' + - '.github/workflows/app_python.yml' + pull_request: + paths: + - 'app_python/**' + - '.github/workflows/app_python.yml' + +defaults: + run: + working-directory: app_python + +jobs: + lint_and_format: + timeout-minutes: 2 + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.13' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pylint + + - name: Run code formatter (black) + uses: psf/black@stable + with: + options: "--check --diff" + src: "./app_python" + + - name: Run linter (pylint) + run: pylint app.py --disable=R,C + + test: + timeout-minutes: 2 + runs-on: ubuntu-22.04 + needs: lint_and_format + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.13' + cache: 'pip' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install pytest requests + + - name: Run tests + run: pytest test_app.py + + security_scan: + timeout-minutes: 5 + runs-on: ubuntu-22.04 + needs: [lint_and_format, test] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Snyk to check for vulnerabilities + uses: snyk/actions/python-3.10@master + with: + args: --skip-unresolved app_python/ + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + + + docker_build_and_push: + timeout-minutes: 10 + runs-on: ubuntu-22.04 + needs: [lint_and_format, test, security_scan] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get short 
commit hash + id: commit + run: echo "SHORT_COMMIT_HASH=$(echo ${GITHUB_SHA} | cut -c1-7)" >> $GITHUB_ENV + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + platforms: linux/amd64,linux/arm64,linux/arm/v7 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + registry: ghcr.io + + - name: Log in to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: docker.io + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: "{{defaultContext}}:app_python" + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/moscow-time:${{ env.SHORT_COMMIT_HASH }} + ${{ secrets.DOCKERHUB_USERNAME }}/moscow-time:${{ env.SHORT_COMMIT_HASH }} + platforms: linux/amd64,linux/arm64,linux/arm/v7 + cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner }}/moscow-time:buildcache + cache-to: type=registry,ref=ghcr.io/${{ github.repository_owner }}/moscow-time:buildcache,mode=max diff --git a/.github/workflows/app_ruby.yml b/.github/workflows/app_ruby.yml new file mode 100644 index 0000000000..afe3ccdb92 --- /dev/null +++ b/.github/workflows/app_ruby.yml @@ -0,0 +1,119 @@ +name: CI for app_ruby + +on: + push: + paths: + - 'app_ruby/**' + - '.github/workflows/app_ruby.yml' + pull_request: + paths: + - 'app_ruby/**' + - '.github/workflows/app_ruby.yml' + +defaults: + run: + working-directory: app_ruby + +jobs: + lint_and_format: + timeout-minutes: 2 + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.4' + bundler-cache: true + + - name: Install dependencies + run: | + gem install bundler + bundle install + + - name: Run RuboCop + run: bundle exec 
rubocop + + test: + timeout-minutes: 2 + runs-on: ubuntu-22.04 + needs: lint_and_format + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.4' + bundler-cache: true + + - name: Install dependencies + run: | + gem install bundler + bundle install + + - name: Run tests + run: bundle exec rspec + + security_scan: + timeout-minutes: 5 + runs-on: ubuntu-22.04 + needs: [lint_and_format, test] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Snyk to check for vulnerabilities + uses: snyk/actions/ruby@master + continue-on-error: true + with: + args: --skip-unresolved app_ruby/ + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + + docker_build_and_push: + timeout-minutes: 10 + runs-on: ubuntu-22.04 + needs: [lint_and_format, test, security_scan] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get short commit hash + id: commit + run: echo "SHORT_COMMIT_HASH=$(echo ${GITHUB_SHA} | cut -c1-7)" >> $GITHUB_ENV + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + platforms: linux/amd64,linux/arm64,linux/arm/v7 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + registry: ghcr.io + + - name: Log in to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: docker.io + + - name: Build and push Docker image + uses: docker/build-push-action@v6 + with: + context: "{{defaultContext}}:app_ruby" + push: true + tags: | + ghcr.io/${{ github.repository_owner }}/omsk-time:${{ env.SHORT_COMMIT_HASH }} + ${{ secrets.DOCKERHUB_USERNAME }}/omsk-time:${{ env.SHORT_COMMIT_HASH }} + platforms: linux/amd64,linux/arm64,linux/arm/v7 + cache-from: type=registry,ref=ghcr.io/${{ github.repository_owner 
}}/omsk-time:buildcache + cache-to: type=gha,mode=max,type=registry,ref=ghcr.io/${{ github.repository_owner }}/omsk-time:buildcache,mode=max diff --git a/ansible/.gitignore b/ansible/.gitignore new file mode 100644 index 0000000000..cab52b5c29 --- /dev/null +++ b/ansible/.gitignore @@ -0,0 +1,4 @@ +fact_cache/ +__pycache__/ +.ansible/ +yacloud_token diff --git a/ansible/ANSIBLE.md b/ansible/ANSIBLE.md new file mode 100644 index 0000000000..15ea0201d8 --- /dev/null +++ b/ansible/ANSIBLE.md @@ -0,0 +1,252 @@ +# Ansible + +I used *Yandex Cloud Compute VM* that I created with `Terraform` as targets to run my playbooks. + +## Best practices + +- Properly structured Ansible project +- Use Dynamic Inventory for Cloud Environments +- Write Idempotent Playbooks +- Use Handlers for Service Restarts +- Write Reusable Roles +- Use `fact_caching` +- Test Playbooks Before Running on Production + - Use `ansible-lint` to check for best practices. + - Use `--check` mode to preview changes (`ansible-playbook main.yml --check`) +- Use `loop` instead of duplicating tasks + +## Execute playbook to deploy the Docker role + +```bash +ebob@laptop ansible % ansible-playbook playbooks/dev/main.yml -i inventory/yacloud_compute.yml --diff --check + +PLAY [Install and configure Docker] ********************************************************************************** + +TASK [Gathering Facts] *********************************************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker] *************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/docker/tasks/install_docker.yml for vm-1 + +TASK [docker : Update apt package index] ***************************************************************************** +changed: [vm-1] + +TASK [docker : Install required system packages] ********************************************************************* +ok: [vm-1] => 
(item=apt-transport-https) +ok: [vm-1] => (item=ca-certificates) +ok: [vm-1] => (item=curl) +ok: [vm-1] => (item=gnupg-agent) +ok: [vm-1] => (item=software-properties-common) + +TASK [docker : Add Docker's official GPG key] ************************************************************************ +ok: [vm-1] + +TASK [docker : Add Docker's official apt repository] ***************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker and dependencies] ********************************************************************** +ok: [vm-1] + +TASK [docker : Add user to docker group] ***************************************************************************** +ok: [vm-1] + +TASK [docker : Enable Docker service to start on boot] *************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker Compose] ******************************************************************************* +included: /Users/ebob/Code/devops-labs/ansible/roles/docker/tasks/install_docker_compose.yml for vm-1 + +TASK [docker : Install Docker Compose] ******************************************************************************* +ok: [vm-1] + +PLAY RECAP *********************************************************************************************************** +vm-1 : ok=11 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + +WARNING: All log messages before absl::InitializeLog() is called are written to STDERR +E0000 00:00:1739210586.456952 1547116 init.cc:232] grpc_wait_for_shutdown_with_timeout() timed out. 
+``` + +## Inventory Details + +### `ansible-inventory -i .yaml --list` + +```bash +venvebob@laptop ansible % ansible-inventory -i inventory/yacloud_compute.yml --list | tail -n 50 + + }, + "ansible_user_gecos": { + "__ansible_unsafe": "Ubuntu" + }, + "ansible_user_gid": 1000, + "ansible_user_id": { + "__ansible_unsafe": "ubuntu" + }, + "ansible_user_shell": { + "__ansible_unsafe": "/bin/bash" + }, + "ansible_user_uid": 1000, + "ansible_userspace_architecture": { + "__ansible_unsafe": "x86_64" + }, + "ansible_userspace_bits": { + "__ansible_unsafe": "64" + }, + "ansible_virtualization_role": { + "__ansible_unsafe": "NA" + }, + "ansible_virtualization_tech_guest": [], + "ansible_virtualization_tech_host": [], + "ansible_virtualization_type": { + "__ansible_unsafe": "NA" + }, + "discovered_interpreter_python": { + "__ansible_unsafe": "/usr/bin/python3.12" + }, + "gather_subset": [ + { + "__ansible_unsafe": "all" + } + ], + "module_setup": true + } + } + }, + "all": { + "children": [ + "ungrouped", + "yacloud" + ] + }, + "yacloud": { + "hosts": [ + "vm-1" + ] + } +} +``` + +### `ansible-inventory -i .yaml --graph` + +```bash +ebob@laptop ansible % ansible-inventory -i inventory/yacloud_compute.yml --graph +@all: + |--@ungrouped: + |--@yacloud: + | |--vm-1 +``` + +## Application Deployment + +### Deploy `app_python` + +`ansible-playbook playbooks/dev/app_python/main.yml -i inventory/yacloud_compute.yml` + +```shell +ebob@laptop ansible % ansible-playbook playbooks/dev/app_python/main.yml -i inventory/yacloud_compute.yml + +PLAY [Deploy app_python] ********************************************************************************************************************************************** + +TASK [Gathering Facts] ************************************************************************************************************************************************ +ok: [vm-1] + +TASK [docker : Install Docker] 
**************************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/docker/tasks/install_docker.yml for vm-1 + +TASK [docker : Update apt package index] ****************************************************************************************************************************** +changed: [vm-1] + +TASK [docker : Install required system packages] ********************************************************************************************************************** +ok: [vm-1] => (item=apt-transport-https) +ok: [vm-1] => (item=ca-certificates) +ok: [vm-1] => (item=curl) +ok: [vm-1] => (item=gnupg-agent) +ok: [vm-1] => (item=software-properties-common) + +TASK [docker : Add Docker's official GPG key] ************************************************************************************************************************* +ok: [vm-1] + +TASK [docker : Add Docker's official apt repository] ****************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker and dependencies] *********************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Add user to docker group] ****************************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Configure Docker security settings] ******************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Enable Docker service to start on boot] **************************************************************************************************************** +ok: [vm-1] + +TASK [docker : Install Docker Compose] 
******************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/docker/tasks/install_docker_compose.yml for vm-1 + +TASK [docker : Install Docker Compose] ******************************************************************************************************************************** +ok: [vm-1] + +TASK [web_app : Full wipe] ******************************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/web_app/tasks/0-wipe.yml for vm-1 + +TASK [web_app : Ensure web_app_dir exists] **************************************************************************************************************************** +changed: [vm-1] + +TASK [web_app : Check if docker-compose.yml exists] ******************************************************************************************************************* +ok: [vm-1] + +TASK [web_app : Wipe images] ****************************************************************************************************************************************** +skipping: [vm-1] + +TASK [web_app : Remove app directory] ********************************************************************************************************************************* +changed: [vm-1] + +TASK [web_app : Deploy dockerized app] ******************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/web_app/tasks/1-deploy.yml for vm-1 + +TASK [web_app : Create app directory] ********************************************************************************************************************************* +changed: [vm-1] + +TASK [web_app : Copy Docker Compose template] 
************************************************************************************************************************* +changed: [vm-1] + +TASK [web_app : Ensure docker service is OK] ************************************************************************************************************************** +ok: [vm-1] + +TASK [web_app : Create and start the services] ************************************************************************************************************************ +changed: [vm-1] + +PLAY RECAP ************************************************************************************************************************************************************ +vm-1 : ok=21 changed=6 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 +``` + +### Wipe `app_python` + +`ansible-playbook playbooks/dev/app_python/main.yml -i inventory/yacloud_compute.yml --tags=wipe` + +```shell +ebob@laptop ansible % ansible-playbook playbooks/dev/app_python/main.yml -i inventory/yacloud_compute.yml --tags=wipe + +PLAY [Deploy app_python] ********************************************************************************************************************************************** + +TASK [Gathering Facts] ************************************************************************************************************************************************ +ok: [vm-1] + +TASK [web_app : Full wipe] ******************************************************************************************************************************************** +included: /Users/ebob/Code/devops-labs/ansible/roles/web_app/tasks/0-wipe.yml for vm-1 + +TASK [web_app : Ensure web_app_dir exists] **************************************************************************************************************************** +ok: [vm-1] + +TASK [web_app : Check if docker-compose.yml exists] ******************************************************************************************************************* +ok: [vm-1] 
+ +TASK [web_app : Wipe images] ****************************************************************************************************************************************** +changed: [vm-1] + +TASK [web_app : Remove app directory] ********************************************************************************************************************************* +changed: [vm-1] + +PLAY RECAP ************************************************************************************************************************************************************ +vm-1 : ok=6 changed=2 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 +``` diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..65b71a1522 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,13 @@ +[defaults] +inventory = inventory +inventory_plugins = ./inventory_plugins +remote_user = ubuntu +playbook_dir = ./playbooks/ +roles_path = ./roles/ +host_key_checking = False +fact_caching = jsonfile +fact_caching_connection = ./fact_cache +fact_caching_timeout = 86400 + +[inventory] +enable_plugins = yacloud_compute diff --git a/ansible/inventory/yacloud_compute.yml b/ansible/inventory/yacloud_compute.yml new file mode 100644 index 0000000000..5d718a65bb --- /dev/null +++ b/ansible/inventory/yacloud_compute.yml @@ -0,0 +1,4 @@ +plugin: yacloud_compute +yacloud_clouds: cloud-ebob +yacloud_folders: default +yacloud_token_file: ./inventory/yacloud_token diff --git a/ansible/inventory_plugins/yacloud_compute.py b/ansible/inventory_plugins/yacloud_compute.py new file mode 100644 index 0000000000..fb105c61b6 --- /dev/null +++ b/ansible/inventory_plugins/yacloud_compute.py @@ -0,0 +1,171 @@ +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ + name: yacloud_compute + plugin_type: inventory + short_description: Yandex.Cloud compute inventory 
source + requirements: + - yandexcloud + extends_documentation_fragment: + - inventory_cache + - constructed + description: + - Get inventory hosts from Yandex Cloud + - Uses a YAML configuration file that ends with C(yacloud_compute.(yml|yaml)). + options: + plugin: + description: Token that ensures this is a source file for the plugin. + required: True + choices: ['yacloud_compute'] + yacloud_token: + description: Oauth token for yacloud connection + yacloud_token_file: + description: File with oauth token for yacloud connection + yacloud_clouds: + description: Names of clouds to get hosts from + type: list + default: [] + yacloud_folders: + description: Names of folders to get hosts from + type: list + default: [] + yacloud_group_label: + description: VM's label used for group assignment + type: string + default: "" +""" + +EXAMPLES = """ +""" + +from ansible.errors import AnsibleError +from ansible.module_utils._text import to_native +from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, Constructable +from ansible.utils.display import Display + +try: + import yandexcloud + from google.protobuf.json_format import MessageToDict + from yandex.cloud.compute.v1.instance_service_pb2 import ListInstancesRequest + from yandex.cloud.compute.v1.instance_service_pb2_grpc import InstanceServiceStub + from yandex.cloud.resourcemanager.v1.cloud_service_pb2 import ListCloudsRequest + from yandex.cloud.resourcemanager.v1.cloud_service_pb2_grpc import CloudServiceStub + from yandex.cloud.resourcemanager.v1.folder_service_pb2 import ListFoldersRequest + from yandex.cloud.resourcemanager.v1.folder_service_pb2_grpc import ( + FolderServiceStub, + ) +except ImportError: + raise AnsibleError("The yacloud dynamic inventory plugin requires yandexcloud") + +display = Display() + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + NAME = "yacloud_compute" + + def verify_file(self, path): + if super(InventoryModule, self).verify_file(path): + if 
path.endswith(("yacloud_compute.yml", "yacloud_compute.yaml")): + return True + display.debug( + "yacloud_compute inventory filename must end with 'yacloud_compute.yml' or 'yacloud_compute.yaml'" + ) + return False + + def _get_ip_for_instance(self, instance): + interfaces = instance["networkInterfaces"] + for interface in interfaces: + address = interface["primaryV4Address"] + if address: + if address.get("oneToOneNat"): + return address["oneToOneNat"]["address"] + else: + return address["address"] + return None + + def _get_clouds(self): + all_clouds = MessageToDict(self.cloud_service.List(ListCloudsRequest()))[ + "clouds" + ] + if self.get_option("yacloud_clouds"): + all_clouds[:] = [ + x for x in all_clouds if x["name"] in self.get_option("yacloud_clouds") + ] + self.clouds = all_clouds + + def _get_folders(self): + all_folders = [] + for cloud in self.clouds: + all_folders += MessageToDict( + self.folder_service.List(ListFoldersRequest(cloud_id=cloud["id"])) + )["folders"] + + if self.get_option("yacloud_folders"): + all_folders[:] = [ + x + for x in all_folders + if x["name"] in self.get_option("yacloud_folders") + ] + + self.folders = all_folders + + def _get_all_hosts(self): + self.hosts = [] + for folder in self.folders: + hosts = self.instance_service.List( + ListInstancesRequest(folder_id=folder["id"]) + ) + dict_ = MessageToDict(hosts) + + if dict_: + self.hosts += dict_["instances"] + + def _init_client(self): + file = self.get_option("yacloud_token_file") + if file is not None: + token = open(file).read().strip() + else: + token = self.get_option("yacloud_token") + if not token: + raise AnsibleError( + "token it empty. 
provide either `yacloud_token_file` or `yacloud_token`" + ) + sdk = yandexcloud.SDK(token=token) + + self.instance_service = sdk.client(InstanceServiceStub) + self.folder_service = sdk.client(FolderServiceStub) + self.cloud_service = sdk.client(CloudServiceStub) + + def _process_hosts(self): + group_label = str(self.get_option("yacloud_group_label")) + + for instance in self.hosts: + if group_label and group_label in instance["labels"]: + group = instance["labels"][group_label] + else: + group = "yacloud" + + self.inventory.add_group(group=group) + if instance["status"] == "RUNNING": + ip = self._get_ip_for_instance(instance) + if ip: + self.inventory.add_host(instance["name"], group=group) + self.inventory.set_variable( + instance["name"], "ansible_host", to_native(ip) + ) + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + self._init_client() + + self._get_clouds() + self._get_folders() + + self._get_all_hosts() + self._process_hosts() diff --git a/ansible/playbooks/dev/app_python/main.yml b/ansible/playbooks/dev/app_python/main.yml new file mode 100644 index 0000000000..5891fb352c --- /dev/null +++ b/ansible/playbooks/dev/app_python/main.yml @@ -0,0 +1,10 @@ +- name: Deploy app_python + hosts: all + become: true + roles: + - web_app + vars: + web_app_name: moscow-time + web_app_internal_port: 8080 + web_app_external_port: 8080 + web_app_full_wipe: true diff --git a/ansible/playbooks/dev/app_ruby/main.yml b/ansible/playbooks/dev/app_ruby/main.yml new file mode 100644 index 0000000000..4c7d4ee35d --- /dev/null +++ b/ansible/playbooks/dev/app_ruby/main.yml @@ -0,0 +1,10 @@ +- name: Deploy app_ruby + hosts: all + become: true + roles: + - web_app + vars: + web_app_name: omsk-time + web_app_internal_port: 4567 + web_app_external_port: 8081 + web_app_full_wipe: true diff --git a/ansible/roles/docker/README.md b/ansible/roles/docker/README.md new file mode 100644 
index 0000000000..8392a19749 --- /dev/null +++ b/ansible/roles/docker/README.md @@ -0,0 +1,18 @@ +# Docker Role + +This role installs and configures Docker and Docker Compose. + +## Requirements + +- Ansible 2.18+ +- Ubuntu 24.04 LTS + +## Usage + +```yaml +- name: Install and configure Docker + hosts: all + roles: + - role: docker + become: true +``` diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..a8a0bf67c7 --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,4 @@ +- name: Restart Docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/install_docker.yml b/ansible/roles/docker/tasks/install_docker.yml new file mode 100644 index 0000000000..fd8e831fff --- /dev/null +++ b/ansible/roles/docker/tasks/install_docker.yml @@ -0,0 +1,59 @@ +- name: Update apt package index + ansible.builtin.apt: + update_cache: true + +- name: Install required system packages + ansible.builtin.apt: + name: "{{ item }}" + state: present + loop: + - apt-transport-https + - ca-certificates + - curl + - gnupg-agent + - software-properties-common + +- name: Add Docker's official GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + +- name: Add Docker's official apt repository + ansible.builtin.apt_repository: + repo: deb https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable + state: present + +- name: Install Docker and dependencies + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + state: present + update_cache: true + +- name: Add user to docker group + ansible.builtin.user: + name: "{{ ansible_user }}" + 
groups: docker + append: true + when: ansible_user != 'root' + notify: Restart Docker + +- name: Configure Docker security settings + ansible.builtin.copy: + dest: /etc/docker/daemon.json + content: | + { + "userns-remap": "default" + } + owner: root + group: root + mode: '0644' + notify: Restart Docker + +- name: Enable Docker service to start on boot + ansible.builtin.systemd: + name: docker + enabled: true + state: started diff --git a/ansible/roles/docker/tasks/install_docker_compose.yml b/ansible/roles/docker/tasks/install_docker_compose.yml new file mode 100644 index 0000000000..066e1b70e9 --- /dev/null +++ b/ansible/roles/docker/tasks/install_docker_compose.yml @@ -0,0 +1,4 @@ +- name: Install Docker Compose + ansible.builtin.package: + name: docker-compose + state: present diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..545706f87f --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,5 @@ +- name: Install Docker + ansible.builtin.include_tasks: install_docker.yml + +- name: Install Docker Compose + ansible.builtin.include_tasks: install_docker_compose.yml diff --git a/ansible/roles/web_app/README.md b/ansible/roles/web_app/README.md new file mode 100644 index 0000000000..182c0f6119 --- /dev/null +++ b/ansible/roles/web_app/README.md @@ -0,0 +1,50 @@ +# Docker Role + +This role deploys application in Docker container using docker compose + +## Requirements + +- Ansible 2.18+ +- Ubuntu 24.04 LTS +- docker role (`../docker/`) + +## Variables + +| Variable Name | Description | Example | +|------------------------|-------------------------------------------------------------------------------------------------------|------------------------| +| web_app_name | The name of the web application. | "web_app" | +| web_app_dir | The directory where the web application is installed, using the value of web_app_name. 
| "/opt/{{ web_app_name }}/ " | +| web_app_docker_registry | The Docker registry where the web application image is hosted. | "docker.io" | +| web_app_docker_username | The username for accessing the Docker registry. | "ebob" | +| web_app_full_wipe | Determines whether a full wipe of the web application is required. | false | +| web_app_image | The full name of the web application image, including the registry, username, and application name. | "{{ web_app_docker_registry }}/{{ web_app_docker_username }}/{{ web_app_name }}" | +| web_app_image_tag | The tag for the web application image. | "latest" | +| web_app_internal_port | The internal port on which the web application operates within the container. | 80 | +| web_app_external_port | The external port on which the web application is accessible outside the container. | 8080 | + +This table provides a clear and organized documentation for each variable, including their descriptions and examples. + +## Tags + +We support tags for wipe only and deploy only, just add them at the end of `ansible-playbook` command: + +```bash +--tags=wipe + +--tags=deploy +``` + +## Usage + +```yaml +- name: Deploy web_app + hosts: all + become: true + roles: + - web_app + vars: + web_app_name: web_app + web_app_internal_port: 8080 + web_app_external_port: 8080 + web_app_full_wipe: true +``` diff --git a/ansible/roles/web_app/defaults/main.yml b/ansible/roles/web_app/defaults/main.yml new file mode 100644 index 0000000000..f186ef4d0d --- /dev/null +++ b/ansible/roles/web_app/defaults/main.yml @@ -0,0 +1,11 @@ +web_app_name: "web_app" +web_app_dir: "/opt/{{ web_app_name }}/" + +web_app_docker_registry: docker.io +web_app_docker_username: ebob + +web_app_full_wipe: false +web_app_image: "{{ web_app_docker_registry }}/{{ web_app_docker_username }}/{{ web_app_name }}" +web_app_image_tag: "v1.0" +web_app_internal_port: 8080 +web_app_external_port: 8080 diff --git a/ansible/roles/web_app/handlers/main.yml 
b/ansible/roles/web_app/handlers/main.yml new file mode 100644 index 0000000000..ed97d539c0 --- /dev/null +++ b/ansible/roles/web_app/handlers/main.yml @@ -0,0 +1 @@ +--- diff --git a/ansible/roles/web_app/meta/main.yml b/ansible/roles/web_app/meta/main.yml new file mode 100644 index 0000000000..6ad37f8159 --- /dev/null +++ b/ansible/roles/web_app/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - role: docker diff --git a/ansible/roles/web_app/tasks/0-wipe.yml b/ansible/roles/web_app/tasks/0-wipe.yml new file mode 100644 index 0000000000..aa63489446 --- /dev/null +++ b/ansible/roles/web_app/tasks/0-wipe.yml @@ -0,0 +1,25 @@ +- name: Ensure web_app_dir exists + ansible.builtin.file: + path: "{{ web_app_dir }}" + state: directory + mode: '0755' + when: web_app_full_wipe + +- name: Check if docker-compose.yml exists + ansible.builtin.stat: + path: "{{ web_app_dir }}/docker-compose.yml" + register: compose_file + +- name: Wipe images + community.docker.docker_compose_v2: + project_src: "{{ web_app_dir }}" + remove_orphans: true + remove_volumes: true + remove_images: all + state: absent + when: compose_file.stat.exists + +- name: Remove app directory + ansible.builtin.file: + path: "{{ web_app_dir }}" + state: absent diff --git a/ansible/roles/web_app/tasks/1-deploy.yml b/ansible/roles/web_app/tasks/1-deploy.yml new file mode 100644 index 0000000000..23dede38a4 --- /dev/null +++ b/ansible/roles/web_app/tasks/1-deploy.yml @@ -0,0 +1,28 @@ +- name: Setup configuration files + block: + - name: Create app directory + ansible.builtin.file: + path: "{{ web_app_dir }}" + state: directory + owner: "{{ ansible_user }}" + group: "{{ ansible_user }}" + mode: "0755" + + - name: Copy Docker Compose template + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ web_app_dir }}/docker-compose.yml" + mode: "0644" + +- name: Start dockerized app + block: + - name: Ensure docker service is OK + ansible.builtin.service: + name: docker + enabled: true + state: started + - 
name: Create and start the services + community.docker.docker_compose_v2: + project_src: "{{ web_app_dir }}" + remove_orphans: true + state: present diff --git a/ansible/roles/web_app/tasks/main.yml b/ansible/roles/web_app/tasks/main.yml new file mode 100644 index 0000000000..8aa0af5364 --- /dev/null +++ b/ansible/roles/web_app/tasks/main.yml @@ -0,0 +1,14 @@ +- name: Full wipe + when: web_app_full_wipe is defined and web_app_full_wipe + tags: wipe + ansible.builtin.include_tasks: + file: "0-wipe.yml" + apply: + tags: wipe + +- name: Deploy dockerized app + tags: deploy + ansible.builtin.include_tasks: + file: "1-deploy.yml" + apply: + tags: deploy diff --git a/ansible/roles/web_app/templates/docker-compose.yml.j2 b/ansible/roles/web_app/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..b4124f1262 --- /dev/null +++ b/ansible/roles/web_app/templates/docker-compose.yml.j2 @@ -0,0 +1,6 @@ +services: + app: + image: "{{ web_app_image }}:{{ web_app_image_tag }}" + ports: + - target: "{{ web_app_internal_port }}" + published: "{{ web_app_external_port }}" diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..93403c3ebc --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,33 @@ +# Ignore Python bytecode and cache files +__pycache__/ +*.mypy_cache/ +*.pytest_cache/ +*.pyc +*.pyo + +# Ignore virtual environments +venv/ +env/ + +# Ignore editor-specific files +*.idea/ +*.vscode/ +*.DS_Store + +# Ignore documentation files +*.md + +# Ignore Docker-related files +Dockerfile +.dockerignore + +# Ignore testing files +test_app.py + +# Ignore git related files +.git/ +.gitignore +.gitattributes +.gitmodules +.gitkeep +.pre-commit-config.yaml diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..b2e1518ce2 --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,7 @@ +# Cache +__pycache__/ +.mypy_cache/ +.pytest_cache/ + +# Virtual Environment +venv/ diff --git 
a/app_python/.pre-commit-config.yaml b/app_python/.pre-commit-config.yaml new file mode 100644 index 0000000000..b9122dbc13 --- /dev/null +++ b/app_python/.pre-commit-config.yaml @@ -0,0 +1,50 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + + + - repo: https://github.com/psf/black + rev: 24.10.0 + hooks: + - id: black + + - repo: https://github.com/PyCQA/flake8 + rev: 7.1.1 + hooks: + - id: flake8 + args: ["--max-line-length=88"] + + - repo: https://github.com/pre-commit/mirrors-isort + rev: v5.10.1 + hooks: + - id: isort + + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.14.1 + hooks: + - id: mypy + additional_dependencies: + - bottle==0.13.2 + - pytest==8.3.4 + - requests==2.32.3 + - "types-requests" + + - repo: https://github.com/pycqa/bandit + rev: 1.8.2 + hooks: + - id: bandit + args: ["--severity-level", "high"] + + - repo: https://github.com/PyCQA/pylint + rev: v3.3.3 + hooks: + - id: pylint + additional_dependencies: + - bottle==0.13.2 + - pytest==8.3.4 + - requests==2.32.3 diff --git a/app_python/CI.md b/app_python/CI.md new file mode 100644 index 0000000000..22ebb6527e --- /dev/null +++ b/app_python/CI.md @@ -0,0 +1,30 @@ +# CI Workflow for `app_python` + +[![CI for app_python](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_python.yml/badge.svg?branch=lab3)](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_python.yml) + +This project uses a CI workflow to automatically lint, test, check security, and build/push Docker images whenever there are changes to the application. Below are the key steps that the workflow performs: + +## 1. **Code Quality Check** + +- **Linting**: The code is checked using **Pylint** to ensure it follows best practices. +- **Code Formatting**: **Black** is used to automatically check that the code is properly formatted. + +## 2. 
**Testing** + +- **Pytest** is used to run tests to verify that the code behaves as expected. + +## 3. **Security Scan** + +- **Snyk** is used to scan the dependencies for known vulnerabilities and security risks. + +## 4. **Docker Build & Push** + +- The application is built into a Docker image using **Docker Buildx**. +- The image is then pushed to **GitHub Container Registry** and **DockerHub**. + +## Key Features of This CI Workflow + +- **Caching**: We cache dependencies and Docker layers to speed up the workflow. +- **Security**: Credentials (like tokens and passwords) are stored securely using GitHub Secrets. +- **Fast Feedback**: The workflow runs fast by stopping early on errors and continuing with other checks. +- **Docker Integration**: The app is automatically built into a Docker image and pushed to registries. diff --git a/app_python/DOCKER.md b/app_python/DOCKER.md new file mode 100644 index 0000000000..d9adc1322f --- /dev/null +++ b/app_python/DOCKER.md @@ -0,0 +1,42 @@ +# Docker best practices + +## **Use of an Official Base Image** + +I used lightweight Alpine-based official Python image `python:3.13.1-alpine3.21`, which significantly reduces the image size with pinned version tag to ensure consistency across builds and avoids unexpected issues due to updates in the base image. + +## **Non-Root User** + +I create user with limited permissions and use it in the container. This mitigates the risk of privilege escalation in case of an exploit. + +## **Optimized build process with layer caching** + +This layer will only be rebuilt if `requirements.txt` changes, leveraging Docker’s layer caching and reducing build times. + +## **.dockerignore** + +I keep the `.dockerignore` file clean and avoid using the `COPY . .` in the Dockerfile, to make sure that only the necessary files will be added to the image. 
+ +## **Use Haskell Dockerfile Linter** + +[Hadolint](https://github.com/hadolint/hadolint) is a smarter Dockerfile linter that helps to build best practice Docker images. + +## **Docker Scout** + +I analyze image with Docker Scout to find out possible vulnerabilities and fix them. + +Docker Scout + +## **DockerHub** + +Image is available on [DockerHub](https://hub.docker.com/repository/docker/ebob/moscow-time/tags/v1.0/sha256-963767cb63ad8759727d0507f84fa4891bffe760742a9509bd899a49a7873757) + +## **Distroless Image** + +Additionaly, I build distroless image. I didn't create an additional user because I used a container with a `nonroot` tag. Distroless image appeared to be 20 MB larger than the original one. I think this is because we don't compile the python program into a binary file, so reducing the size is not an advantage. But the distroless container is a very good solution in terms of security: it does not contain shell and other utilities that reduce attack surface. + +Here is image size comparison: + +![image](https://github.com/user-attachments/assets/37cbc610-a7a2-4da1-bcab-34a81515347b) + + +I upload it on [DockerHub](https://hub.docker.com/repository/docker/ebob/moscow-time/tags/v1.0-distroless/sha256-cee4db447ea129aca4c6a05e045e3de5758d01343a68345abbdd93b6affae59d) too diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..c044e50929 --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,29 @@ +# Use an official Python image as the base +FROM python:3.13.1-alpine3.21 + +# Set environment variables to make the image more secure +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 + +# Create non-root user +RUN addgroup -S appgroup && \ + adduser -S -G appgroup appuser + +# Set the working directory in the container +WORKDIR /app + +# Copy the requirements file and install dependencies +COPY requirements.txt . 
+RUN pip install --no-cache-dir -r requirements.txt + +# Copy the rest of the application code into the container +COPY app.py . + +# Switch to non-root user +USER appuser + +# Expose the port that the app will run on +EXPOSE 8080 + +# Command to run the app using Gunicorn +CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:8080", "app:app"] diff --git a/app_python/PYTHON.md b/app_python/PYTHON.md new file mode 100644 index 0000000000..fcc82266d3 --- /dev/null +++ b/app_python/PYTHON.md @@ -0,0 +1,63 @@ +# Python Web Application + +## Framework choice + +To develop this web application, I chose the [**bottle**](https://bottlepy.org/docs/dev/) web framework for Python. Bottle is a lightweight Python framework, making it ideal for small, straightforward applications like this one. + +## Best practices + +I implemented the following Python best practices: + +* Follow PEP 8 Code Style + +* Documenting code with Docstrings and Comments + +* Using pre-commit hooks with following tools: + + * [`black`](https://github.com/psf/black) code formatter + + * [`flake8`](https://github.com/PyCQA/flake8) code checker + + * [`isort`](https://github.com/PyCQA/isort) library sorter + + * [`mypy`](https://github.com/python/mypy) static type checker + + * [`bandit`](https://github.com/PyCQA/bandit) security analyzer + + * [`pylint`](https://github.com/pylint-dev/pylint) static code analyser + +* Maintain a clean `.gitignore` file + +* Manage Dependencies with `requirements.txt` + +* Using [gunicorn](https://gunicorn.org/) WSGI server instead of bottle's default one + +## Testing code + +For automatic code testing I use [`pytest`](https://docs.pytest.org/) framework. + +Here are a few code testing best practices I've learned and applied: + +1. Use a Clear Testing Strategy + * Apply **unit tests** for individual functions or components. + * Use **integration tests** to verify how different parts interact. + * Implement **end-to-end tests** to ensure the whole application behaves correctly. + +2. 
Isolate Unit Tests + * Unit tests should not depend on external systems (e.g., databases, APIs, file systems). + * Mock external dependencies where needed. + +3. Test Edge Cases + * Validate boundary conditions (e.g., leap years). + +### How to run tests? + +I use `pytest 8.3.4`, so first of all we need to install it: + +* ``` pip install pytest==8.3.4 ``` + +Also for testing we need `requests` + +* ``` pip install requests==2.32.3 ``` + +* ``` pytest test_app.py ``` diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..5a5ab20eab --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,130 @@ +# Moscow Time Web Application + +[![CI for app_python](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_python.yml/badge.svg?branch=lab3)](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_python.yml) + +## Overview + +This is a web app that shows current date and time in Moscow. It is written in Python with Bottle framework. + +## Requirements + +* Python 3.12 + +## Installation + +Clone this repository: + +```bash +git clone https://github.com/cuprum-acid/devops-labs.git -b lab1 +``` + +Open directory: + +```bash +cd devops-labs/app_python +``` + +Install virtual environment and dependencies: + +```bash +python -m venv venv +``` + +```bash +source venv/bin/activate # Linux/Mac +``` + +```bash +venv\Scripts\activate # Windows +``` + +```bash +pip install -r requirements.txt +``` + +Run the app: + +```bash +python app.py +``` + +Now it is available on `localhost:8080` in browser. 
Or you can run in terminal: + +```bash +curl localhost:8080 +``` + +## Run tests + +If you want to run automatic tests, then you need to install additional packages: + +```bash +pip install pytest==8.3.4 +``` + +```bash +pip install requests==2.32.3 +``` + +They were not included in `requirements.txt` because they are not required to run application + +After that run: + +```bash +pytest test_app.py +``` + +## Docker + +### Build + +```bash +cd devops-labs/app_python +``` + +```bash +docker build -t ebob/moscow-time:v1.0 . +``` + +### Pull and Run + +```bash +docker pull ebob/moscow-time:v1.0 +``` + +```bash +docker run -d --name msk -p 8080:8080 ebob/moscow-time:v1.0 +``` + +Now it is available on `localhost:8080` + +## Distroless Docker Image + +### Build + +```bash +docker build -t ebob/moscow-time:v1.0-distroless -f distroless.Dockerfile . +``` + +### Pull and Run + +```bash +docker pull ebob/moscow-time:v1.0-distroless +``` + +```bash +docker run -d --name msk-distroless -p 8081:8080 ebob/moscow-time:v1.0-distroless +``` + +Now it is available on `localhost:8081` + +## Continuous Integration + +This repository contains a CI pipeline configuration for the python application. The CI pipeline is managed with `GitHub Actions` and includes multiple jobs to ensure the code quality, functionality, security, and successful deployment of the application. + +The pipeline consists of these main jobs: + +1. Lint and Format: Ensures the code follows linting and formatting standards. +2. Test: Runs tests to verify the correctness of the application. +3. Security Scan: Checks for security vulnerabilities in the codebase using `Snyk` tool. +4. Docker Build and Push: Builds and pushes a Docker image to the DockerHub and ghcr. diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..ddff24a5e0 --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,36 @@ +"""Bottle web application that shows the current time and date in Moscow. +Author: Evgeny B. 
+""" + +from datetime import datetime, timedelta, timezone + +from bottle import Bottle, response, run + +# Create a Bottle app instance +app = Bottle() + +# Define the MSK timezone (UTC+3) +MSK_TIMEZONE = timezone(timedelta(hours=3)) + + +@app.route("/") +def show_time(): + """Show the current time and date in Moscow.""" + # Get the current time in Moscow + now = datetime.now(MSK_TIMEZONE) + formatted_time = now.strftime("%H:%M:%S") + formatted_date = now.strftime("%d.%m.%Y") + + # Set the response content type to HTML + response.content_type = "text/html; charset=utf-8" + return ( + f"

Current time and date in Moscow

" + f"

Time: {formatted_time}

" + f"

Date: {formatted_date}

" + ) + + +# Run the Bottle app +if __name__ == "__main__": + # Run the Bottle app on the server + run(app, host="0.0.0.0", port=8080, debug=False, reloader=False) diff --git a/app_python/distroless.Dockerfile b/app_python/distroless.Dockerfile new file mode 100644 index 0000000000..e0147de79a --- /dev/null +++ b/app_python/distroless.Dockerfile @@ -0,0 +1,31 @@ +# Use an official Python image as the base for building the app +FROM python:3.13.1-alpine3.21 AS builder + +# Set environment variables to make the image more secure +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 + +# Set the working directory in the container +WORKDIR /app + +# Copy the requirements file and install dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt && cp "$(which gunicorn)" /app + +# Copy the rest of the application code into the container +COPY app.py . + +# Use Distroless as the runtime image +FROM gcr.io/distroless/python3-debian12:nonroot AS runtime + +# Copy application files from builder +WORKDIR /app +COPY --from=builder /app /app +COPY --from=builder /usr/local/lib/python3.13/site-packages /usr/local/lib/python3.11/site-packages +ENV PYTHONPATH=/usr/local/lib/python3.11/site-packages + +# Expose the port that the app will run on +EXPOSE 8080 + +# Command to run the app using Gunicorn +CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:8080", "app:app"] diff --git a/app_python/requirements.txt b/app_python/requirements.txt new file mode 100644 index 0000000000..3fa8ad4c04 --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,2 @@ +bottle==0.13.2 +gunicorn==23.0.0 diff --git a/app_python/test_app.py b/app_python/test_app.py new file mode 100644 index 0000000000..076fc1596f --- /dev/null +++ b/app_python/test_app.py @@ -0,0 +1,114 @@ +"""Tests for the Bottle application.""" + +import re +import threading +import time +import unittest +from datetime import datetime, timedelta +from subprocess import Popen + +import pytest +import requests 
+from app import MSK_TIMEZONE, app + +BASE_URL = "http://127.0.0.1:8080/" + + +def wait_for_server(url, timeout=5): + """Utility function to wait for the server to be up.""" + start_time = time.time() + while time.time() - start_time < timeout: + try: + response = requests.get(url, timeout=1) + if response.status_code == 200: + return True + except requests.exceptions.ConnectionError: + time.sleep(0.5) + raise RuntimeError("Server did not start in time") + + +class TestAppUnit(unittest.TestCase): + """Unit tests for the Bottle application.""" + + def test_msk_timezone(self): + """Test if the Moscow timezone is correctly set.""" + self.assertEqual(MSK_TIMEZONE.utcoffset(None), timedelta(hours=3)) + + def test_show_time_format(self): + """Test if the time is formatted correctly.""" + now = datetime(2025, 1, 1, 15, 30, 45, tzinfo=MSK_TIMEZONE) + formatted_time = now.strftime("%H:%M:%S") + self.assertEqual(formatted_time, "15:30:45") + + def test_show_date_format(self): + """Test if the date is formatted correctly.""" + now = datetime(2025, 1, 1, tzinfo=MSK_TIMEZONE) + formatted_date = now.strftime("%d.%m.%Y") + self.assertEqual(formatted_date, "01.01.2025") + + def test_leap_year(self): + """Test if the application handles leap years correctly.""" + leap_date = datetime(2024, 2, 29, tzinfo=MSK_TIMEZONE) + formatted_date = leap_date.strftime("%d.%m.%Y") + self.assertEqual(formatted_date, "29.02.2024") + + def test_midnight_rollover(self): + """Test if the application correctly handles midnight rollover.""" + before_midnight = datetime(2025, 1, 1, 23, 59, 59, tzinfo=MSK_TIMEZONE) + after_midnight = before_midnight + timedelta(seconds=1) + self.assertEqual(after_midnight.strftime("%H:%M:%S"), "00:00:00") + + +@pytest.fixture(scope="module", autouse=True) +def start_server(): + """Fixture to start the Bottle server before tests and stop after.""" + + with Popen(["python", "app.py"]) as process: + wait_for_server(BASE_URL, timeout=5) + yield + process.terminate() + + +def 
test_root_route(): + """Test if the root route returns the correct HTML response.""" + response = requests.get(BASE_URL, timeout=5) + + # Verify response status and headers + assert response.status_code == 200 + assert response.headers["Content-Type"].startswith("text/html") + + # Verify response content + assert "Current time and date in Moscow" in response.text + assert re.search(r"

Time: \d{2}:\d{2}:\d{2}

", response.text) + assert re.search(r"

Date: \d{2}\.\d{2}\.\d{4}

", response.text) + + +class TestAppE2E(unittest.TestCase): + """End-to-end tests for the Bottle application.""" + + @classmethod + def setUpClass(cls): + """Start the server in a separate thread.""" + cls.server_thread = threading.Thread( + target=lambda: app.run( + host="127.0.0.1", port=8080, debug=False, quiet=True + ), + daemon=True, + ) + cls.server_thread.start() + wait_for_server(BASE_URL, timeout=5) + + @classmethod + def tearDownClass(cls): + """Stop the server.""" + cls.server_thread.join(0) + + def test_root_endpoint(self): + """Test if the '/' endpoint returns the correct response.""" + response = requests.get(BASE_URL, timeout=5) + self.assertEqual(response.status_code, 200) + self.assertIn("Current time and date in Moscow", response.text) + self.assertIn("Time:", response.text) + self.assertIn("Date:", response.text) + self.assertRegex(response.text, r"

Time: \d{2}:\d{2}:\d{2}

") + self.assertRegex(response.text, r"

Date: \d{2}\.\d{2}\.\d{4}

") diff --git a/app_ruby/.dockerignore b/app_ruby/.dockerignore new file mode 100644 index 0000000000..66125b81ce --- /dev/null +++ b/app_ruby/.dockerignore @@ -0,0 +1,25 @@ +# Ignore editor-specific files +*.idea/ +*.vscode/ +*.DS_Store + +# Ignore documentation files +*.md + +# Ignore Docker-related files +Dockerfile +.dockerignore + +# Ignore testing files +spec/ + +# Ignore git related files +.git/ +.gitignore +.gitattributes +.gitmodules +.gitkeep + +# Ignore configuration files +.pre-commit-config.yaml +.robocop.yml diff --git a/app_ruby/.gitignore b/app_ruby/.gitignore new file mode 100644 index 0000000000..e43b0f9889 --- /dev/null +++ b/app_ruby/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/app_ruby/.pre-commit-config.yaml b/app_ruby/.pre-commit-config.yaml new file mode 100644 index 0000000000..1e3661b360 --- /dev/null +++ b/app_ruby/.pre-commit-config.yaml @@ -0,0 +1,32 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + + - repo: local + hooks: + - id: rubocop + name: RuboCop + entry: rubocop --auto-correct + language: system + files: \.rb$ + + - repo: local + hooks: + - id: reek + name: Reek + entry: reek + language: system + files: ^app_ruby/ + + - repo: local + hooks: + - id: rails_best_practices + name: Rails Best Practices + entry: rails_best_practices + language: system + files: ^app_ruby/ diff --git a/app_ruby/.robocop.yml b/app_ruby/.robocop.yml new file mode 100644 index 0000000000..747f8af4ea --- /dev/null +++ b/app_ruby/.robocop.yml @@ -0,0 +1,9 @@ +AllCops: + NewCops: enable + TargetRubyVersion: 3.4 + +Metrics/ClassLength: + Max: 100 + +Style/Documentation: + Enabled: false diff --git a/app_ruby/CI.md b/app_ruby/CI.md new file mode 100644 index 0000000000..8b0d84e08e --- /dev/null +++ b/app_ruby/CI.md @@ -0,0 +1,30 @@ +# CI Workflow for `app_ruby` + +[![CI for 
app_ruby](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_ruby.yml/badge.svg)](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_ruby.yml) + +This project uses a CI workflow to automatically lint, test, check security, and build/push Docker images whenever there are changes to the application. Below are the key steps that the workflow performs: + +## 1. **Code Quality Check** + +- **Linting**: The code is checked using **RuboCop** to enforce Ruby style and best practices. +- **Code Formatting**: **RuboCop** also ensures that the code is properly formatted. + +## 2. **Testing** + +- **RSpec** is used to run tests and verify that the application works as expected. + +## 3. **Security Scan** + +- **Snyk** is used to scan the dependencies for known vulnerabilities and security risks. + +## 4. **Docker Build & Push** + +- The application is built into a Docker image using **Docker Buildx**. +- The image is then pushed to **GitHub Container Registry** and **DockerHub**. + +## Key Features of This CI Workflow + +- **Caching**: We cache dependencies and Docker layers to speed up the workflow. +- **Security**: Credentials (like tokens and passwords) are stored securely using GitHub Secrets. +- **Fast Feedback**: The workflow runs fast by stopping early on errors and continuing with other checks. +- **Docker Integration**: The app is automatically built into a Docker image and pushed to registries. diff --git a/app_ruby/DOCKER.md b/app_ruby/DOCKER.md new file mode 100644 index 0000000000..c2189f28d4 --- /dev/null +++ b/app_ruby/DOCKER.md @@ -0,0 +1,41 @@ +# Docker best practices + +## **Use of an Official Base Image** + +I used lightweight Alpine-based official Ruby image `ruby:3.4.1-alpine3.21`, which significantly reduces the image size with pinned version tag to ensure consistency across builds and avoids unexpected issues due to updates in the base image. 
+ +## **Non-Root User** + +I create user with limited permissions and use it in the container. This mitigates the risk of privilege escalation in case of an exploit. + +## **Optimized build process with layer caching** + +This layer will only be rebuilt if `Gemfile` changes, leveraging Docker’s layer caching and reducing build times. + +## **.dockerignore** + +I keep the `.dockerignore` file clean and avoid using the `COPY . .` in the Dockerfile, to make sure that only the necessary files will be added to the image. + +## **Use Haskell Dockerfile Linter** + +[Hadolint](https://github.com/hadolint/hadolint) is a smarter Dockerfile linter that helps to build best practice Docker images. + +## **Docker Scout** + +I analyze image with Docker Scout to find out possible vulnerabilities and fix them. + +Docker Scout + +## **DockerHub** + +Image is available on [DockerHub](https://hub.docker.com/repository/docker/ebob/omsk-time/tags/v1.0/sha256-0d436c0125cf7307f573fa7f7cf3b7ab2671ba3fe1455babeb08ee45f213ec11) + +## **Distroless Image** + +Additionally, I build distroless image. I didn't create an additional user because I used a container with a `nonroot` tag. Distroless image appeared to be 70 MB larger than the original one. I think this is because we don't compile the ruby program into a binary file, so reducing the size is not an advantage. Also `alpine` base image did not work in distroless environment, so I used `slim` which is a little bit larger. But the distroless container is a very good solution in terms of security: it does not contain shell and other utilities that reduce attack surface. 
+ +Here is image size comparison: + +Comparison Size + +I upload it on [DockerHub](https://hub.docker.com/repository/docker/ebob/omsk-time/tags/v1.0-distroless/sha256-f7e2aba76f6b08839e08129c95aa371841a48f780116d12e9e8f66840b20c3f8) too diff --git a/app_ruby/Dockerfile b/app_ruby/Dockerfile new file mode 100644 index 0000000000..0f8ea66cec --- /dev/null +++ b/app_ruby/Dockerfile @@ -0,0 +1,31 @@ +FROM ruby:3.4.1-alpine3.21 + +ENV BUNDLE_WITHOUT=development:test \ + LANG=C.UTF-8 + +# Install system dependencies +RUN apk add --no-cache \ + gcc=14.2.0-r4 \ + musl-dev=1.2.5-r8 \ + tzdata=2024b-r1 \ + make=4.4.1-r2 + +RUN addgroup -S appgroup && \ + adduser -S -G appgroup appuser + +WORKDIR /app + +RUN gem install nio4r:2.7.4 -- --use-system-libraries && \ + gem install bundler:2.6.3 + +COPY Gemfile Gemfile.lock ./ + +RUN bundle install + +COPY public/styles.css views/index.erb app.rb config.ru ./ + +USER appuser + +EXPOSE 4567 + +CMD ["ruby", "app.rb"] diff --git a/app_ruby/Gemfile b/app_ruby/Gemfile new file mode 100644 index 0000000000..1171256386 --- /dev/null +++ b/app_ruby/Gemfile @@ -0,0 +1,18 @@ +# frozen_string_literal: true + +source 'https://rubygems.org' + +# Gems for running the app (production) +gem 'puma', '~> 6.5' +gem 'rack', '~> 3.1', '>= 3.1.10' +gem 'rackup', '~> 2.2' +gem 'sinatra', '~> 4.1' +gem 'time', '~> 0.4' +gem 'tzinfo', '~> 2.0' + +# Gems for development and testing +group :development, :test do + gem 'rack-test', '~> 2.2' + gem 'rspec' + gem 'rubocop', '~> 1.71', require: false +end diff --git a/app_ruby/Gemfile.lock b/app_ruby/Gemfile.lock new file mode 100644 index 0000000000..9791672043 --- /dev/null +++ b/app_ruby/Gemfile.lock @@ -0,0 +1,95 @@ +GEM + remote: https://rubygems.org/ + specs: + ast (2.4.2) + base64 (0.2.0) + concurrent-ruby (1.3.5) + date (3.4.1) + diff-lcs (1.5.1) + json (2.9.1) + language_server-protocol (3.17.0.3) + logger (1.6.5) + mustermann (3.0.3) + ruby2_keywords (~> 0.0.1) + nio4r (2.7.4) + parallel (1.26.3) + 
parser (3.3.7.0) + ast (~> 2.4.1) + racc + puma (6.5.0) + nio4r (~> 2.0) + racc (1.8.1) + rack (3.1.10) + rack-protection (4.1.1) + base64 (>= 0.1.0) + logger (>= 1.6.0) + rack (>= 3.0.0, < 4) + rack-session (2.1.0) + base64 (>= 0.1.0) + rack (>= 3.0.0) + rack-test (2.2.0) + rack (>= 1.3) + rackup (2.2.1) + rack (>= 3) + rainbow (3.1.1) + regexp_parser (2.10.0) + rspec (3.13.0) + rspec-core (~> 3.13.0) + rspec-expectations (~> 3.13.0) + rspec-mocks (~> 3.13.0) + rspec-core (3.13.2) + rspec-support (~> 3.13.0) + rspec-expectations (3.13.3) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.13.0) + rspec-mocks (3.13.2) + diff-lcs (>= 1.2.0, < 2.0) + rspec-support (~> 3.13.0) + rspec-support (3.13.2) + rubocop (1.71.0) + json (~> 2.3) + language_server-protocol (>= 3.17.0) + parallel (~> 1.10) + parser (>= 3.3.0.2) + rainbow (>= 2.2.2, < 4.0) + regexp_parser (>= 2.9.3, < 3.0) + rubocop-ast (>= 1.36.2, < 2.0) + ruby-progressbar (~> 1.7) + unicode-display_width (>= 2.4.0, < 4.0) + rubocop-ast (1.37.0) + parser (>= 3.3.1.0) + ruby-progressbar (1.13.0) + ruby2_keywords (0.0.5) + sinatra (4.1.1) + logger (>= 1.6.0) + mustermann (~> 3.0) + rack (>= 3.0.0, < 4) + rack-protection (= 4.1.1) + rack-session (>= 2.0.0, < 3) + tilt (~> 2.0) + tilt (2.6.0) + time (0.4.1) + date + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + unicode-display_width (3.1.4) + unicode-emoji (~> 4.0, >= 4.0.4) + unicode-emoji (4.0.4) + +PLATFORMS + arm64-darwin-23 + ruby + +DEPENDENCIES + puma (~> 6.5) + rack (~> 3.1, >= 3.1.10) + rack-test (~> 2.2) + rackup (~> 2.2) + rspec + rubocop (~> 1.71) + sinatra (~> 4.1) + time (~> 0.4) + tzinfo (~> 2.0) + +BUNDLED WITH + 2.6.3 diff --git a/app_ruby/README.md b/app_ruby/README.md new file mode 100644 index 0000000000..4cbd95cf7e --- /dev/null +++ b/app_ruby/README.md @@ -0,0 +1,112 @@ +# Ruby Web Application + +[![CI for 
app_ruby](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_ruby.yml/badge.svg)](https://github.com/cuprum-acid/devops-labs/actions/workflows/app_ruby.yml) + +## Overview + +This application shows current time in **Omsk** + +## Requirements + +* Ruby 3.4 + +## Installation + +Clone this repository: + +```bash +git clone https://github.com/cuprum-acid/devops-labs.git -b lab1 +``` + +Open directory: + +```bash +cd devops-labs/app_ruby +``` + +Install bundler: + +```bash +gem install bundler +``` + +Install dependencies from `Gemfile`: + +```bash +bundle install +``` + +Run the app: + +```bash +ruby app.rb +``` + +Open `localhost:4567` in browser or run: + +```bash +curl localhost:4567 +``` + +## Test + +To run auto-tests: + +```bash +rspec spec/app_spec.rb +``` + +## Docker + +### Build + +```bash +cd devops-labs/app_ruby +``` + +```bash +docker build -t ebob/omsk-time:v1.0 . +``` + +### Pull and Run + +```bash +docker pull ebob/omsk-time:v1.0 +``` + +```bash +docker run -d --name omsk -p 4567:4567 ebob/omsk-time:v1.0 +``` + +Now it is available on `localhost:4567` + +## Distroless Docker Image + +### Build + +```bash +docker build -t ebob/omsk-time:v1.0-distroless -f distroless.Dockerfile . +``` + +### Pull and Run + +```bash +docker pull ebob/omsk-time:v1.0-distroless +``` + +```bash +docker run -d --name omsk-distroless -p 4568:4567 ebob/omsk-time:v1.0-distroless +``` + +Now it is available on `localhost:4568` + +## Continuous Integration + +This repository contains a CI pipeline configuration for the Ruby application. The CI pipeline is managed with `GitHub Actions` and includes multiple jobs to ensure the code quality, functionality, security, and successful deployment of the application. + +The pipeline consists of these main jobs: + +1. Lint and Format: Ensures the code follows linting and formatting standards. +2. Test: Runs tests to verify the correctness of the application. +3. 
Security Scan: Checks for security vulnerabilities in the codebase using `Snyk` tool. +4. Docker Build and Push: Builds and pushes a Docker image to the DockerHub and ghcr. diff --git a/app_ruby/RUBY.md b/app_ruby/RUBY.md new file mode 100644 index 0000000000..c8245fe38d --- /dev/null +++ b/app_ruby/RUBY.md @@ -0,0 +1,25 @@ +# Ruby Web Application + +## Framework choice + +To develop this web application, I chose the [**Sinatra**](https://sinatrarb.com/) web framework for Ruby. Sinatra is a lightweight web framework that is perfect for small applications. + +## Best practices + +I implemented the following best practices in the development of this Ruby web application: + +* MVC architectural pattern + +* Tools for Code Quality & Security + + * [`RuboCop`](https://github.com/rubocop/rubocop): A static code analyzer and style checker for Ruby + + * [`Reek`](https://github.com/troessner/reek): A code smell detector + + * [`rails_best_practices`](https://github.com/flyerhzm/rails_best_practices) : A tool that checks for best practices in Ruby on Rails applications, even if you're using Sinatra framework + +* I use `Gemfile` to manage dependencies + +## Testing code + +In addition to manually testing the application by running it and verifying the output, I have written automated tests using the `RSpec` testing framework. These tests ensure that the application behaves as expected and that future changes do not introduce bugs. 
diff --git a/app_ruby/app.rb b/app_ruby/app.rb new file mode 100644 index 0000000000..13cbeb0a24 --- /dev/null +++ b/app_ruby/app.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require 'sinatra' +require 'tzinfo' + +set :bind, '0.0.0.0' + +get '/' do + content_type 'text/plain' + # Set the timezone to Omsk + timezone = TZInfo::Timezone.get('Asia/Omsk') + + # Get the current time in Omsk timezone + omsk_time = timezone.now.strftime('%Y-%m-%d %H:%M:%S') + + # Display the time + "Current time in Omsk: #{omsk_time}" +rescue TZInfo::InvalidTimezoneIdentifier => e + "Error: Invalid timezone identifier - #{e.message}" +rescue StandardError => e + "Error: #{e.message}" +end diff --git a/app_ruby/config.ru b/app_ruby/config.ru new file mode 100644 index 0000000000..bfdd0f5848 --- /dev/null +++ b/app_ruby/config.ru @@ -0,0 +1,5 @@ +# frozen_string_literal: true + +require './app' + +run Sinatra::Application diff --git a/app_ruby/distroless.Dockerfile b/app_ruby/distroless.Dockerfile new file mode 100644 index 0000000000..f1ee7c8fe1 --- /dev/null +++ b/app_ruby/distroless.Dockerfile @@ -0,0 +1,40 @@ +FROM ruby:3.4.1-slim AS builder + +ENV BUNDLE_WITHOUT=development:test \ + LANG=C.UTF-8 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc=4:12.2.0-3 \ + musl-dev=1.2.3-1 \ + tzdata=2024b-0+deb12u1 \ + make=4.3-4.1 && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +RUN gem install nio4r:2.7.4 -- --use-system-libraries && \ + gem install bundler:2.6.3 + +COPY Gemfile Gemfile.lock ./ + +RUN bundle install && cp "$(which ruby)" /app + +COPY public/styles.css views/index.erb app.rb config.ru ./ + +FROM gcr.io/distroless/base-debian12:nonroot AS runtime + +WORKDIR /app +COPY --from=builder /app /app +COPY --from=builder /usr/local /usr/local +COPY --from=builder /usr/lib /usr/lib +COPY --from=builder /usr/local/bin/ruby /usr/local/bin/ruby +COPY --from=builder /usr/local/lib/ruby/3.4.0 /usr/local/lib/ruby/3.4.0 +COPY 
--from=builder /usr/local/bundle /usr/local/bundle + +ENV PATH=/usr/local/bundle/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +ENV GEM_HOME=/usr/local/bundle + +EXPOSE 4567 + +CMD ["./ruby", "app.rb"] diff --git a/app_ruby/public/styles.css b/app_ruby/public/styles.css new file mode 100644 index 0000000000..8116798834 --- /dev/null +++ b/app_ruby/public/styles.css @@ -0,0 +1,13 @@ +body { + font-family: Arial, sans-serif; + margin: 0 auto; + max-width: 600px; + } + + h1 { + text-align: center; + } + + form { + margin-bottom: 10px; + } diff --git a/app_ruby/spec/app_spec.rb b/app_ruby/spec/app_spec.rb new file mode 100644 index 0000000000..1ead1ad0c6 --- /dev/null +++ b/app_ruby/spec/app_spec.rb @@ -0,0 +1,74 @@ +# frozen_string_literal: true + +require 'rack/test' +require './app' + +describe 'Omsk Time Web App - Basic Responses' do + include Rack::Test::Methods + + def app + Sinatra::Application + end + + describe 'GET /' do + it 'responds successfully' do + header 'Host', 'localhost' + get '/' + expect(last_response).to be_ok + end + + it 'displays the current time in Omsk' do + header 'Host', 'localhost' + get '/' + expect(last_response.body).to match(/Current time in Omsk: \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}/) + end + + it 'returns text/plain content type' do + header 'Host', 'localhost' + get '/' + expect(last_response.headers['Content-Type']).to include('text/plain') + end + end +end + +describe 'Omsk Time Web App - Error Handling' do + include Rack::Test::Methods + + def app + Sinatra::Application + end + + it 'handles invalid timezone errors gracefully' do + allow(TZInfo::Timezone).to receive(:get).and_raise(TZInfo::InvalidTimezoneIdentifier, 'Invalid timezone') + + header 'Host', 'localhost' + get '/' + + expect(last_response).to be_ok + expect(last_response.body).to include('Error: Invalid timezone identifier - Invalid timezone') + end + + it 'handles unexpected errors gracefully' do + allow(TZInfo::Timezone).to 
receive(:get).and_raise(StandardError, 'Unexpected error') + + header 'Host', 'localhost' + get '/' + + expect(last_response).to be_ok + expect(last_response.body).to include('Error: Unexpected error') + end +end + +describe 'Omsk Time Web App - Time Format' do + include Rack::Test::Methods + + def app + Sinatra::Application + end + + it 'displays time in YYYY-MM-DD HH:MM:SS format' do + header 'Host', 'localhost' + get '/' + expect(last_response.body).to match(/\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}/) + end +end diff --git a/app_ruby/views/index.erb b/app_ruby/views/index.erb new file mode 100644 index 0000000000..3a0b84b503 --- /dev/null +++ b/app_ruby/views/index.erb @@ -0,0 +1,31 @@ + + + + + Omsk Current Time + + + +
+

Current Time in Omsk

+

<%= @current_time.strftime("%Y-%m-%d %H:%M:%S %Z") %>

+
+ + diff --git a/lab7.md b/lab7.md new file mode 100644 index 0000000000..48e65eb202 --- /dev/null +++ b/lab7.md @@ -0,0 +1,59 @@ +# Lab 7: Monitoring and Logging + +## Overview + +In this lab, you will become familiar with a logging stack that includes Promtail, Loki, and Grafana. Your goal is to create a Docker Compose configuration and configuration files to set up this logging stack. + +## Task 1: Logging Stack Setup + +**6 Points:** + +1. Study the Logging Stack: + - Begin by researching the components of the logging stack: + - [Grafana Webinar: Loki Getting Started](https://grafana.com/go/webinar/loki-getting-started/) + - [Loki Overview](https://grafana.com/docs/loki/latest/overview/) + - [Loki GitHub Repository](https://github.com/grafana/loki) + +2. Create a Monitoring Folder: + - Start by creating a new folder named `monitoring` in your project directory. + +3. Docker Compose Configuration: + - Inside the `monitoring` folder, prepare a `docker-compose.yml` file that defines the entire logging stack along with your application. + - To assist you in this task, refer to these resources for sample Docker Compose configurations: + - [Example Docker Compose Configuration from Loki Repository](https://github.com/grafana/loki/blob/main/production/docker-compose.yaml) + - [Promtail Configuration Example](https://github.com/black-rosary/loki-nginx/blob/master/promtail/promtail.yml) (Adapt it as needed) + +4. Testing: + - Verify that the configured logging stack and your application work as expected. + +## Task 2: Documentation and Reporting + +**4 Points:** + +1. Logging Stack Report: + - Create a new file named `LOGGING.md` to document how the logging stack you've set up functions. + - Provide detailed explanations of each component's role within the stack. + +2. Screenshots: + - Capture screenshots that demonstrate the successful operation of your logging stack. + - Include these screenshots in your `LOGGING.md` report for reference. 
+ +## Bonus Task: Additional Configuration + +**2.5 Points:** + +1. Integrating Your Extra App: + - Extend the `docker-compose.yml` configuration to include your additional application. + +2. Configure Stack for Comprehensive Logging: + - Modify the logging stack's configuration to collect logs from all containers defined in the `docker-compose.yml`. + - Include screenshots in your `LOGGING.md` report to demonstrate your success. + +### Guidelines + +- Ensure that your documentation in `LOGGING.md` is well-structured and comprehensible. +- Follow proper naming conventions for files and folders. +- Use code blocks and Markdown formatting where appropriate. +- Create pull requests (PRs) as needed: from your fork to the main branch of this repository, and from your fork's branch to your fork's master branch. + +> Note: Thoroughly document your work, and ensure the logging stack functions correctly. Utilize the bonus points opportunity to enhance your understanding and the completeness of your setup. diff --git a/lab8.md b/lab8.md new file mode 100644 index 0000000000..8eb0752ec7 --- /dev/null +++ b/lab8.md @@ -0,0 +1,71 @@ +# Lab 8: Monitoring with Prometheus + +## Overview + +In this lab, you will become acquainted with Prometheus, set it up, and configure applications to collect metrics. + +## Task 1: Prometheus Setup + +**6 Points:** + +1. Learn About Prometheus: + - Begin by reading about Prometheus and its fundamental concepts: + - [Prometheus Overview](https://prometheus.io/docs/introduction/overview/) + - [Prometheus Naming Best Practices](https://prometheus.io/docs/practices/naming/) + +2. Integration with Docker Compose: + - Expand your existing `docker-compose.yml` file from the previous lab to include Prometheus. + +3. Prometheus Configuration: + - Configure Prometheus to collect metrics from both Loki and Prometheus containers. + +4. 
Verify Prometheus Targets: + - Access `http://localhost:9090/targets` to ensure that Prometheus is correctly scraping metrics. + - Capture screenshots that confirm the successful setup and place them in a file named `METRICS.md` within the monitoring folder. + +## Task 2: Dashboard and Configuration Enhancements + +**4 Points:** + +1. Grafana Dashboards: + - Set up dashboards in Grafana for both Loki and Prometheus. + - You can use examples as references: + - [Example Dashboard for Loki](https://grafana.com/grafana/dashboards/13407) + - [Example Dashboard for Prometheus](https://grafana.com/grafana/dashboards/3662) + - Capture screenshots displaying your successful dashboard configurations and include them in `METRICS.md`. + +2. Service Configuration Updates: + - Enhance the configuration of all services in the `docker-compose.yml` file: + - Add log rotation mechanisms. + - Specify memory limits for containers. + - Ensure these changes are documented within your `METRICS.md` file. + +3. Metrics Gathering: + - Extend Prometheus to gather metrics from all services defined in the `docker-compose.yml` file. + +## Bonus Task: Metrics and Health Checks + +**To Earn 2.5 Additional Points:** + +1. Application Metrics: + - Integrate metrics into your applications. You can refer to Python examples like: + - [Monitoring a Synchronous Python Web Application](https://dzone.com/articles/monitoring-your-synchronous-python-web-application) + - [Metrics Monitoring in Python](https://opensource.com/article/18/4/metrics-monitoring-and-python) + +2. Obtain Application Metrics: + - Configure your applications to export metrics. + +3. METRICS.md Update: + - Document your progress with the bonus tasks, including screenshots, in the `METRICS.md` file. + +4. Health Checks: + - Further enhance the `docker-compose.yml` file's service configurations by adding health checks for the containers. + +### Guidelines + +- Maintain a well-structured and comprehensible `METRICS.md` document. 
+- Adhere to file and folder naming conventions. +- Utilize code blocks and Markdown formatting where appropriate. +- Create pull requests (PRs) as needed: from your fork to the main branch of this repository, and from your fork's branch to your fork's master branch. + +> Note: Ensure thorough documentation of your work, and guarantee that Prometheus correctly collects metrics. Take advantage of the bonus tasks to deepen your understanding and enhance the completeness of your setup. diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000000..ecac1dfa7b --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,5 @@ +.terraform/ +*.tfstate +*.tfstate.backup +*.tfvars +.terraform.lock.hcl diff --git a/terraform/TF.md b/terraform/TF.md new file mode 100644 index 0000000000..afb1e7ec1b --- /dev/null +++ b/terraform/TF.md @@ -0,0 +1,722 @@ +# Terraform + +## Best Practices + +* Use `.gitignore` in order not to push sensitive data from the `terraform.tfstate` file (It is better to keep it in your S3 bucket). + +* Use `terraform fmt` and `terraform validate` to format and check correctness of your code. 
+ +* It is better to review changes with `terraform plan` first before agree on `terraform apply` + +* Use `variables.tf` in order not to hardcode variables and mark sensitive with flag `sensitive = true` + +* Specify exact versions in `terraform required_providers` + +* Define variables in `.tfvars` or environment variables, but do not push them in repo + +## Docker Infrastructure Using Terraform + +### `terraform state list` command output + +```bash +ebob@laptop docker_terraform % terraform state list + +docker_container.app_python_container +docker_container.app_ruby_container +``` + +### `terraform state show ` command output + +```bash +ebob@laptop docker_terraform % terraform state show docker_container.app_python_container +# docker_container.app_python_container: +resource "docker_container" "app_python_container" { + attach = false + bridge = null + command = [ + "gunicorn", + "-w", + "4", + "-b", + "0.0.0.0:8080", + "app:app", + ] + container_read_refresh_timeout_milliseconds = 15000 + cpu_set = null + cpu_shares = 0 + domainname = null + entrypoint = [] + env = [] + hostname = "49cc30c669fa" + id = "49cc30c669fa8a39fab6ee8f89f43c269dd1a245a6f5629dafc7fa478dc69292" + image = "sha256:6dbe2f8b0f5e842457c6d2a4df1cae14e8f07dde54194a3b67fa6671be7d8d3b" + init = false + ipc_mode = "private" + log_driver = "json-file" + logs = false + max_retry_count = 0 + memory = 0 + memory_swap = 0 + must_run = true + name = "msk" + network_data = [ + { + gateway = "172.17.0.1" + global_ipv6_address = null + global_ipv6_prefix_length = 0 + ip_address = "172.17.0.2" + ip_prefix_length = 16 + ipv6_gateway = null + mac_address = "02:42:ac:11:00:02" + network_name = "bridge" + }, + ] + network_mode = "bridge" + pid_mode = null + privileged = false + publish_all_ports = false + read_only = false + remove_volumes = true + restart = "no" + rm = false + runtime = "runc" + security_opts = [] + shm_size = 64 + start = true + stdin_open = false + stop_signal = null + stop_timeout = 0 + 
tty = false + user = "appuser" + userns_mode = null + wait = false + wait_timeout = 60 + working_dir = "/app" + + ports { + external = 8080 + internal = 8080 + ip = "0.0.0.0" + protocol = "tcp" + } +} +``` + +### `terraform output` command + +```bash +ebob@laptop docker_terraform % terraform output +container_id_python = "49cc30c669fa8a39fab6ee8f89f43c269dd1a245a6f5629dafc7fa478dc69292" +container_id_ruby = "053cc71da7897e90ef78158ff045377e56e1d228b340788a16ade2c91f49460c" +container_image_python = "ebob/moscow-time:v1.0" +container_image_ruby = "ebob/omsk-time:v1.0" +container_name_python = "msk" +container_name_ruby = "omsk" +container_port_python = tolist([ + { + "external" = 8080 + "internal" = 8080 + "ip" = "0.0.0.0" + "protocol" = "tcp" + }, +]) +container_port_ruby = tolist([ + { + "external" = 8081 + "internal" = 4567 + "ip" = "0.0.0.0" + "protocol" = "tcp" + }, +]) +``` + +## Yandex Cloud Using Terraform + +### Getting started + +First of all, read [official guide from Yandex Cloud about Terraform](https://yandex.cloud/en-ru/docs/tutorials/infrastructure-management/terraform-quickstart). Then, visit [Yandex Cloud Provider on Terraform Registry](https://registry.terraform.io/providers/yandex-cloud/yandex/latest/docs). After that, we can start by creating service account and getting [IAM token](https://yandex.cloud/en-ru/docs/iam/operations/iam-token/create-for-sa). + +### `terraform plan` + +```bash +ebob@laptop yandex_cloud_terraform % terraform plan +var.cloud_id + Yandex Cloud ID + + Enter a value: + +var.folder_id + Yandex Folder ID + + Enter a value: + +var.iam_token + Enter a value: + + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # yandex_compute_disk.disk-1 will be created + + resource "yandex_compute_disk" "disk-1" { + + block_size = 4096 + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + image_id = "fd8k2ed4jspu35gfde1u" + + name = "disk-1" + + product_ids = (known after apply) + + size = 20 + + status = (known after apply) + + type = "network-hdd" + + zone = "ru-central1-b" + + + disk_placement_policy (known after apply) + + + hardware_generation (known after apply) + } + + # yandex_compute_instance.vm-1 will be created + + resource "yandex_compute_instance" "vm-1" { + + created_at = (known after apply) + + folder_id = (known after apply) + + fqdn = (known after apply) + + gpu_cluster_id = (known after apply) + + hardware_generation = (known after apply) + + hostname = "vm-1" + + id = (known after apply) + + maintenance_grace_period = (known after apply) + + maintenance_policy = (known after apply) + + metadata = { + + "ssh-keys" = (sensitive value) + } + + name = "vm-1" + + network_acceleration_type = "standard" + + platform_id = "standard-v2" + + service_account_id = (known after apply) + + status = (known after apply) + + zone = "ru-central1-b" + + + boot_disk { + + auto_delete = true + + device_name = (known after apply) + + disk_id = (known after apply) + + mode = (known after apply) + + + initialize_params (known after apply) + } + + + metadata_options (known after apply) + + + network_interface { + + index = (known after apply) + + ip_address = (known after apply) + + ipv4 = true + + ipv6 = (known after apply) + + ipv6_address = (known after apply) + + mac_address = (known after apply) + + nat = true + + nat_ip_address = (known after apply) + + nat_ip_version = (known after apply) + + security_group_ids = (known after apply) + + subnet_id = (known after apply) + } + + + placement_policy (known after 
apply) + + + resources { + + core_fraction = 20 + + cores = 2 + + memory = 2 + } + + + scheduling_policy { + + preemptible = true + } + } + + # yandex_vpc_network.network-1 will be created + + resource "yandex_vpc_network" "network-1" { + + created_at = (known after apply) + + default_security_group_id = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "network-1" + + subnet_ids = (known after apply) + } + + # yandex_vpc_subnet.subnet-1 will be created + + resource "yandex_vpc_subnet" "subnet-1" { + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "subnet-1" + + network_id = (known after apply) + + v4_cidr_blocks = [ + + "192.168.1.0/24", + ] + + v6_cidr_blocks = (known after apply) + + zone = "ru-central1-b" + } + +Plan: 4 to add, 0 to change, 0 to destroy. +``` + +### `terraform apply` + +```bash +ebob@laptop yandex_cloud_terraform % terraform apply +var.cloud_id + Yandex Cloud ID + + Enter a value: + +var.folder_id + Yandex Folder ID + + Enter a value: + +var.iam_token + Enter a value: + + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # yandex_compute_disk.disk-1 will be created + + resource "yandex_compute_disk" "disk-1" { + + block_size = 4096 + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + image_id = "fd8k2ed4jspu35gfde1u" + + name = "disk-1" + + product_ids = (known after apply) + + size = 20 + + status = (known after apply) + + type = "network-hdd" + + zone = "ru-central1-b" + + + disk_placement_policy (known after apply) + + + hardware_generation (known after apply) + } + + # yandex_compute_instance.vm-1 will be created + + resource "yandex_compute_instance" "vm-1" { + + created_at = (known after apply) + + folder_id = (known after apply) + + fqdn = (known after apply) + + gpu_cluster_id = (known after apply) + + hardware_generation = (known after apply) + + hostname = "vm-1" + + id = (known after apply) + + maintenance_grace_period = (known after apply) + + maintenance_policy = (known after apply) + + metadata = { + + "ssh-keys" = (sensitive value) + } + + name = "vm-1" + + network_acceleration_type = "standard" + + platform_id = "standard-v2" + + service_account_id = (known after apply) + + status = (known after apply) + + zone = "ru-central1-b" + + + boot_disk { + + auto_delete = true + + device_name = (known after apply) + + disk_id = (known after apply) + + mode = (known after apply) + + + initialize_params (known after apply) + } + + + metadata_options (known after apply) + + + network_interface { + + index = (known after apply) + + ip_address = (known after apply) + + ipv4 = true + + ipv6 = (known after apply) + + ipv6_address = (known after apply) + + mac_address = (known after apply) + + nat = true + + nat_ip_address = (known after apply) + + nat_ip_version = (known after apply) + + security_group_ids = (known after apply) + + subnet_id = (known after apply) + } + + + placement_policy (known after 
apply) + + + resources { + + core_fraction = 20 + + cores = 2 + + memory = 2 + } + + + scheduling_policy { + + preemptible = true + } + } + + # yandex_vpc_network.network-1 will be created + + resource "yandex_vpc_network" "network-1" { + + created_at = (known after apply) + + default_security_group_id = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "network-1" + + subnet_ids = (known after apply) + } + + # yandex_vpc_subnet.subnet-1 will be created + + resource "yandex_vpc_subnet" "subnet-1" { + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "subnet-1" + + network_id = (known after apply) + + v4_cidr_blocks = [ + + "192.168.1.0/24", + ] + + v6_cidr_blocks = (known after apply) + + zone = "ru-central1-b" + } + +Plan: 4 to add, 0 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +yandex_vpc_network.network-1: Creating... +yandex_compute_disk.disk-1: Creating... +yandex_vpc_network.network-1: Creation complete after 5s [id=enp5ntrp4t5tvdbp0052] +yandex_vpc_subnet.subnet-1: Creating... +yandex_vpc_subnet.subnet-1: Creation complete after 0s [id=e2l1hqpsrv83hq97m16t] +yandex_compute_disk.disk-1: Still creating... [10s elapsed] +yandex_compute_disk.disk-1: Creation complete after 12s [id=epdh7buhebqifudk67p4] +yandex_compute_instance.vm-1: Creating... +yandex_compute_instance.vm-1: Still creating... [10s elapsed] +yandex_compute_instance.vm-1: Still creating... [20s elapsed] +yandex_compute_instance.vm-1: Still creating... [30s elapsed] +yandex_compute_instance.vm-1: Still creating... [40s elapsed] +yandex_compute_instance.vm-1: Creation complete after 43s [id=epd6avjtflh4nqkrg2an] + +Apply complete! Resources: 4 added, 0 changed, 0 destroyed. 
+``` + +### `terraform state list` + +```bash +ebob@laptop yandex_cloud_terraform % terraform state list +yandex_compute_disk.disk-1 +yandex_compute_instance.vm-1 +yandex_vpc_network.network-1 +yandex_vpc_subnet.subnet-1 +``` + +### `terraform state show` + +#### `yandex_vpc_network.network-1` + +```bash +ebob@laptop yandex_cloud_terraform % terraform state show yandex_vpc_network.network-1 +# yandex_vpc_network.network-1: +resource "yandex_vpc_network" "network-1" { + created_at = "2025-02-03T20:35:24Z" + default_security_group_id = "enpno7pvi66b7gepf4sr" + description = null + folder_id = "b1ghr0ljvdknal1p1q6g" + id = "enp5ntrp4t5tvdbp0052" + labels = {} + name = "network-1" + subnet_ids = [] +} +``` + +#### `yandex_vpc_subnet.subnet-1` + +```bash +ebob@laptop yandex_cloud_terraform % terraform state show yandex_vpc_subnet.subnet-1 +# yandex_vpc_subnet.subnet-1: +resource "yandex_vpc_subnet" "subnet-1" { + created_at = "2025-02-03T20:35:27Z" + description = null + folder_id = "b1ghr0ljvdknal1p1q6g" + id = "e2l1hqpsrv83hq97m16t" + labels = {} + name = "subnet-1" + network_id = "enp5ntrp4t5tvdbp0052" + route_table_id = null + v4_cidr_blocks = [ + "192.168.1.0/24", + ] + v6_cidr_blocks = [] + zone = "ru-central1-b" +} +``` + +#### `yandex_compute_disk.disk-1` + +```bash +ebob@laptop yandex_cloud_terraform % terraform state show yandex_compute_disk.disk-1 +# yandex_compute_disk.disk-1: +resource "yandex_compute_disk" "disk-1" { + block_size = 4096 + created_at = "2025-02-03T20:35:24Z" + description = null + folder_id = "b1ghr0ljvdknal1p1q6g" + id = "epdh7buhebqifudk67p4" + image_id = "fd8k2ed4jspu35gfde1u" + name = "disk-1" + product_ids = [ + "f2ekpu3f32a5gg9e40kq", + ] + size = 20 + snapshot_id = null + status = "ready" + type = "network-hdd" + zone = "ru-central1-b" + + disk_placement_policy { + disk_placement_group_id = null + } + + hardware_generation { + legacy_features { + pci_topology = "PCI_TOPOLOGY_V1" + } + } +} +``` + +#### `yandex_compute_instance.vm-1` 
+ +```bash +ebob@laptop yandex_cloud_terraform % terraform state show yandex_compute_instance.vm-1 +# yandex_compute_instance.vm-1: +resource "yandex_compute_instance" "vm-1" { + created_at = "2025-02-03T20:35:35Z" + description = null + folder_id = "b1ghr0ljvdknal1p1q6g" + fqdn = "vm-1.ru-central1.internal" + gpu_cluster_id = null + hardware_generation = [ + { + generation2_features = [] + legacy_features = [ + { + pci_topology = "PCI_TOPOLOGY_V1" + }, + ] + }, + ] + hostname = "vm-1" + id = "epd6avjtflh4nqkrg2an" + maintenance_grace_period = null + metadata = { + "ssh-keys" = (sensitive value) + } + name = "vm-1" + network_acceleration_type = "standard" + platform_id = "standard-v2" + service_account_id = null + status = "running" + zone = "ru-central1-b" + + boot_disk { + auto_delete = true + device_name = "epdh7buhebqifudk67p4" + disk_id = "epdh7buhebqifudk67p4" + mode = "READ_WRITE" + + initialize_params { + block_size = 4096 + description = null + image_id = "fd8k2ed4jspu35gfde1u" + kms_key_id = null + name = "disk-1" + size = 20 + snapshot_id = null + type = "network-hdd" + } + } + + metadata_options { + aws_v1_http_endpoint = 1 + aws_v1_http_token = 2 + gce_http_endpoint = 1 + gce_http_token = 1 + } + + network_interface { + index = 0 + ip_address = "192.168.1.29" + ipv4 = true + ipv6 = false + ipv6_address = null + mac_address = "d0:0d:65:7e:7d:7d" + nat = true + nat_ip_address = "84.201.163.253" + nat_ip_version = "IPV4" + security_group_ids = [] + subnet_id = "e2l1hqpsrv83hq97m16t" + } + + placement_policy { + host_affinity_rules = [] + placement_group_id = null + placement_group_partition = 0 + } + + resources { + core_fraction = 20 + cores = 2 + gpus = 0 + memory = 2 + } + + scheduling_policy { + preemptible = true + } +} +``` + +Yandex Cloud VM + +## GitHub Terraform + +### Import with `terraform import` + +```bash +ebob@laptop github_terraform % terraform import "github_repository.repo" "devops-labs" +var.github_token + GitHub personal access token + 
+ Enter a value: + +github_repository.repo: Importing from ID "devops-labs"... +github_repository.repo: Import prepared! + Prepared github_repository for import +github_repository.repo: Refreshing state... [id=devops-labs] + +Import successful! + +The resources that were imported are shown above. These resources are now in +your Terraform state and will henceforth be managed by Terraform. +``` + +### Apply changes + +```bash +ebob@laptop github_terraform % terraform apply +var.github_token + GitHub personal access token + + Enter a value: + +github_repository.repo: Refreshing state... [id=devops-labs] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + + create + ~ update in-place + +Terraform will perform the following actions: + + # github_branch_default.master will be created + + resource "github_branch_default" "master" { + + branch = "master" + + etag = (known after apply) + + id = (known after apply) + + rename = false + + repository = "devops-labs" + } + + # github_branch_protection.master will be created + + resource "github_branch_protection" "master" { + + allows_deletions = false + + allows_force_pushes = false + + enforce_admins = true + + id = (known after apply) + + lock_branch = false + + pattern = "master" + + repository_id = "R_kgDONuYNyA" + + require_conversation_resolution = false + + require_signed_commits = false + + required_linear_history = false + + + required_pull_request_reviews { + + dismiss_stale_reviews = true + + require_code_owner_reviews = true + + require_last_push_approval = false + + required_approving_review_count = 1 + } + + + required_status_checks { + + strict = true + } + } + + # github_repository.repo will be updated in-place + ~ resource "github_repository" "repo" { + + description = "Innopolis University DevOps Course Labs" + ~ has_projects = true -> false + ~ has_wiki = true -> false + id = "devops-labs" + name = "devops-labs" + # 
(33 unchanged attributes hidden) + + # (1 unchanged block hidden) + } + +Plan: 2 to add, 1 to change, 0 to destroy. + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +github_repository.repo: Modifying... [id=devops-labs] +github_repository.repo: Modifications complete after 2s [id=devops-labs] +github_branch_default.master: Creating... +github_branch_protection.master: Creating... +github_branch_default.master: Creation complete after 3s [id=devops-labs] +github_branch_protection.master: Creation complete after 5s [id=BPR_kwDONuYNyM4DiLyy] + +Apply complete! Resources: 2 added, 1 changed, 0 destroyed. +``` + +## GitHub Teams Terraform + +Organization: [Bobkunov](https://github.com/Bobkunov) + +Repo: [phoenix-project](https://github.com/Bobkunov/phoenix-project) + +Teams: [Developers, DevOps, QA](https://github.com/orgs/Bobkunov/teams) + +```bash +ebob@laptop github_teams_terraform % terraform state list +github_branch_default.main +github_branch_protection.repo_protection +github_repository.repo +github_team.developers +github_team.devops +github_team.qa +github_team_repository.developers_access +github_team_repository.devops_access +github_team_repository.qa_access +``` diff --git a/terraform/docker_terraform/main.tf b/terraform/docker_terraform/main.tf new file mode 100644 index 0000000000..e74aa80731 --- /dev/null +++ b/terraform/docker_terraform/main.tf @@ -0,0 +1,28 @@ +terraform { + required_providers { + docker = { + source = "kreuzwerker/docker" + version = "~> 3.0.2" + } + } +} + +provider "docker" {} + +resource "docker_container" "app_python_container" { + image = var.docker_image_python + name = var.container_name_python + ports { + internal = var.internal_port_python + external = var.external_port_python + } +} + +resource "docker_container" "app_ruby_container" { + image = var.docker_image_ruby + name = var.container_name_ruby + ports { + 
internal = var.internal_port_ruby + external = var.external_port_ruby + } +} diff --git a/terraform/docker_terraform/outputs.tf b/terraform/docker_terraform/outputs.tf new file mode 100644 index 0000000000..0a5062d427 --- /dev/null +++ b/terraform/docker_terraform/outputs.tf @@ -0,0 +1,31 @@ +output "container_name_python" { + value = docker_container.app_python_container.name +} + +output "container_id_python" { + value = docker_container.app_python_container.id +} + +output "container_image_python" { + value = docker_container.app_python_container.image +} + +output "container_port_python" { + value = docker_container.app_python_container.ports +} + +output "container_name_ruby" { + value = docker_container.app_ruby_container.name +} + +output "container_id_ruby" { + value = docker_container.app_ruby_container.id +} + +output "container_image_ruby" { + value = docker_container.app_ruby_container.image +} + +output "container_port_ruby" { + value = docker_container.app_ruby_container.ports +} diff --git a/terraform/docker_terraform/variables.tf b/terraform/docker_terraform/variables.tf new file mode 100644 index 0000000000..7f75edb92d --- /dev/null +++ b/terraform/docker_terraform/variables.tf @@ -0,0 +1,47 @@ +variable "container_name_python" { + description = "Docker container name for Python app" + type = string + default = "msk" +} + +variable "docker_image_python" { + description = "Docker image for Python app" + type = string + default = "ebob/moscow-time:v1.0" +} + +variable "internal_port_python" { + description = "Internal port for Python app" + type = number + default = 8080 +} + +variable "external_port_python" { + description = "External port for Python app" + type = number + default = 8080 +} + +variable "container_name_ruby" { + description = "Docker container name for Ruby app" + type = string + default = "omsk" +} + +variable "docker_image_ruby" { + description = "Docker image for Ruby app" + type = string + default = "ebob/omsk-time:v1.0" +} + 
+variable "internal_port_ruby" {
+  description = "Internal port for Ruby app"
+  type        = number
+  default     = 4567
+}
+
+variable "external_port_ruby" {
+  description = "External port for Ruby app"
+  type        = number
+  default     = 8081
+}
diff --git a/terraform/github_teams_terraform/main.tf b/terraform/github_teams_terraform/main.tf
new file mode 100644
index 0000000000..a3282b0441
--- /dev/null
+++ b/terraform/github_teams_terraform/main.tf
@@ -0,0 +1,79 @@
+terraform {
+  required_providers {
+    github = {
+      source  = "integrations/github"
+      version = "~> 6.5.0"
+    }
+  }
+}
+
+provider "github" {
+  owner = var.github_organization
+  token = var.github_token
+}
+
+
+resource "github_team" "developers" {
+  name           = "Development Team"
+  description    = "We write code"
+  privacy        = "closed"
+  parent_team_id = null
+}
+
+resource "github_team" "devops" {
+  name           = "DevOps Team"
+  description    = "We deploy code"
+  privacy        = "closed"
+  parent_team_id = null
+}
+
+resource "github_team" "qa" {
+  name           = "QA Team"
+  description    = "We test code"
+  privacy        = "closed"
+  parent_team_id = null
+}
+
+resource "github_repository" "repo" {
+  name             = "phoenix-project"
+  description      = "The Phoenix Project"
+  visibility       = "public"
+  has_issues       = true
+  has_wiki         = true
+  auto_init        = true
+  license_template = "mit"
+}
+
+resource "github_branch_default" "main" {
+  repository = github_repository.repo.name
+  branch     = "main"
+}
+
+resource "github_branch_protection" "repo_protection" {
+  repository_id                   = github_repository.repo.id
+  pattern                         = github_branch_default.main.branch
+  require_conversation_resolution = true
+  enforce_admins                  = true
+
+  required_pull_request_reviews {
+    required_approving_review_count = 1
+  }
+}
+
+resource "github_team_repository" "devops_access" {
+  team_id    = github_team.devops.id
+  repository = github_repository.repo.name
+  permission = "admin"
+}
+
+resource "github_team_repository" "developers_access" {
+  team_id    = github_team.developers.id
+  repository = github_repository.repo.name
+  permission = "push"
+}
+
+resource "github_team_repository" "qa_access" {
+  team_id    = github_team.qa.id
+  repository = github_repository.repo.name
+  permission = "pull"
+}
diff --git a/terraform/github_teams_terraform/variables.tf b/terraform/github_teams_terraform/variables.tf
new file mode 100644
index 0000000000..654e58a400
--- /dev/null
+++ b/terraform/github_teams_terraform/variables.tf
@@ -0,0 +1,11 @@
+variable "github_token" {
+  type        = string
+  description = "GitHub token"
+  sensitive   = true
+}
+
+variable "github_organization" {
+  type        = string
+  description = "Organization"
+  default     = "Bobkunov"
+}
diff --git a/terraform/github_terraform/main.tf b/terraform/github_terraform/main.tf
new file mode 100644
index 0000000000..ee61049209
--- /dev/null
+++ b/terraform/github_terraform/main.tf
@@ -0,0 +1,41 @@
+terraform {
+  required_providers {
+    github = {
+      source  = "integrations/github"
+      version = "~> 6.5.0"
+    }
+  }
+}
+
+provider "github" {
+  token = var.github_token
+}
+
+resource "github_repository" "repo" {
+  name          = var.repository_name
+  description   = var.repository_description
+  visibility    = var.repository_visibility
+  has_downloads = var.has_downloads
+  has_issues    = var.has_issues
+  has_wiki      = var.has_wiki
+  has_projects  = var.has_projects
+}
+
+resource "github_branch_default" "master" {
+  repository = github_repository.repo.name
+  branch     = var.default_branch
+}
+
+resource "github_branch_protection" "master" {
+  repository_id = github_repository.repo.node_id
+  pattern       = var.default_branch
+  required_status_checks {
+    strict   = var.strict
+    contexts = []
+  }
+  enforce_admins = var.enforce_admins
+  required_pull_request_reviews {
+    dismiss_stale_reviews      = var.dismiss_stale_reviews
+    require_code_owner_reviews = var.require_code_owner_reviews
+  }
+}
diff --git a/terraform/github_terraform/variables.tf b/terraform/github_terraform/variables.tf
new file mode 100644
index 0000000000..79e44947be
--- /dev/null
+++ b/terraform/github_terraform/variables.tf
@@ -0,0 +1,77 @@
+variable "github_token" {
+  description = "GitHub personal access token"
+  type        = string
+  sensitive   = true
+}
+
+variable "repository_name" {
+  default     = "devops-labs"
+  description = "GitHub repository name"
+  type        = string
+}
+
+variable "repository_description" {
+  default     = "Innopolis University DevOps Course Labs"
+  description = "GitHub repository description"
+  type        = string
+}
+
+variable "repository_visibility" {
+  default     = "public"
+  description = "GitHub repository visibility"
+  type        = string
+}
+
+variable "has_downloads" {
+  default     = true
+  description = "Enable GitHub downloads"
+  type        = bool
+}
+
+variable "has_issues" {
+  default     = false
+  description = "Enable GitHub issues"
+  type        = bool
+}
+
+variable "has_wiki" {
+  default     = false
+  description = "Enable GitHub wiki"
+  type        = bool
+}
+
+variable "has_projects" {
+  default     = false
+  description = "Enable GitHub projects"
+  type        = bool
+}
+
+variable "default_branch" {
+  default     = "master"
+  description = "GitHub default branch"
+  type        = string
+}
+
+variable "strict" {
+  default     = true
+  description = "Require branches to be up to date before merging"
+  type        = bool
+}
+
+variable "enforce_admins" {
+  default     = true
+  description = "Enforce all configured restrictions for administrators"
+  type        = bool
+}
+
+variable "dismiss_stale_reviews" {
+  default     = true
+  description = "Dismiss approved reviews when someone pushes a new commit"
+  type        = bool
+}
+
+variable "require_code_owner_reviews" {
+  default     = true
+  description = "Require an approved review in pull requests including files with a designated code owner"
+  type        = bool
+}
diff --git a/terraform/yandex_cloud_terraform/main.tf b/terraform/yandex_cloud_terraform/main.tf
new file mode 100644
index 0000000000..e7eaefbc00
--- /dev/null
+++ b/terraform/yandex_cloud_terraform/main.tf
@@ -0,0 +1,65 @@
+terraform {
+  required_providers {
+    yandex = {
+      source  = "yandex-cloud/yandex"
+      version = "0.136.0"
+    }
+  }
+}
+
+provider "yandex" {
+  zone      = var.zone
+  token     = var.iam_token
+  cloud_id  = var.cloud_id
+  folder_id = var.folder_id
+}
+
+resource "yandex_compute_instance" "vm-1" {
+  name        = var.vm_name
+  platform_id = var.platform_id
+  zone        = var.zone
+  hostname    = var.hostname
+
+  resources {
+    cores         = var.cores
+    core_fraction = var.core_fraction
+    memory        = var.memory
+  }
+
+  scheduling_policy {
+    preemptible = var.preemptible
+  }
+
+  boot_disk {
+    disk_id = yandex_compute_disk.disk-1.id
+  }
+
+  network_interface {
+    subnet_id = yandex_vpc_subnet.subnet-1.id
+    nat       = var.nat
+  }
+
+  metadata = {
+    "ssh-keys" = format("%s:%s", var.vm_username, file(var.ssh_pubkey_path))
+  }
+
+}
+
+resource "yandex_compute_disk" "disk-1" {
+  name     = var.disk_name
+  zone     = var.zone
+  size     = var.disk_size
+  type     = var.disk_type
+  image_id = var.image_id
+}
+
+resource "yandex_vpc_network" "network-1" {
+  name = var.network_name
+}
+
+resource "yandex_vpc_subnet" "subnet-1" {
+  name           = var.subnet_name
+  zone           = var.zone
+  network_id     = yandex_vpc_network.network-1.id
+  v4_cidr_blocks = ["192.168.1.0/24"]
+}
diff --git a/terraform/yandex_cloud_terraform/variables.tf b/terraform/yandex_cloud_terraform/variables.tf
new file mode 100644
index 0000000000..48ec36dadc
--- /dev/null
+++ b/terraform/yandex_cloud_terraform/variables.tf
@@ -0,0 +1,120 @@
+variable "iam_token" {
+  type      = string
+  sensitive = true
+}
+
+variable "cloud_id" {
+  description = "Yandex Cloud ID"
+  type        = string
+  sensitive   = true
+}
+
+variable "folder_id" {
+  description = "Yandex Folder ID"
+  type        = string
+  sensitive   = true
+}
+
+variable "zone" {
+  description = "Yandex Cloud availability zone"
+  type        = string
+  default     = "ru-central1-b"
+}
+
+variable "image_id" {
+  description = "Image ID"
+  type        = string
+  default     = "fd8k2ed4jspu35gfde1u"
+}
+
+variable "vm_name" {
+  description = "Name of the virtual machine"
+  type        = string
+  default     = "vm-1"
+}
+
+variable "platform_id" {
+  description = "ID of the platform"
+  type        = string
+  default     = "standard-v2"
+}
+
+variable "hostname" {
+  description = "Hostname of the virtual machine"
+  type        = string
+  default     = "vm-1"
+}
+
+variable "cores" {
+  description = "Number of CPU cores"
+  type        = number
+  default     = 2
+}
+
+variable "core_fraction" {
+  description = "CPU core fraction"
+  type        = number
+  default     = 20
+}
+
+variable "memory" {
+  description = "Amount of memory in GB"
+  type        = number
+  default     = 2
+}
+
+variable "preemptible" {
+  description = "Preemptible instance"
+  type        = bool
+  default     = true
+}
+
+variable "disk_name" {
+  description = "Name of the disk"
+  type        = string
+  default     = "disk-1"
+}
+
+variable "disk_size" {
+  description = "Size of the disk in GB"
+  type        = number
+  default     = 20
+}
+
+variable "disk_type" {
+  description = "Type of the disk"
+  type        = string
+  default     = "network-hdd"
+}
+
+variable "network_name" {
+  description = "Name of the network"
+  type        = string
+  default     = "network-1"
+}
+
+variable "subnet_name" {
+  description = "Name of the subnet"
+  type        = string
+  default     = "subnet-1"
+}
+
+variable "nat" {
+  description = "Enable NAT"
+  type        = bool
+  default     = true
+}
+
+variable "vm_username" {
+  description = "Username for SSH access"
+  type        = string
+  default     = "ubuntu"
+  sensitive   = true
+}
+
+variable "ssh_pubkey_path" {
+  description = "Path to the SSH public key"
+  type        = string
+  default     = "~/.ssh/id_rsa.pub"
+  sensitive   = true
+}