diff --git a/.github/workflows/go-ci.yml b/.github/workflows/go-ci.yml new file mode 100644 index 0000000000..d5ca552d6f --- /dev/null +++ b/.github/workflows/go-ci.yml @@ -0,0 +1,88 @@ +name: go-ci + +on: + push: + branches: [ "main", "master", "lab*" ] + paths: + - "app_go/**" + - ".github/workflows/go-ci.yml" + pull_request: + paths: + - "app_go/**" + - ".github/workflows/go-ci.yml" + +concurrency: + group: go-ci-${{ github.ref }} + cancel-in-progress: true + +jobs: + test: + runs-on: ubuntu-latest + defaults: + run: + working-directory: app_go + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-go@v5 + with: + go-version-file: app_go/go.mod + cache: true + cache-dependency-path: app_go/go.sum + + - name: fmt check (print files) + run: | + echo "Go version:" + go version + + files="$(gofmt -l .)" + if [ -n "$files" ]; then + echo "::error::gofmt wants to reformat these files:" + echo "$files" + exit 1 + fi + + - name: vet + run: go vet ./... + + - name: test + coverage + run: go test ./... 
-coverprofile=coverage.out + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + files: app_go/coverage.out + flags: app_go + + # Docker push if tests passed + docker: + needs: [test] + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/lab')) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: docker/setup-buildx-action@v3 + id: buildx + + - name: Set tags + run: | + echo "CALVER=$(date -u +'%Y.%m.%d')" >> $GITHUB_ENV + echo "SHA_SHORT=${GITHUB_SHA::7}" >> $GITHUB_ENV + + - uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_TOKEN }} + + - uses: docker/build-push-action@v6 + with: + context: ./app_go + push: true + builder: ${{ steps.buildx.outputs.name }} + cache-from: type=gha + cache-to: type=gha,mode=max + tags: | + ${{ secrets.DOCKER_USERNAME }}/app-go:${{ env.CALVER }} + ${{ secrets.DOCKER_USERNAME }}/app-go:sha-${{ env.SHA_SHORT }} + ${{ secrets.DOCKER_USERNAME }}/app-go:latest diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..c70710d919 --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,140 @@ +name: Python CI + +on: + push: + # Run CI only if something has changed in the application folder or in the workflow itself + paths: + - "app_python/**/*.py" + - "app_python/requirements*.txt" + - "app_python/Dockerfile" + - ".github/workflows/python-ci.yml" + + pull_request: + paths: + - "app_python/**/*.py" + - "app_python/requirements*.txt" + - "app_python/Dockerfile" + - ".github/workflows/python-ci.yml" + +# Prevents old launches from replaying when new pushes are made +concurrency: + group: python-ci-${{ github.ref }} + cancel-in-progress: true + +# Minimum required privileges are provided +permissions: + contents: read + +env: + # To avoid duplicating paths/versions throughout the 
file + APP_DIR: app_python + PYTHON_VERSION: "3.12" + # Image repository on Docker Hub (tags added separately) + IMAGE_NAME: ${{ secrets.DOCKER_USERNAME }}/app_python + # SNYK token + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + +jobs: + test: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + # We're grabbing the repository code for the runner + - name: Checkout + uses: actions/checkout@v4 + + # Prepare the required version of Python and enable the pip cache (to speed up the work during subsequent launches) + - name: Setup Python (pip cache) + uses: actions/setup-python@v6 + with: + python-version: ${{ env.PYTHON_VERSION }} + cache: "pip" + cache-dependency-path: | + app_python/requirements.txt + app_python/requirements-dev.txt + + # Installing dependencies + - name: Install deps + working-directory: ${{ env.APP_DIR }} + run: | + python -m pip install --upgrade pip + pip install -r requirements-dev.txt + + # Code Quality Check: Style + Typical Errors (Before Testing) + - name: Lint (flake8) + working-directory: ${{ env.APP_DIR }} + run: flake8 app.py tests + + # Running unit tests + - name: Tests (pytest) + working-directory: ${{ env.APP_DIR }} + run: pytest -q + + # Security scan + - name: Setup Snyk + # Run Snyk only on the push branches we need and only if SNYK_TOKEN is specified (otherwise skip this step) + if: ${{ github.event_name == 'push' && (github.ref_name == 'main' || github.ref_name == 'master' || startsWith(github.ref_name, 'lab')) && env.SNYK_TOKEN != '' }} + uses: snyk/actions/setup@master + + # Checking dependencies for vulnerabilities (build stops at high+) + - name: Snyk dependency scan (fail on high+) + if: ${{ github.event_name == 'push' && (github.ref_name == 'main' || github.ref_name == 'master' || startsWith(github.ref_name, 'lab')) && env.SNYK_TOKEN != '' }} + working-directory: ${{ env.APP_DIR }} + env: + SNYK_TOKEN: ${{ env.SNYK_TOKEN }} + run: snyk test --severity-threshold=high + + # Checking code for vulnerabilities (build stops at 
high+) + - name: Snyk code scan (SAST) + if: ${{ github.event_name == 'push' && (github.ref_name == 'main' || github.ref_name == 'master' || startsWith(github.ref_name, 'lab')) && env.SNYK_TOKEN != '' }} + working-directory: ${{ env.APP_DIR }} + env: + SNYK_TOKEN: ${{ env.SNYK_TOKEN }} + run: snyk code test --severity-threshold=high + + docker: + # Docker job is started only if tests/linter passed + needs: [test] + runs-on: ubuntu-latest + timeout-minutes: 15 + + # push image only on push + if: ${{ github.event_name == 'push' && (github.ref_name == 'main' || github.ref_name == 'master' || startsWith(github.ref_name, 'lab')) }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Docker Buildx (for GHA cache) + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + install: true + + # CalVer - version by date + # SHA — a unique tag for each commit + - name: Generate CalVer + SHA tag + run: | + echo "CALVER=$(date -u +'%Y.%m.%d')" >> $GITHUB_ENV + echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV + + # Authorization in Docker Hub using a token from GitHub Secrets + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_TOKEN }} + + # Build and push the image (Layers are cached to make rebuilds faster) + - name: Build and push (with cache) + uses: docker/build-push-action@v6 + with: + context: ./${{ env.APP_DIR }} + push: true + tags: | + ${{ env.IMAGE_NAME }}:${{ env.CALVER }} + ${{ env.IMAGE_NAME }}:sha-${{ env.SHORT_SHA }} + ${{ env.IMAGE_NAME }}:latest + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/terraform-ci.yml b/.github/workflows/terraform-ci.yml new file mode 100644 index 0000000000..753bda177a --- /dev/null +++ b/.github/workflows/terraform-ci.yml @@ -0,0 +1,66 @@ +name: Terraform CI + +on: + pull_request: + paths: + - "terraform/**/*.tf" + - "terraform/**/*.hcl" + - 
"terraform/**/.terraform.lock.hcl" + - "terraform/**/.tflint.hcl" + - ".github/workflows/terraform-ci.yml" + - "!terraform/docs/**" + +# Prevent old runs from continuing if new commits are pushed to the same PR +concurrency: + group: terraform-ci-${{ github.ref }} + cancel-in-progress: true + +# Minimum required privileges +permissions: + contents: read + +env: + TF_IN_AUTOMATION: "true" + TF_INPUT: "false" + TF_DIR: terraform + +jobs: + validate: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + + - name: Terraform fmt (check) + run: terraform fmt -check -recursive + working-directory: ${{ env.TF_DIR }} + + - name: Terraform init (no backend) + run: terraform init -backend=false -input=false + working-directory: ${{ env.TF_DIR }} + + - name: Terraform validate + run: terraform validate -no-color + working-directory: ${{ env.TF_DIR }} + + - name: Setup TFLint + uses: terraform-linters/setup-tflint@v6 + with: + tflint_version: latest + cache: false + tflint_config_path: terraform/.tflint.hcl + + - name: TFLint init + run: tflint --init + working-directory: ${{ env.TF_DIR }} + env: + GITHUB_TOKEN: ${{ github.token }} + + - name: TFLint + run: tflint -f compact + working-directory: ${{ env.TF_DIR }} diff --git a/ansible/.gitignore b/ansible/.gitignore new file mode 100644 index 0000000000..f16aa580de --- /dev/null +++ b/ansible/.gitignore @@ -0,0 +1,9 @@ +# Python venv +.venv/ + +# Ansible +*.retry +.vault_pass + +# Dynamic inventory (may contain secrets / generated) +inventory/yc_compute.yml \ No newline at end of file diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..40a06ca779 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,14 @@ +[defaults] +inventory = inventory/yandex_cloud.py +roles_path = roles +host_key_checking = False +retry_files_enabled = False +stdout_callback = ansible.builtin.default 
+result_format = yaml +interpreter_python = auto_silent +vault_password_file = .vault_pass + +[privilege_escalation] +become = True +become_method = sudo +become_user = root \ No newline at end of file diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md new file mode 100644 index 0000000000..cb551038c4 --- /dev/null +++ b/ansible/docs/LAB05.md @@ -0,0 +1,200 @@ +# LAB05 — Ansible Fundamentals + +## 1. Architecture Overview + +### Ansible version used +![Ansible version output](./screenshoots/LAB05/01_ansible_version_command.png) + +### Target VM OS and version +- **OS:** Linux +- **Distro**: Ubuntu +- **Version:** 22.04 LTS + +### Role structure explanation +The project is organized in "layers" (from the base OS to the application), which allows roles to share responsibilities (OS, platform, application), and also simplifies reuse, keeps playbooks short and readable, and helps ensure idempotency. + +### Motivation for using roles instead of monolithic playbooks +- **Reusability:** a role can be applied across multiple projects/environments without copying YAML. +- **Maintainability:** smaller files are easier to read, test, and review. +- **Composition:** roles can be used to create different playbooks for different environments. +- **Idempotency and handlers:** it's easier to structure dependencies and service restarts. + +--- + +## 2. Roles Documentation + +### 2.1 Role: `common` + +**Purpose** - basic VM preparation + +**Variables (`roles/common/defaults/main.yml` file):** + - `common_packages` — list of packages to install (`curl`, `git`, `vim`, `htop`, `python3-pip`, etc.); + - `common_timezone` — time zone (if `common_set_timezone`: is `true`). 
+ +**Tasks (`roles/common/tasks/main.yml` file):** +- Updating `apt` cache (`apt update_cache`) +- Installing common packages (`apt state: present`) +- Setting timezone + +**Handlers** - Not required + +**Dependencies** - Not required + + +### 2.2 Role: `docker` + +**Purpose** - installing and configuring Docker Engine. + +**Variables (`roles/docker/defaults/main.yml` file):** + - `docker_apt_repo` — the source of the packages; + - `docker_packages` — a list of Docker packages (`docker-ce`, `docker-ce-cli`, `containerd.io`, etc.); + - `docker_users` — a list of users to add to the `docker` group. + +**Tasks (`roles/docker/tasks/main.yml` file):** + - Creating `/etc/apt/keyrings` directory (`file state: directory`, mode `0755`); + - Downloading Docker GPG key (ASCII) to `/etc/apt/keyrings/docker.asc` (`get_url`, `register: docker_key_download`) ; + - Dearmoring GPG key to `/etc/apt/keyrings/docker.gpg` (`command: gpg --dearmor`, only when `docker_key_download.changed`); + - Adding Docker APT repository (`apt_repository repo: {{ docker_apt_repo }}`, `filename: docker`); + - Updating `apt` cache after repo add (`apt update_cache: true`); + - Installing Docker packages (`apt name: {{ docker_packages }} state: present`, `notify: restart docker`); + - Ensuring Docker service is running and enabled (`service name: docker state: started enabled: true`); + - Installing Python Docker SDK (`apt name: python3-docker state: present`) + - Adding users to `docker` group (`user groups: docker append: true`, `loop: {{ docker_users }}`); + +**Handlers (`roles/docker/handlers/main.yml` file)** - `restart docker` — restart the docker service (called by `notify` after config/package changes) + +**Dependencies** - It's better to run it after `common`, but there is no hard dependency. + +### 2.3 Role: `app_deploy` + +**Purpose** - deploying a containerized application. 
+
+**Tasks (`roles/app_deploy/tasks/main.yml` file):**
+ - Logging in to Docker Hub (`docker_login` with `dockerhub_username`/`dockerhub_password`, `no_log: true`)
+ - Pulling application image (`docker_image name: {{ docker_image }} tag: {{ docker_image_tag }} source: pull`)
+ - Stopping the existing container if it is running (`docker_container state: stopped`, `failed_when: false`)
+ - Removing the old container if it exists (`docker_container state: absent`, `failed_when: false`)
+ - Running the application container (`docker_container state: started`, ports `{{ app_port }}:{{ app_port }}`, env `{{ app_env }}`, `notify: restart app container`)
+ - Waiting for the application port to become available (`wait_for host: 127.0.0.1 port: {{ app_port }} timeout: {{ app_wait_timeout }}`)
+ - Verifying the health endpoint (`uri url: http://127.0.0.1:{{ app_port }}{{ app_health_path }}` expecting `status_code: 200`)
+
+
+**Handlers (`roles/app_deploy/handlers/main.yml` file)** - `restart app container` (optional, used when the container must be restarted after changes)
+
+**Dependencies** - Depends on the `docker` role (Docker Engine + the Python Docker SDK must be installed).
+---
+
+## 3. Idempotency Demonstration
+
+### Terminal output from FIRST provision.yml run
+![Terminal output from FIRST provision.yml run](./screenshoots/LAB05/02_first_provision.png)
+
+### Terminal output from SECOND provision.yml run
+![Terminal output from SECOND provision.yml run](./screenshoots/LAB05/03_second_provision.png)
+
+
+### Analysis
+In the first run, Ansible was building the server: updating APT, installing base packages, adding the GPG key, and so on, so almost all tasks were in the `changed` state. In the second run, the target state had already been reached, so almost all steps were `ok`, the key's dearmor was skipped (`skipping`, since the key hadn't changed), and `apt update_cache` again showed `changed`, because APT cache updates are often recorded as changes even when no actual package changes were made.
+ +### Achieving role idempotency + - Use stateful modules (`apt`, `service`, `user`, `file`, `apt_repository`) instead of "blind" shell commands. + - Avoid using `force: true` unless necessary (otherwise it will be `changed` every time). + - Commands like `gpg --dearmor` are executed using `creates:` or other conditions to avoid changing the state on reruns. + - Handlers are run only when there are changes (`notify`). + +--- + +## 4. Ansible Vault Usage +### Principle of secure storage of credentials +Secrets (Docker Hub username and token, image parameters, etc.) are stored in `group_vars/all.yml`, encrypted by Ansible Vault. + +### Vault password management strategy +- The `.vault_pass` file is used (`chmod 600`), added to `.gitignore`. +- `vault_password_file = .vault_pass` is enabled in `ansible.cfg`. + +### Encrypted file +![Encrypted file](./screenshoots/LAB05/04_encrypted_file.png) + + +### The Importance of Ansible Vault +- Allows you to store secrets securely in the repository (encrypted). +- Prevents token/password leaks from plain-text configs. +- Simplifies team work (secrets are centralized, access is controlled by a Vault password). + +--- + +## 5. Deployment Verification + +### Terminal output from deploy.yml run and Health-check +![deploy.yml run and Health-check](./screenshoots/LAB05/05_deploy_run_and_health.png) + +### Handler execution +In the previous screenshot, you can see that when changing the container/config, the handler (in this case `restart app container`) was executed once at the end, which shows up as `RUNNING HANDLER` in the Ansible output. + +### Container status +![Container status (`docker ps` command)](./screenshoots/LAB05/06_docker_ps_command.png) + +--- + +## 6. Key Decisions + +### Why use roles instead of plain playbooks? +Roles allow you to separate logic from playbooks, reuse code, and maintain your infrastructure as a modular set of “building blocks.” + +### How do roles improve reusability? 
+A single role (e.g., `docker`, `common`) can be used for different projects/VMs. Only the variables change, while the logic remains the same. + +### What makes a task idempotent? +A task is idempotent if running it again doesn't change the system once the desired state has been reached. In Ansible, this is achieved using modules that compare the before and after states. + +### How do handlers improve efficiency? +The handler is executed only when real changes occur and once at the end of play, avoiding unnecessary service restarts. + +### Why is Ansible Vault necessary? +Vault prevents secrets from being stored in plaintext and reduces the risk of secrets being compromised when publishing a repository. + +--- + +## 7. Challenges + +The main issue was implementing the bonus task, which involved the **YC Inventory plugin**. In my environment, `community.general.yc_compute` returned an empty inventory (0 hosts) even with correct authorization and FolderId value, so I used an alternative dynamic inventory based on the yc CLI and Python code. + +--- + +# Bonus — Dynamic Inventory (Yandex Cloud) + +## What is used and why + - Target platform: **Yandex Cloud (Compute Cloud)**. + - An attempt to use the proposed `yandex.cloud.yandex_compute` collection resulted in installation errors:![collection errors](./screenshoots/LAB05/07_collection_error.png) + - An attempt to use the [open source](https://raw.githubusercontent.com/st8f/community.general/yc_compute/plugins/inventory/yc_compute.py) community.general.yc_compute resulted in unstable operation in the current environment (inventory returned 0 hosts). + - A dynamic inventory script based on the `yc` CLI was chosen as a stable solution: + - The CLI is guaranteed to see the VM in the correct folder and correctly outputs JSON. + - The script generates inventory JSON for Ansible (group `webservers`, `ansible_host` = public NAT IP). 
+ +## Authentication +- Authentication is performed via the `yc` CLI (profile `sa-test`/service account key). +- The service account key is stored locally and is not committed to the repository. + +## Mapping cloud metadata to Ansible variables +- `ansible_host` is taken from the public NAT IP (`one_to_one_nat.address`); if it's not available, it falls back to the private IP. +- `ansible_user` = `ubuntu` +- `ansible_python_interpreter` = `/usr/bin/python3` +- You can also pass `yc_instance_id` as a hostvar. + +## Terminal output from `ansible-inventory --graph` +![Terminal output from `ansible-inventory --graph`](./screenshoots/LAB05/08_bonus_graph.png) + + +## Terminal output from running playbooks with dynamic inventory +![Playbook running](./screenshoots/LAB05/09_bonus_playbook_running.png) +![Deploy running](./screenshoots/LAB05/10_bonus_deploy_running.png) + +## What happens when VM IP changes? +Each time `ansible-inventory` is run, the inventory is rebuilt based on the current YC data, so manually updating the IP in `hosts.ini` is not required. + +## Benefits compared to static inventory +- No need to manually maintain up-to-date IP addresses. +- Convenient scaling across multiple VMs. +- Less human error when changing infrastructure. 
+ + diff --git a/ansible/docs/screenshoots/LAB05/01_ansible_version_command.png b/ansible/docs/screenshoots/LAB05/01_ansible_version_command.png new file mode 100644 index 0000000000..8054a716eb Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/01_ansible_version_command.png differ diff --git a/ansible/docs/screenshoots/LAB05/02_first_provision.png b/ansible/docs/screenshoots/LAB05/02_first_provision.png new file mode 100644 index 0000000000..04fa331e98 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/02_first_provision.png differ diff --git a/ansible/docs/screenshoots/LAB05/03_second_provision.png b/ansible/docs/screenshoots/LAB05/03_second_provision.png new file mode 100644 index 0000000000..4bed000d87 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/03_second_provision.png differ diff --git a/ansible/docs/screenshoots/LAB05/04_encrypted_file.png b/ansible/docs/screenshoots/LAB05/04_encrypted_file.png new file mode 100644 index 0000000000..bbd18277b9 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/04_encrypted_file.png differ diff --git a/ansible/docs/screenshoots/LAB05/05_deploy_run_and_health.png b/ansible/docs/screenshoots/LAB05/05_deploy_run_and_health.png new file mode 100644 index 0000000000..84a35bf589 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/05_deploy_run_and_health.png differ diff --git a/ansible/docs/screenshoots/LAB05/06_docker_ps_command.png b/ansible/docs/screenshoots/LAB05/06_docker_ps_command.png new file mode 100644 index 0000000000..a8fe29f058 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/06_docker_ps_command.png differ diff --git a/ansible/docs/screenshoots/LAB05/07_collection_error.png b/ansible/docs/screenshoots/LAB05/07_collection_error.png new file mode 100644 index 0000000000..b39c548206 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/07_collection_error.png differ diff --git a/ansible/docs/screenshoots/LAB05/08_bonus_graph.png 
b/ansible/docs/screenshoots/LAB05/08_bonus_graph.png new file mode 100644 index 0000000000..b4a0ca84c5 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/08_bonus_graph.png differ diff --git a/ansible/docs/screenshoots/LAB05/09_bonus_playbook_running.png b/ansible/docs/screenshoots/LAB05/09_bonus_playbook_running.png new file mode 100644 index 0000000000..0c1c4a4344 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/09_bonus_playbook_running.png differ diff --git a/ansible/docs/screenshoots/LAB05/10_bonus_deploy_running.png b/ansible/docs/screenshoots/LAB05/10_bonus_deploy_running.png new file mode 100644 index 0000000000..22346a4237 Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/10_bonus_deploy_running.png differ diff --git a/ansible/docs/screenshoots/LAB05/Screenshot_20260226_101531.png b/ansible/docs/screenshoots/LAB05/Screenshot_20260226_101531.png new file mode 100644 index 0000000000..493ba147df Binary files /dev/null and b/ansible/docs/screenshoots/LAB05/Screenshot_20260226_101531.png differ diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml new file mode 100644 index 0000000000..a327c62583 --- /dev/null +++ b/ansible/group_vars/all.yml @@ -0,0 +1,22 @@ +$ANSIBLE_VAULT;1.1;AES256 +38393664383232346666636238343963616431353562616664336132383638393465336331383833 +3762393464393839393766373961306264376330643862310a626464386337306538396232666365 +62363033663666366166313561333363333464383266303133333363613338363131303533346637 +3134363731396238630a386539656635613863336333633230323464623834646133333932336130 +33373639303137346338366135316436663237383033646163316439393136623264623732313831 +30316330663837313463643263376162396636376433613866373032376633623132633138383839 +37636664326338666630393633333061386237653237633136613165316633303734303035363266 +33396138373835386461666334356236643637663331356564303364636665373261656531366265 +66303630373962336466616263653232363466323565326537366336653037366362636531623233 
+35366164623538616164353938633861386331653237383962613261366339623663666561633030 +33626236373032653139643864373662316532346431616234376436616336366635373061343861 +38633539643930313334363938383363366661303236356133633366663639656337623464313130 +62343637323764333330616335313238373761653835343237383263333062376532313937373332 +34666564306165363236303935643532363565343533346238323037383936633233373139353937 +34626361646663666433313962343835643662323264326636666438646131313934333234623734 +66313561356663386663663230393135623463613332383261383334666330343764313537316638 +61306166313038323766383530653438663930613334636462613538393033383135643362636431 +66613164613765326631333933633737386265383535303166373834666234333037393234333130 +37336265356164373735613230636538663530653233623564383266633338653733336166356263 +33306437633039366235363135366463333233396236313166633863373965306365376461366230 +39343735356333383332303334653036343462373739313261363339323538316361 diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini new file mode 100644 index 0000000000..0388a75320 --- /dev/null +++ b/ansible/inventory/hosts.ini @@ -0,0 +1,5 @@ +[webservers] +vm1 ansible_host=93.77.180.9 ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/yc-lab04 + +[all:vars] +ansible_python_interpreter=/usr/bin/python3 \ No newline at end of file diff --git a/ansible/inventory/yandex_cloud.py b/ansible/inventory/yandex_cloud.py new file mode 100755 index 0000000000..d5200bc04c --- /dev/null +++ b/ansible/inventory/yandex_cloud.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +import json +import subprocess +import sys + +def sh(*args: str) -> str: + return subprocess.check_output(args, text=True) + +def main() -> None: + # Uses current 'yc' CLI auth/profile (you already configured it) + data = sh("yc", "compute", "instance", "list", "--format", "json") + instances = json.loads(data) + + hosts = [] + hostvars = {} + + for vm in instances: + nis = vm.get("network_interfaces") or [] + if 
not nis: + continue + + p4 = (nis[0].get("primary_v4_address") or {}) + nat = (p4.get("one_to_one_nat") or {}) + public_ip = nat.get("address") + private_ip = p4.get("address") + + ip = public_ip or private_ip + if not ip: + continue + + name = vm.get("name") or ip + hosts.append(name) + hostvars[name] = { + "ansible_host": ip, + "ansible_user": "ubuntu", + "ansible_python_interpreter": "/usr/bin/python3", + "yc_instance_id": vm.get("id"), + } + + inventory = { + "_meta": {"hostvars": hostvars}, + "all": {"children": ["webservers"]}, + "webservers": {"hosts": hosts}, + } + + print(json.dumps(inventory, indent=2)) + +if __name__ == "__main__": + main() diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml new file mode 100644 index 0000000000..81107f6d8b --- /dev/null +++ b/ansible/playbooks/deploy.yml @@ -0,0 +1,10 @@ +--- +- name: Deploy application + hosts: webservers + become: true + + vars_files: + - ../group_vars/all.yml + + roles: + - app_deploy \ No newline at end of file diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml new file mode 100644 index 0000000000..9156774548 --- /dev/null +++ b/ansible/playbooks/provision.yml @@ -0,0 +1,8 @@ +--- +- name: Provision web servers + hosts: webservers + become: true + + roles: + - common + - docker \ No newline at end of file diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ansible/roles/app_deploy/defaults/main.yml b/ansible/roles/app_deploy/defaults/main.yml new file mode 100644 index 0000000000..88393fe4bb --- /dev/null +++ b/ansible/roles/app_deploy/defaults/main.yml @@ -0,0 +1,7 @@ +--- +app_port: 5000 +app_container_name: "devops-app" +app_restart_policy: unless-stopped +app_env: {} +app_health_path: "/health" +app_wait_timeout: 60 \ No newline at end of file diff --git a/ansible/roles/app_deploy/handlers/main.yml b/ansible/roles/app_deploy/handlers/main.yml new file mode 100644 
index 0000000000..5d7d97785a --- /dev/null +++ b/ansible/roles/app_deploy/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart app container + community.docker.docker_container: + name: "{{ app_container_name }}" + state: started + restart: true \ No newline at end of file diff --git a/ansible/roles/app_deploy/tasks/main.yml b/ansible/roles/app_deploy/tasks/main.yml new file mode 100644 index 0000000000..90cc2ccdeb --- /dev/null +++ b/ansible/roles/app_deploy/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- name: Login to Docker Hub + community.docker.docker_login: + username: "{{ dockerhub_username }}" + password: "{{ dockerhub_password }}" + no_log: true + +- name: Pull application image + community.docker.docker_image: + name: "{{ docker_image }}" + tag: "{{ docker_image_tag }}" + source: pull + +- name: Stop existing container (if running) + community.docker.docker_container: + name: "{{ app_container_name }}" + state: stopped + failed_when: false + +- name: Remove old container (if exists) + community.docker.docker_container: + name: "{{ app_container_name }}" + state: absent + failed_when: false + +- name: Run application container + community.docker.docker_container: + name: "{{ app_container_name }}" + image: "{{ docker_image }}:{{ docker_image_tag }}" + state: started + restart_policy: "{{ app_restart_policy }}" + published_ports: + - "{{ app_port }}:{{ app_port }}" + env: "{{ app_env }}" + notify: restart app container + +- name: Wait for application port + ansible.builtin.wait_for: + host: "127.0.0.1" + port: "{{ app_port }}" + timeout: "{{ app_wait_timeout }}" + +- name: Verify health endpoint + ansible.builtin.uri: + url: "http://127.0.0.1:{{ app_port }}{{ app_health_path }}" + method: GET + status_code: 200 + return_content: true \ No newline at end of file diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 0000000000..8bbc481ef4 --- /dev/null +++ b/ansible/roles/common/defaults/main.yml 
@@ -0,0 +1,13 @@ +common_packages: + - python3-pip + - curl + - git + - vim + - htop + - ca-certificates + - gnupg + - lsb-release + + +common_set_timezone: true +common_timezone: "UTC" \ No newline at end of file diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 0000000000..7cd9e32161 --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + +- name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + +- name: Set timezone (optional) + community.general.timezone: + name: "{{ common_timezone }}" + when: common_set_timezone | bool \ No newline at end of file diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..0226272124 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,12 @@ +--- +docker_apt_repo: "deb [signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu {{ ansible_facts['distribution_release'] }} stable" + +docker_packages: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + +docker_users: + - "{{ ansible_user | default('ubuntu') }}" \ No newline at end of file diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..c923140c95 --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart docker + ansible.builtin.service: + name: docker + state: restarted \ No newline at end of file diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..089119f202 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: Ensure apt keyrings dir exists + ansible.builtin.file: + 
path: /etc/apt/keyrings + state: directory + mode: "0755" + +- name: Download Docker GPG key (ASCII) + ansible.builtin.get_url: + url: https://download.docker.com/linux/ubuntu/gpg + dest: /etc/apt/keyrings/docker.asc + mode: "0644" + register: docker_key_download + +- name: Dearmor Docker GPG key to keyring (only when key changed) + ansible.builtin.command: > + gpg --dearmor -o /etc/apt/keyrings/docker.gpg /etc/apt/keyrings/docker.asc + when: docker_key_download.changed + +- name: Add Docker apt repository + ansible.builtin.apt_repository: + repo: "{{ docker_apt_repo }}" + state: present + filename: docker + +- name: Update apt cache after adding Docker repo + ansible.builtin.apt: + update_cache: true + +- name: Install Docker packages + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + notify: restart docker + +- name: Ensure docker service is enabled and running + ansible.builtin.service: + name: docker + state: started + enabled: true + +- name: Install python Docker SDK for Ansible modules + ansible.builtin.apt: + name: python3-docker + state: present + +- name: Add users to docker group + ansible.builtin.user: + name: "{{ item }}" + groups: docker + append: true + loop: "{{ docker_users }}" \ No newline at end of file diff --git a/app_go/.dockerignore b/app_go/.dockerignore new file mode 100644 index 0000000000..00c956b3db --- /dev/null +++ b/app_go/.dockerignore @@ -0,0 +1,51 @@ +# VCS +.git +.gitignore + +# IDE / editors +.vscode/ +.idea/ +*.swp + +# OS junk +.DS_Store +Thumbs.db + +# Docs (not needed for build/run) +docs/ +*.md +README* +LICENSE* + +# Secrets / env files (NEVER ship) +.env +.env.* + +# Logs / reports +*.log +coverage* +*.out +*.prof +*.trace + +# Go build artifacts +bin/ +dist/ +build/ +out/ +*.exe +*.dll +*.so +*.dylib +*.a +*.o + +# Go test cache / tooling caches +**/*_test.go +.golangci.yml +.golangci-lint* +.gotools/ +.tmp/ +tmp/ + + diff --git a/app_go/.gitignore b/app_go/.gitignore new file mode 100644 index 
0000000000..bab2935820 --- /dev/null +++ b/app_go/.gitignore @@ -0,0 +1,8 @@ +# Go build outputs +/devops-info-service +*.exe +*.out + +# Go tooling +/bin/ +/dist/ diff --git a/app_go/Dockerfile b/app_go/Dockerfile new file mode 100644 index 0000000000..49c26e775d --- /dev/null +++ b/app_go/Dockerfile @@ -0,0 +1,27 @@ +# ---------- Stage 1: Build ---------- +# Use the full Go toolchain image only for compilation, which will take place in the /app directory +FROM golang:1.25.5-bookworm AS builder +WORKDIR /app + +# Copy and install module metadata to use Docker layer caching (if go.sum appears later, this will speed up rebuilding). +COPY go.mod ./ +RUN go mod download + +# Copy the rest of the source code (excess code is filtered out by .dockerignore) and build the binary +COPY . . +RUN CGO_ENABLED=0 go build -o myapp . + +# ---------- Stage 2: Runtime ---------- +# Define the runtime environment (only necessary for running the binary). +FROM alpine:3.18 + +# Create an unprivileged user (does not run as root). +RUN adduser -D appuser + +# Create an application directory inside the runtime container and copy the binary from the compilation environment into it (assigning the file to the appuser user) +WORKDIR /app +COPY --from=builder --chown=appuser:appuser /app/myapp . +USER appuser + +EXPOSE 5000 +CMD ["./myapp"] \ No newline at end of file diff --git a/app_go/README.md b/app_go/README.md new file mode 100644 index 0000000000..d193cba049 --- /dev/null +++ b/app_go/README.md @@ -0,0 +1,246 @@ +# DevOps Info Service (Lab 01) — Go version + +Small Go web app for DevOps labs (extra points). + +Provides: +- `GET /` — service/system/runtime/request info + list of endpoints +- `GET /health` — simple health check endpoint (for monitoring / K8s probes) + +Configuration is done via environment variables: `HOST`, `PORT`, `DEBUG`. 
+ +--- + +## Overview + +This service returns diagnostic information about: +- service metadata (name/version/description/framework) +- host system (hostname/platform/arch/CPU/Go version) +- runtime (uptime + current UTC time) +- current request (client IP, user-agent, method, path) +- available API endpoints (kept as a registry in the app and returned sorted) + +--- + +## Prerequisites + +- Go **1.20+** (any modern Go should work) +- No third-party dependencies (standard library only) + +--- + +## Quick Start (go run) +If `go.mod` is missing, you can create it: + +```bash +go mod init devops-info-service +``` +From the directory containing `main.go`: + +Default (0.0.0.0:5000): +```bash +go run . +# or: go run main.go +``` + +Custom port: +```bash +PORT=8080 go run . +``` + +Custom host + port: +```bash +HOST=127.0.0.1 PORT=3000 go run . +``` + +Enable debug-style logging: +```bash +DEBUG=true go run . +``` + +--- + +## Build (Binary) + +1. Initialize modules (if needed): +```bash +go mod init devops-info-service +go mod tidy +``` +2. Build a local binary: +```bash +go build -o devops-info-service . +``` +3. Run the binary: +```bash +./devops-info-service +# or with config: +HOST=127.0.0.1 PORT=3000 DEBUG=true ./devops-info-service +``` +--- + +## API Endpoints + +### `GET /` + +Returns full service + runtime info. 
+ +Example: +```bash +curl -s http://127.0.0.1:5000/ | jq '{service, system, runtime, request, endpoints}' +``` + +Response structure: +```json +{ + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Go net/http" + }, + "system": { + "hostname": "SerggAidd", + "platform": "linux", + "platform_version": "6.18.5-arch1-1", + "architecture": "amd64", + "cpu_count": 24, + "go_version": "go1.25.5 X:nodwarf5" + }, + "runtime": { + "uptime_seconds": 3, + "uptime_human": "0 hour, 0 minutes", + "current_time": "2026-01-23T19:08:17Z", + "timezone": "UTC" + }, + "request": { + "client_ip": "127.0.0.1", + "user_agent": "curl/8.18.0", + "method": "GET", + "path": "/" + }, + "endpoints": [ + { + "method": "GET", + "path": "/", + "description": "Root endpoint: returns service metadata and diagnostic information." + }, + { + "method": "GET", + "path": "/health", + "description": "Health check endpoint for monitoring and Kubernetes probes." + } + ] +} +``` + +> Note: JSON object key ordering is not guaranteed by the HTTP/JSON standard. +> Use `python -m json.tool` or `jq` (like in example) only for pretty printing. + +--- + +### `GET /health` + +Health endpoint for monitoring / Kubernetes probes. + +Example: +```bash +curl -s http://127.0.0.1:5000/health | python -m json.tool +``` + +Response: +```json +{ + "status": "healthy", + "timestamp": "2026-01-23T19:08:22Z", + "uptime_seconds": 8 +} +``` + +Always returns HTTP **200** when the service is running. 
+ +--- + +## Error Handling + +- Unknown routes return JSON 404: + +Example: +```bash +curl -s http://127.0.0.1:5000/does-not-exist | python -m json.tool +``` + +Response: +```json +{ + "error": "Not Found", + "message": "Endpoint does not exist" +} +``` + +- Internal errors return JSON 500 (panic is recovered by middleware): + +Example (can be tested by uncommenting the code block with the corresponding endpoint): +```bash +curl -s http://127.0.0.1:5000/crash | python -m json.tool +``` + +Response: +```json +{ + "error": "Internal Server Error", + "message": "An unexpected error occurred" +} +``` + +--- + +## Logging + +The app logs to **stdout**, which is the recommended approach for Docker/Kubernetes. + +Logged events: +- request method/path/client IP/user-agent +- final HTTP status code and request latency +- recovered panics (500) with a short error message + +--- + +## Configuration + +| Variable | Default | Description | +|---------:|-----------|-------------| +| `HOST` | `0.0.0.0` | Bind address | +| `PORT` | `5000` | Listen port | +| `DEBUG` | `False` | `true` enables more verbose log flags | + +## Docker +Below are the basic commands for building and running an application in a container. + +### Local image build +```bash +cd app_go +docker build -t app-go:1.0 . +``` + +### Starting a container +The container listens on port 5000 internally, so we forward it to the host port (in the case bellow 8080) +```bash +docker run --rm -p 8080:5000 app-go:1.0 +``` +Example of running with variables: +```bash +docker run --rm -p 8080:5000 -e PORT=5000 -e DEBUG=false app-go:1.0 +``` +After that you can check endpoints in browser or in defferent terminal: +```bash +curl http://localhost:8080/ +curl http://localhost:8080/health +``` + +### Docker Hub (pull/run) +The image of this container publlished in Docker Hub. 
Image can be download with the following method: +```bash +docker pull sergey173/app-go:1.0.0 +docker run --rm -p 8080:5000 sergey173/app-go:1.0.0 +``` +Docker Hub repository URL: https://hub.docker.com/repository/docker/sergey173/app-go \ No newline at end of file diff --git a/app_go/docs/GO.md b/app_go/docs/GO.md new file mode 100644 index 0000000000..ff2519d273 --- /dev/null +++ b/app_go/docs/GO.md @@ -0,0 +1,28 @@ +# Language justification + +## Why Go +Go was selected as the implementation language because it offers a strong trade-off for a small, container-oriented HTTP JSON service: +- **Fast implementation**: minimal boilerplate, straightforward concurrency model, and a simple standard toolchain. +- **Standard library coverage**: `net/http`, `encoding/json`, `os`, `runtime`, and `time` are sufficient to implement routing, JSON serialization, environment-based configuration, and uptime without external dependencies. +- **Deployment simplicity**: produces a single static-like executable (depending on build settings), integrates cleanly with Docker/Kubernetes, and favors stdout logging by default. +- **Low operational overhead**: fast startup time and modest memory footprint, which is well-suited for health checks and probe endpoints. +- **Portability**: cross-compilation is first-class and enables easy builds for common targets (e.g., Linux/amd64) from a single development environment. + +## Contrast with other compiled languages + +### Go vs Rust +Rust provides stronger compile-time guarantees around memory safety, but typically increases development complexity (ownership model, lifetimes) and build friction for small services. For this lab-scale HTTP JSON service, Go achieves the required functionality faster while remaining reliable and maintainable. + +### Go vs Java +Java commonly implies a JVM/JRE runtime plus build tooling (Maven/Gradle), which increases packaging complexity and container footprint relative to a single Go binary. 
For a small service intended for probes/monitoring, Go keeps runtime requirements and deployment steps minimal. + +### Go vs C/C++ +C/C++ can produce small binaries, but requires manual memory management and often more complex build configuration. Go reduces the likelihood of memory-related defects and simplifies maintenance while still providing compiled performance and simple distribution. + +## Trade-offs +- Go provides fewer compile-time memory safety guarantees than Rust. +- Implementing routing/middleware without a third-party framework can require more manual code (though the standard library remains sufficient for the scope of this service). + +## Summary +Go was chosen to minimize dependencies and operational complexity while delivering a compact, portable HTTP JSON service suitable for containerized environments. Compared to Rust, Java, and C/C++, Go reduces implementation and deployment overhead for this specific lab task, with trade-offs that are acceptable given the service’s limited scope and reliance on the standard library. + diff --git a/app_go/docs/LAB01.md b/app_go/docs/LAB01.md new file mode 100644 index 0000000000..94ef578079 --- /dev/null +++ b/app_go/docs/LAB01.md @@ -0,0 +1,294 @@ +# LAB01 — DevOps Info Service (Go) + +This document describes the Go implementation of **DevOps Info Service** for Lab 01. +The service is a small HTTP JSON API that exposes system/runtime/request metadata and a health check endpoint. + +--- + +## Framework Selection + +### Choice: Go standard library (`net/http`) + +This implementation uses only the Go standard library: +- `net/http` — HTTP server +- `encoding/json` — JSON encoding +- `os`, `runtime`, `time` — configuration + system/runtime metadata +- a minimal custom router + middleware for logging and panic recovery + +**Rationale** +- Minimal dependencies (easy to review and reproduce). +- Predictable behavior and portability (single compiled binary). 
+- Container-friendly defaults (stdout logging, fast startup). + +### Comparison table with alternatives + +| Option | Pros | Cons | Decision | +|---|---|---|---| +| **Standard library (`net/http`)** | Zero deps, small binary, portable | Routing/middleware is manual | **Selected** (fits Lab 01 scope) | +| Gin | Fast, popular, good DX | External dependency, more abstraction | Not required for 2–3 routes | +| Echo | Middleware-rich, ergonomic | External dependency | Not required for Lab 01 | +| chi | Lightweight router | External dependency | Chose zero-deps approach | +| Gorilla/mux | Mature ecosystem | Heavier router, extra dep | Not needed for exact matches | + +--- + +## Best Practices Applied + +Below is a list of practices applied in the implementation, with short code excerpts and the reason each matters. + +### 1) Configuration via environment variables +**Example** +```go +host := os.Getenv("HOST") +if host == "" { host = "0.0.0.0" } + +port := os.Getenv("PORT") +if port == "" { port = "5000" } + +debug := strings.ToLower(os.Getenv("DEBUG")) == "true" +``` + +**Why it matters** +- Matches common container/Kubernetes configuration patterns. +- Allows the same binary to run in different environments without code changes. + +### 2) Consistent JSON errors (404 / 500) +**Examples** +```go +writeJSON(w, http.StatusNotFound, ErrorResponse{ + Error: "Not Found", Message: "Endpoint does not exist", +}) +``` + +```go +defer func() { + if rec := recover(); rec != nil { + writeJSON(w, http.StatusInternalServerError, ErrorResponse{ + Error: "Internal Server Error", + Message: "An unexpected error occurred", + }) + } +}() +``` + +**Why it matters** +- Ensures clients always receive machine-readable error payloads. +- Prevents unexpected crashes from stopping the service. 
+ +### 3) Request logging to stdout +**Example** +```go +log.Printf("Request %s %s from %s UA=%s -> %d (%s)", + r.Method, r.URL.Path, clientIP(r), r.UserAgent(), sw.status, lat) +``` + +**Why it matters** +- Stdout logging is the standard in Docker/Kubernetes. +- Useful for validating health probes and debugging locally. + +### 4) Proxy-aware client IP extraction +**Example** +```go +xff := r.Header.Get("X-Forwarded-For") +if xff != "" { + return strings.TrimSpace(strings.Split(xff, ",")[0]) +} +``` + +**Why it matters** +- Preserves real client IP when the service is behind an ingress/reverse proxy. + +--- + +## API Documentation + +### `GET /` +Returns service metadata, system/runtime details, request metadata, and the list of available endpoints. + +**Request** +```bash +curl -s http://127.0.0.1:5000/ +``` + +**Response** +See *Testing Evidence* below. + +### `GET /health` +Health endpoint for monitoring / Kubernetes probes. Returns HTTP **200** when the service is running. + +**Request** +```bash +curl -s http://127.0.0.1:5000/health +``` + +**Response (example)** +```json +{ + "status": "healthy", + "timestamp": "2026-01-23T19:08:22Z", + "uptime_seconds": 8 +} +``` + +--- + +## Testing Commands + +Run the service: +```bash +HOST=0.0.0.0 PORT=5000 DEBUG=false go run main.go +``` + +Test endpoints: +```bash +curl -s http://127.0.0.1:5000/ +curl -s http://127.0.0.1:5000/health +curl -s http://127.0.0.1:5000/does-not-exist +# You also can use command bellow of uncomment crash endpoint in code +# curl -s http://127.0.0.1:5000/crash +``` + +Pretty-print JSON: +```bash +curl -s http://127.0.0.1:5000/ | python -m json.tool +``` + +--- + +## Testing Evidence + +### Screenshots showing endpoints work + +Required screenshots are stored in `docs/screenshots/`: + +1) **Main endpoint showing complete JSON** +- `docs/screenshots/01_root_complete_json.png` + +![GET / — complete JSON](./screenshots/LAB01/01_root_complete_json.png) + + +2) **Health check response** +- 
`docs/screenshots/02_health_check.png` + +![GET /health — health probe](./screenshots/LAB01/02_health_check.png) + +3) **Formatted/pretty-printed output** +- `docs/screenshots/03_pretty_print_command.png` + +![Pretty-print example](./screenshots/LAB01/03_pretty_print_command.png) + + +### Terminal output + +```text +$ curl -s http://127.0.0.1:5000/ | python -m json.tool +{ + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Go net/http" + }, + "system": { + "hostname": "SerggAidd", + "platform": "linux", + "platform_version": "6.18.5-arch1-1", + "architecture": "amd64", + "cpu_count": 24, + "go_version": "go1.25.5 X:nodwarf5" + }, + "runtime": { + "uptime_seconds": 3, + "uptime_human": "0 hour, 0 minutes", + "current_time": "2026-01-23T19:08:17Z", + "timezone": "UTC" + }, + "request": { + "client_ip": "127.0.0.1", + "user_agent": "curl/8.18.0", + "method": "GET", + "path": "/" + }, + "endpoints": [ + { + "method": "GET", + "path": "/", + "description": "Root endpoint: returns service metadata and diagnostic information." + }, + { + "method": "GET", + "path": "/health", + "description": "Health check endpoint for monitoring and Kubernetes probes." + } + ] +} +``` + +```text +$ curl -s http://127.0.0.1:5000/health | python -m json.tool +{ + "status": "healthy", + "timestamp": "2026-01-23T19:08:22Z", + "uptime_seconds": 8 +} +``` + +```text +$ curl -s http://127.0.0.1:5000/does-not-exist | python -m json.tool +{ + "error": "Not Found", + "message": "Endpoint does not exist" +} +``` + +```text +$ curl -s http://127.0.0.1:5000/crash | python -m json.tool +{ + "error": "Internal Server Error", + "message": "An unexpected error occurred" +} +``` + +--- + +## Challenges & Solutions + +### 1) Endpoint discovery without a framework +**Problem:** `net/http` does not provide a route registry similar to Flask. 
+**Solution:** Implemented a minimal router that stores routes and exposes a sorted `endpoints` list for the root response. + +### 2) Correct client IP behind proxies +**Problem:** `RemoteAddr` can reflect only the proxy address. +**Solution:** Prefer `X-Forwarded-For` (first value) and fall back to `RemoteAddr` parsing. + +### 3) Handling internal failures without process exit +**Problem:** A panic would terminate the process by default. +**Solution:** Added panic recovery middleware that converts panics into a JSON 500 response. + +### 4) Readable evidence output +**Problem:** JSON key ordering is not guaranteed by the standard. +**Solution:** Evidence uses pretty-printing tools; the endpoints list is sorted by `(path, method)` for deterministic output. + +--- + +## Compare binary size to Python +To compare the sizes of application binaries, the following commands were executed: + +- Go application (8591356 bytes): +```bash +go mod tidy +go build -o devops-info-service . +stat -c '%n %s bytes' devops-info-service +``` +![Binary size of go app](./screenshots/LAB01/04_go_binary_size.png) + +- Python application (13959512 bytes): +```bash +pip install pyinstaller +pyinstaller --onefile app.py +stat -c '%n %s bytes' dist/app +``` +![Binary size of python app](./screenshots/LAB01/05_python_binary_size.png) + +### Summary +According to measurements, the Go binary (8.19 MiB) is noticeably smaller than a Python onefile via PyInstaller (13.31 MiB) - a difference of about 5.12 MiB (around 38.5%). This is because Go builds a single native executable with runtime and dependencies, while PyInstaller in `--onefile` mode also packages the Python interpreter and a set of libraries, resulting in a larger final artifact. This gives Go an advantage in terms of size and portability for containers and fast deployments. 
\ No newline at end of file diff --git a/app_go/docs/LAB02.md b/app_go/docs/LAB02.md new file mode 100644 index 0000000000..6025f931b5 --- /dev/null +++ b/app_go/docs/LAB02.md @@ -0,0 +1,65 @@ +# LAB02 — Docker Containerization (app_go) + +## Multi-stage build strategy +A multi-stage build strategy was used to separate the process into two independent stages: build and run. The main goal is to obtain a minimal production image containing only what is needed to run the application, without development tools. + +The strategy is based on the following principles: +1. Isolation of the build from the runtime + - The Go application is compiled in a separate "builder" stage, where the compiler, system utilities, and (in the future) module cache are available. + - The final "runtime" stage does not contain the source code, compiler, or downloaded build dependencies. + +2. Minimization of the final image + - Only the finished binary file is transferred to the runtime image, while the entire build environment is automatically discarded and not included in the final image. + - This directly reduces the image size and speeds up pull/push operations. + +3. Security by default + - The container runs as an unprivileged user to reduce the impact of an application compromise. + - Fewer files and tools within the runtime reduce the attack surface. + +4. Ready for scaling and faster rebuilds + - The Dockerfile structure enables efficient dependency caching by separately copying go.mod (and go.sum, when available), so that code changes don't require repeated, cumbersome steps. + +**The result of this strategy:** a large "builder" image is used only as a temporary environment, while the final "runtime" image is lightweight, reproducible, and more secure, which is the core value of a multi-stage approach for compiled languages. 
+ +## Terminal output showing build process and image sizes +- Complete terminal output from build process: ![Complete terminal output from build process](./screenshots/LAB02/01_complit_build.png) +- Terminal output showing container running: ![Terminal output showing container running](./screenshots/LAB02/02_container_running.png) +- Terminal output from testing endpoints (curl/httpie): ![Terminal output from testing endpoints with curl](./screenshots/LAB02/03_endpoint_check.png) +- Building only the builder stage and output the dimensions of both images:![Building only the builder stage and output the dimensions of both images](./screenshots/LAB02/04_size_comparison.png) + +## Size comparison (builder vs final image) +A comparison of Docker image sizes shows that the final `app-go:1.0` runtime image is 15.7 MB, while the `app-go:builder` image is 898 MB. This means the final image is approximately 57.2 times smaller, representing a size reduction of approximately 98.25% (a savings of approximately 882 MB). This is achieved through a multi-stage approach, resulting in a runtime image that does not contain a compiler, source code, or build dependencies, which will contribute to faster pull and push operations and a reduced attack surface. + +## The importance of multi-stage builds for compiled languages +1. Drastically reduces the final image size + - Without multi-stage, you could accidentally "slip" your entire toolchain (hundreds of megabytes or gigabytes) into production. + - Multi-stage leaves only the binary in the final image, meaning less traffic and faster pull, push, and deploy operations. + +2. Reduced attack surface (security) + - The fewer components in a container, the fewer potential vulnerabilities. + - A runtime image does not include a package manager, compiler, or unnecessary utilities that could make it easier for an attacker to establish or develop an attack. + +3. 
Cleaner and more "production-like" environment + - The final image adheres to the principle of "only what's needed to run." + - This simplifies maintenance, updates, and analysis of what's actually in production. + +4. Better reproducibility and control + - The build is performed in a fixed environment (builder image), and the run is performed in a minimal environment, which reduces the risk of "works on my machine" and makes the build more predictable. + + +## Technical explanation of each stage's purpose: +This Dockerfile is split into two stages — builder and runtime — to separate compilation from execution and keep the final image minimal. + +1. Builder stage (builder): + - Uses the base image `golang:1.25.5-bookworm`, which includes the full Go toolchain required to compile the application. + - Sets `/app` as the working directory. + - Copies only `go.mod` and runs `go mod download`. This is done for layer caching: when dependencies are added later (and `go.sum` appears), Docker can reuse the cached dependency-download layer as long as `go.mod`/`go.sum` remain unchanged. + - Copies the rest of the source code (`COPY . .`) and compiles the project. + - Builds with `CGO_ENABLED=0`, which disables CGO and typically produces a statically linked binary. This is convenient because it reduces runtime dependencies and allows using a smaller runtime image. + - Output of this stage is a single executable binary: `myapp`. +2. Runtime stage (runtime): + - Uses a lightweight base image `alpine:3.18`. + - Creates an unprivileged user appuser, so the container does not run as root (security hardening). + - Sets `/app` as the working directory and copies only the compiled binary from the builder stage using `COPY --from=builder ...`. + - Applies `--chown=appuser:appuser` during copy, so the binary is owned by the unprivileged user without needing an extra chown layer. 
+ - The final image contains no source code, no Go compiler, no module cache, and no build tools - only the minimal OS and the application binary. diff --git a/app_go/docs/screenshots/LAB01/01_root_complete_json.png b/app_go/docs/screenshots/LAB01/01_root_complete_json.png new file mode 100644 index 0000000000..5e2fb64f16 Binary files /dev/null and b/app_go/docs/screenshots/LAB01/01_root_complete_json.png differ diff --git a/app_go/docs/screenshots/LAB01/02_health_check.png b/app_go/docs/screenshots/LAB01/02_health_check.png new file mode 100644 index 0000000000..722e052290 Binary files /dev/null and b/app_go/docs/screenshots/LAB01/02_health_check.png differ diff --git a/app_go/docs/screenshots/LAB01/03_pretty_print_command.png b/app_go/docs/screenshots/LAB01/03_pretty_print_command.png new file mode 100644 index 0000000000..7269db29c5 Binary files /dev/null and b/app_go/docs/screenshots/LAB01/03_pretty_print_command.png differ diff --git a/app_go/docs/screenshots/LAB01/04_go_binary_size.png b/app_go/docs/screenshots/LAB01/04_go_binary_size.png new file mode 100644 index 0000000000..3c05be8cd8 Binary files /dev/null and b/app_go/docs/screenshots/LAB01/04_go_binary_size.png differ diff --git a/app_go/docs/screenshots/LAB01/05_python_binary_size.png b/app_go/docs/screenshots/LAB01/05_python_binary_size.png new file mode 100644 index 0000000000..54feec58b4 Binary files /dev/null and b/app_go/docs/screenshots/LAB01/05_python_binary_size.png differ diff --git a/app_go/docs/screenshots/LAB02/01_complit_build.png b/app_go/docs/screenshots/LAB02/01_complit_build.png new file mode 100644 index 0000000000..754bc37fac Binary files /dev/null and b/app_go/docs/screenshots/LAB02/01_complit_build.png differ diff --git a/app_go/docs/screenshots/LAB02/02_container_running.png b/app_go/docs/screenshots/LAB02/02_container_running.png new file mode 100644 index 0000000000..0e08928d5b Binary files /dev/null and b/app_go/docs/screenshots/LAB02/02_container_running.png differ diff --git 
a/app_go/docs/screenshots/LAB02/03_endpoint_check.png b/app_go/docs/screenshots/LAB02/03_endpoint_check.png new file mode 100644 index 0000000000..8c85d85d82 Binary files /dev/null and b/app_go/docs/screenshots/LAB02/03_endpoint_check.png differ diff --git a/app_go/docs/screenshots/LAB02/04_size_comparison.png b/app_go/docs/screenshots/LAB02/04_size_comparison.png new file mode 100644 index 0000000000..e879ffd01a Binary files /dev/null and b/app_go/docs/screenshots/LAB02/04_size_comparison.png differ diff --git a/app_go/go.mod b/app_go/go.mod new file mode 100644 index 0000000000..4f3ceac16b --- /dev/null +++ b/app_go/go.mod @@ -0,0 +1,3 @@ +module devops-info-service + +go 1.25.5 diff --git a/app_go/main.go b/app_go/main.go new file mode 100644 index 0000000000..6e8f6e5728 --- /dev/null +++ b/app_go/main.go @@ -0,0 +1,355 @@ +package main + +import ( + "encoding/json" + "fmt" + "log" + "net" + "net/http" + "os" + "runtime" + "sort" + "strings" + "time" +) + +// Service describes metadata about the running service. +type Service struct { + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + Framework string `json:"framework"` +} + +// System contains basic host and runtime details. +type System struct { + Hostname string `json:"hostname"` + Platform string `json:"platform"` + PlatformVersion string `json:"platform_version"` + Architecture string `json:"architecture"` + CPUCount int `json:"cpu_count"` + GoVersion string `json:"go_version"` +} + +// RuntimeInfo reports uptime and current timestamp. +type RuntimeInfo struct { + UptimeSeconds int `json:"uptime_seconds"` + UptimeHuman string `json:"uptime_human"` + CurrentTime string `json:"current_time"` + Timezone string `json:"timezone"` +} + +// RequestInfo captures request metadata. 
+type RequestInfo struct { + ClientIP string `json:"client_ip"` + UserAgent string `json:"user_agent"` + Method string `json:"method"` + Path string `json:"path"` +} + +// Endpoint represents a single API route. +type Endpoint struct { + Method string `json:"method"` + Path string `json:"path"` + Description string `json:"description"` +} + +// RootResponse is the response schema for the root endpoint. +type RootResponse struct { + Service Service `json:"service"` + System System `json:"system"` + Runtime RuntimeInfo `json:"runtime"` + Request RequestInfo `json:"request"` + Endpoints []Endpoint `json:"endpoints"` +} + +// HealthResponse is the response schema for /health. +type HealthResponse struct { + Status string `json:"status"` + Timestamp string `json:"timestamp"` + UptimeSeconds int `json:"uptime_seconds"` +} + +// ErrorResponse is a JSON error payload for non-200 responses. +type ErrorResponse struct { + Error string `json:"error"` + Message string `json:"message"` +} + +// routeKey uniquely identifies a route by method and path. +type routeKey struct { + Method string + Path string +} + +// route binds a handler with route metadata. +type route struct { + Method string + Path string + Description string + Handler http.HandlerFunc +} + +// router is a tiny HTTP router for exact method+path matches. +type router struct { + routes map[routeKey]route + endpoints []Endpoint +} + +// newRouter initializes an empty router instance. +func newRouter() *router { + return &router{ + routes: make(map[routeKey]route), + endpoints: make([]Endpoint, 0), + } +} + +// Handle registers a handler for an exact HTTP method and path. 
+func (rt *router) Handle(method, path, description string, h http.HandlerFunc) { + key := routeKey{Method: method, Path: path} + rt.routes[key] = route{ + Method: method, + Path: path, + Description: description, + Handler: h, + } + + rt.endpoints = append(rt.endpoints, Endpoint{ + Method: method, + Path: path, + Description: description, + }) +} + +// Endpoints returns a sorted copy of the registered endpoints list. +func (rt *router) Endpoints() []Endpoint { + out := make([]Endpoint, len(rt.endpoints)) + copy(out, rt.endpoints) + sort.Slice(out, func(i, j int) bool { + if out[i].Path == out[j].Path { + return out[i].Method < out[j].Method + } + return out[i].Path < out[j].Path + }) + return out +} + +// ServeHTTP dispatches the request to a registered route or returns a 404 JSON error. +// Note: method mismatch is treated as "not found" in this simplified router. +func (rt *router) ServeHTTP(w http.ResponseWriter, r *http.Request) { + key := routeKey{Method: r.Method, Path: r.URL.Path} + if rr, ok := rt.routes[key]; ok { + rr.Handler(w, r) + return + } + + writeJSON(w, http.StatusNotFound, ErrorResponse{ + Error: "Not Found", + Message: "Endpoint does not exist", + }) +} + +var ( + // startTime is captured once at startup and used to compute uptime. + startTime = time.Now().UTC() + + // service contains static service metadata returned by the root endpoint. + service = Service{ + Name: "devops-info-service", + Version: "1.0.0", + Description: "DevOps course info service", + Framework: "Go net/http", + } +) + +// systemInfo collects basic system/runtime information. +func systemInfo() System { + hostname, _ := os.Hostname() + + return System{ + Hostname: hostname, + Platform: runtime.GOOS, + PlatformVersion: linuxKernelRelease(), + Architecture: runtime.GOARCH, + CPUCount: runtime.NumCPU(), + GoVersion: runtime.Version(), + } +} + +// runtimeInfo computes uptime and generates a UTC timestamp in ISO 8601 format. 
+func runtimeInfo() RuntimeInfo { + now := time.Now().UTC() + uptime := now.Sub(startTime) + + seconds := int(uptime.Seconds()) + hours := seconds / 3600 + minutes := (seconds % 3600) / 60 + + return RuntimeInfo{ + UptimeSeconds: seconds, + UptimeHuman: fmt.Sprintf("%d hour, %d minutes", hours, minutes), + CurrentTime: now.Format("2006-01-02T15:04:05Z"), + Timezone: "UTC", + } +} + +// requestInfo extracts request metadata for the JSON response payload. +func requestInfo(r *http.Request) RequestInfo { + return RequestInfo{ + ClientIP: clientIP(r), + UserAgent: r.UserAgent(), + Method: r.Method, + Path: r.URL.Path, + } +} + +// clientIP returns the best-effort client IP address. +// If behind a proxy, the first X-Forwarded-For value is preferred. +func clientIP(r *http.Request) string { + xff := r.Header.Get("X-Forwarded-For") + if xff != "" { + parts := strings.Split(xff, ",") + if len(parts) > 0 { + return strings.TrimSpace(parts[0]) + } + } + + host, _, err := net.SplitHostPort(r.RemoteAddr) + if err == nil && host != "" { + return host + } + return r.RemoteAddr +} + +// linuxKernelRelease reads Linux kernel release from /proc as a best-effort value. +// On non-Linux platforms (or if the file is missing), it returns an empty string. +func linuxKernelRelease() string { + if runtime.GOOS != "linux" { + return "" + } + b, err := os.ReadFile("/proc/sys/kernel/osrelease") + if err != nil { + return "" + } + return strings.TrimSpace(string(b)) +} + +// writeJSON writes a JSON response with the given HTTP status code. +func writeJSON(w http.ResponseWriter, status int, v any) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + _ = json.NewEncoder(w).Encode(v) +} + +type statusWriter struct { + http.ResponseWriter + status int +} + +// WriteHeader captures the status code for logging. 
+func (sw *statusWriter) WriteHeader(code int) { + sw.status = code + sw.ResponseWriter.WriteHeader(code) +} + +// loggingMiddleware logs request metadata and the final status code. +func loggingMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + sw := &statusWriter{ResponseWriter: w, status: http.StatusOK} + next.ServeHTTP(sw, r) + + lat := time.Since(start) + log.Printf("Request %s %s from %s UA=%s -> %d (%s)", + r.Method, + r.URL.Path, + clientIP(r), + r.UserAgent(), + sw.status, + lat, + ) + }) +} + +// recoverMiddleware converts panics into a JSON 500 response. +func recoverMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if rec := recover(); rec != nil { + log.Printf("ERROR 500 Internal Server Error: %v", rec) + writeJSON(w, http.StatusInternalServerError, ErrorResponse{ + Error: "Internal Server Error", + Message: "An unexpected error occurred", + }) + } + }() + next.ServeHTTP(w, r) + }) +} + +// rootHandler returns the service diagnostic payload. +func rootHandler(rt *router) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + payload := RootResponse{ + Service: service, + System: systemInfo(), + Runtime: runtimeInfo(), + Request: requestInfo(r), + Endpoints: rt.Endpoints(), + } + writeJSON(w, http.StatusOK, payload) + } +} + +// healthHandler returns a minimal health probe response (HTTP 200 on success). +func healthHandler(w http.ResponseWriter, r *http.Request) { + rt := runtimeInfo() + writeJSON(w, http.StatusOK, HealthResponse{ + Status: "healthy", + Timestamp: rt.CurrentTime, + UptimeSeconds: rt.UptimeSeconds, + }) +} + +// crashHandler intentionally panics to verify 500 error handling. 
+// func crashHandler(w http.ResponseWriter, r *http.Request) { +// panic("crash test") +// } + +func main() { + host := os.Getenv("HOST") + if host == "" { + host = "0.0.0.0" + } + port := os.Getenv("PORT") + if port == "" { + port = "5000" + } + debug := strings.ToLower(os.Getenv("DEBUG")) == "true" + + if debug { + log.SetFlags(log.LstdFlags | log.Lmicroseconds) + log.Println("DEBUG enabled") + } else { + log.SetFlags(log.LstdFlags) + } + + rt := newRouter() + rt.Handle(http.MethodGet, "/", "Root endpoint: returns service metadata and diagnostic information.", rootHandler(rt)) + rt.Handle(http.MethodGet, "/health", "Health check endpoint for monitoring and Kubernetes probes.", healthHandler) + // rt.Handle(http.MethodGet, "/crash", "Intentional error to test 500 handler.", crashHandler) + + handler := recoverMiddleware(loggingMiddleware(rt)) + + addr := net.JoinHostPort(host, port) + log.Printf("Listening on http://%s", addr) + + srv := &http.Server{ + Addr: addr, + Handler: handler, + ReadHeaderTimeout: 5 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} diff --git a/app_go/main_test.go b/app_go/main_test.go new file mode 100644 index 0000000000..0b3fa5ad3e --- /dev/null +++ b/app_go/main_test.go @@ -0,0 +1,243 @@ +package main + +import ( + "encoding/json" + "io" + "log" + "net/http" + "net/http/httptest" + "runtime" + "strings" + "testing" + "time" +) + +func init() { + log.SetOutput(io.Discard) +} + +func newTestHandler() http.Handler { + rt := newRouter() + rt.Handle(http.MethodGet, "/", "Root endpoint: returns service metadata and diagnostic information.", rootHandler(rt)) + rt.Handle(http.MethodGet, "/health", "Health check endpoint for monitoring and Kubernetes probes.", healthHandler) + + return recoverMiddleware(loggingMiddleware(rt)) +} + +func TestRootEndpoint_OK_JSON_Shape(t *testing.T) { + h := newTestHandler() + + req := httptest.NewRequest(http.MethodGet, "http://example/", nil) + req.Header.Set("User-Agent", "go-test") + 
req.Header.Set("X-Forwarded-For", "1.2.3.4, 5.6.7.8") + req.RemoteAddr = "9.9.9.9:12345" + + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected %d, got %d, body=%s", http.StatusOK, rr.Code, rr.Body.String()) + } + if ct := rr.Header().Get("Content-Type"); !strings.Contains(ct, "application/json") { + t.Fatalf("expected application/json content-type, got %q", ct) + } + + var got RootResponse + if err := json.Unmarshal(rr.Body.Bytes(), &got); err != nil { + t.Fatalf("invalid json: %v, body=%s", err, rr.Body.String()) + } + + // Service + if got.Service.Name != "devops-info-service" { + t.Fatalf("service.name expected %q, got %q", "devops-info-service", got.Service.Name) + } + if got.Service.Framework != "Go net/http" { + t.Fatalf("service.framework expected %q, got %q", "Go net/http", got.Service.Framework) + } + + // Request (XFF more priority then RemoteAddr) + if got.Request.ClientIP != "1.2.3.4" { + t.Fatalf("request.client_ip expected %q, got %q", "1.2.3.4", got.Request.ClientIP) + } + if got.Request.Method != http.MethodGet { + t.Fatalf("request.method expected %q, got %q", http.MethodGet, got.Request.Method) + } + if got.Request.Path != "/" { + t.Fatalf("request.path expected %q, got %q", "/", got.Request.Path) + } + if got.Request.UserAgent != "go-test" { + t.Fatalf("request.user_agent expected %q, got %q", "go-test", got.Request.UserAgent) + } + + // System: key fields checker + if got.System.Platform != runtime.GOOS { + t.Fatalf("system.platform expected %q, got %q", runtime.GOOS, got.System.Platform) + } + if got.System.Architecture != runtime.GOARCH { + t.Fatalf("system.architecture expected %q, got %q", runtime.GOARCH, got.System.Architecture) + } + if got.System.CPUCount <= 0 { + t.Fatalf("system.cpu_count expected > 0, got %d", got.System.CPUCount) + } + if got.System.GoVersion == "" { + t.Fatalf("system.go_version expected non-empty") + } + + // Runtime + if got.Runtime.Timezone != "UTC" { + 
t.Fatalf("runtime.timezone expected %q, got %q", "UTC", got.Runtime.Timezone) + } + if got.Runtime.UptimeSeconds < 0 { + t.Fatalf("runtime.uptime_seconds expected >= 0, got %d", got.Runtime.UptimeSeconds) + } + if got.Runtime.CurrentTime == "" { + t.Fatalf("runtime.current_time expected non-empty") + } + + if len(got.Endpoints) != 2 { + t.Fatalf("expected 2 endpoints, got %d: %+v", len(got.Endpoints), got.Endpoints) + } + if got.Endpoints[0].Method != http.MethodGet || got.Endpoints[0].Path != "/" { + t.Fatalf("endpoints[0] expected GET /, got %+v", got.Endpoints[0]) + } + if got.Endpoints[1].Method != http.MethodGet || got.Endpoints[1].Path != "/health" { + t.Fatalf("endpoints[1] expected GET /health, got %+v", got.Endpoints[1]) + } +} + +func TestHealthEndpoint_OK(t *testing.T) { + h := newTestHandler() + + req := httptest.NewRequest(http.MethodGet, "http://example/health", nil) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected %d, got %d, body=%s", http.StatusOK, rr.Code, rr.Body.String()) + } + + var got HealthResponse + if err := json.Unmarshal(rr.Body.Bytes(), &got); err != nil { + t.Fatalf("invalid json: %v, body=%s", err, rr.Body.String()) + } + + if got.Status != "healthy" { + t.Fatalf("status expected %q, got %q", "healthy", got.Status) + } + if got.UptimeSeconds < 0 { + t.Fatalf("uptime_seconds expected >= 0, got %d", got.UptimeSeconds) + } + if _, err := time.Parse(time.RFC3339, got.Timestamp); err != nil { + t.Fatalf("timestamp is not RFC3339: %q err=%v", got.Timestamp, err) + } +} + +func TestNotFound_ReturnsJSON404(t *testing.T) { + h := newTestHandler() + + req := httptest.NewRequest(http.MethodGet, "http://example/nope", nil) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected %d, got %d, body=%s", http.StatusNotFound, rr.Code, rr.Body.String()) + } + + var got ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &got); err 
!= nil { + t.Fatalf("invalid json: %v, body=%s", err, rr.Body.String()) + } + + if got.Error != "Not Found" { + t.Fatalf("error expected %q, got %q", "Not Found", got.Error) + } + if got.Message == "" { + t.Fatalf("message expected non-empty") + } +} + +func TestMethodMismatch_TreatedAsNotFound(t *testing.T) { + h := newTestHandler() + + req := httptest.NewRequest(http.MethodPost, "http://example/health", nil) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Fatalf("expected %d, got %d, body=%s", http.StatusNotFound, rr.Code, rr.Body.String()) + } +} + +func TestRecoverMiddleware_ReturnsJSON500OnPanic(t *testing.T) { + panicHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + panic("boom") + }) + + h := recoverMiddleware(panicHandler) + req := httptest.NewRequest(http.MethodGet, "http://example/", nil) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusInternalServerError { + t.Fatalf("expected %d, got %d, body=%s", http.StatusInternalServerError, rr.Code, rr.Body.String()) + } + + var got ErrorResponse + if err := json.Unmarshal(rr.Body.Bytes(), &got); err != nil { + t.Fatalf("invalid json: %v, body=%s", err, rr.Body.String()) + } + if got.Error != "Internal Server Error" { + t.Fatalf("error expected %q, got %q", "Internal Server Error", got.Error) + } +} + +func TestClientIP_UsesRemoteAddrWhenNoXFF(t *testing.T) { + r := httptest.NewRequest(http.MethodGet, "http://example/", nil) + r.RemoteAddr = "10.0.0.7:5555" + + if got := clientIP(r); got != "10.0.0.7" { + t.Fatalf("expected %q, got %q", "10.0.0.7", got) + } +} + +func TestClientIP_FallsBackToRawRemoteAddrOnBadFormat(t *testing.T) { + r := httptest.NewRequest(http.MethodGet, "http://example/", nil) + r.RemoteAddr = "not-a-host-port" + + if got := clientIP(r); got != "not-a-host-port" { + t.Fatalf("expected %q, got %q", "not-a-host-port", got) + } +} + +func TestRouterEndpoints_SortedByPathThenMethod(t 
*testing.T) { + rt := newRouter() + dummy := func(w http.ResponseWriter, r *http.Request) {} + + rt.Handle(http.MethodPost, "/same", "p", dummy) + rt.Handle(http.MethodGet, "/same", "g", dummy) + rt.Handle(http.MethodGet, "/zzz", "z", dummy) + rt.Handle(http.MethodGet, "/aaa", "a", dummy) + + eps := rt.Endpoints() + + got := make([]string, 0, len(eps)) + for _, e := range eps { + got = append(got, e.Method+" "+e.Path) + } + + want := []string{ + "GET /aaa", + "GET /same", + "POST /same", + "GET /zzz", + } + + if len(got) != len(want) { + t.Fatalf("len mismatch: got=%d want=%d", len(got), len(want)) + } + for i := range want { + if got[i] != want[i] { + t.Fatalf("order mismatch at %d: got=%q want=%q\nall=%v", i, got[i], want[i], got) + } + } +} diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..ab349d5c37 --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,45 @@ +# VCS +.git +.gitignore + +# Python bytecode / cache +__pycache__/ +*.py[cod] +*$py.class +.pytest_cache/ +.mypy_cache/ +.ruff_cache/ + +# Virtual environments +venv/ +.venv/ +env/ +ENV/ + +# Packaging / build artifacts +build/ +dist/ +*.egg-info/ +.eggs/ + +# Logs +*.log +.coverage +htmlcov/ + +# IDEs / editors +.vscode/ +.idea/ +*.swp + +# Docs and misc (optional, but keeps build context small) +docs/ +*.md + +# OS junk +.DS_Store +Thumbs.db + +# Environment files / secrets (IMPORTANT) +.env +.env.* diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..e6d498e35d --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,40 @@ +# Python bytecode / cache +__pycache__/ +*.py[cod] +*$py.class + +# Virtual environments +venv/ +.venv/ +env/ +ENV/ + +# Packaging / build artifacts +build/ +dist/ +*.egg-info/ +.eggs/ +*.spec + +# Logs +*.log + +# Test / tooling cache +.pytest_cache/ +.coverage +htmlcov/ +.mypy_cache/ +.ruff_cache/ + +# IDEs / editors +.vscode/ +.idea/ +*.swp + +# OS junk +.DS_Store +Thumbs.db + +# 
Environment files +.env +.env.* \ No newline at end of file diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..846451649d --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,29 @@ +# Fixed version of Python, slim variant (lightweight, smaller attack surface) +FROM python:3.13-slim + +# Containerized Python behavior settings (no __pycache__/*.pyc, unbuffered output) +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 + +# Create an unprivileged user (safer, less attacker power) +RUN useradd --create-home --uid 1001 --shell /usr/sbin/nologin appuser + +# All further actions will be performed from the /app directory. +WORKDIR /app + +# Copying the requirements.txt file into the container, installing the dependencies described in it, and deleting the cache +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copying the app.py executable into the container +COPY app.py . + +# Granting ownership rights to the /app directory and all files in it to the user appuser +RUN chown -R appuser:appuser /app +USER appuser + +# Defining the container port on which the application will run +EXPOSE 5000 + +# Define startup command +CMD ["python", "app.py"] \ No newline at end of file diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..32c6c1b4d8 --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,242 @@ +[![Python CI](https://github.com/SerggAidd/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg)](https://github.com/SerggAidd/DevOps-Core-Course/actions/workflows/python-ci.yml) +# DevOps Info Service (Lab 01) + +Small Flask web app for DevOps labs. + +Provides: +- `GET /` — service/system/runtime/request info + list of endpoints +- `GET /health` — simple health check endpoint (for monitoring / K8s probes) + +Configuration is done via environment variables: `HOST`, `PORT`, `DEBUG`. 
+ +--- + +## Overview + +This service returns diagnostic information about: +- service metadata (name/version/description/framework) +- host system (hostname/platform/arch/CPU/python) +- runtime (uptime + current UTC time) +- current request (client IP, user-agent, method, path) +- available API endpoints (generated from Flask URL map) + +--- + +## Prerequisites + +- Python **3.11+** (Flask 3.x) +- pip / venv + +--- + +## Installation + +```bash +cd app_python + +python -m venv venv +source venv/bin/activate + +pip install -r requirements.txt +``` + +--- + +## Running the Application + +Default (0.0.0.0:5000): +```bash +python app.py +``` + +Custom port: +```bash +PORT=8080 python app.py +``` + +Custom host + port: +```bash +HOST=127.0.0.1 PORT=3000 python app.py +``` + +Enable debug-level logging (and Flask debug mode): +```bash +DEBUG=true python app.py +``` + +--- + +## API Endpoints + +### `GET /` + +Returns full service + runtime info. + +Example: +```bash +curl -s http://127.0.0.1:5000/ | jq '{service, system, runtime, request, endpoints}' +``` + +Response structure: +```json +{ + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.0.0" + }, + "system": { + "architecture": "x86_64", + "cpu_count": 24, + "hostname": "SerggAidd", + "platform": "Linux", + "platform_version": "6.18.5-arch1-1", + "python_version": "3.14.2" + }, + "runtime": { + "current_time": "2026-01-23T18:16:16Z", + "timezone": "UTC", + "uptime_human": "0 hour, 0 minutes", + "uptime_seconds": 4 + }, + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.18.0" + }, + "endpoints": [ + { + "description": "Root endpoint: returns service metadata and diagnostic information.", + "method": "GET", + "path": "/" + }, + { + "description": "Health check endpoint for monitoring and Kubernetes probes.", + "method": "GET", + "path": "/health" + } + ] +} +``` + +> Note: JSON object 
key ordering is not guaranteed by the HTTP/JSON standard. +> Use `python -m json.tool` or `jq` (like in example) only for pretty printing. + +### `GET /health` + +Health endpoint for monitoring / Kubernetes probes. + +Example: +```bash +curl -s http://127.0.0.1:5000/health | python -m json.tool +``` + +Response: +```json +{ + "status": "healthy", + "timestamp": "2026-01-23T21:25:39Z", + "uptime_seconds": 43 +} +``` + +Always returns HTTP **200** when service is running. + +--- + +## Error Handling + +- Unknown routes return JSON 404: + +Example: +```bash +curl -i http://127.0.0.1:5000/does-not-exist +``` + +Response: +```json +{"error":"Not Found","message":"Endpoint does not exist"} +``` + +- Internal errors return JSON 500 (test endpoint is intentionally NOT included by default). + +Example: +```bash +curl -i http://127.0.0.1:5000/crash +``` +Response: +```json +{"error":"Internal Server Error","message":"An unexpected error occurred"} +``` + +--- + +## Logging + +The app logs to **stdout**, which is the recommended approach for Docker/Kubernetes. + +Logged events: +- request metadata before handling (`@app.before_request`) +- response status code after handling (`@app.after_request`) +- custom 404/500 handlers + +--- + +## Configuration + +| Variable | Default | Description | +|---------:|-----------|-------------| +| `HOST` | `0.0.0.0` | Bind address | +| `PORT` | `5000` | Listen port | +| `DEBUG` | `False` | `true` enables Flask debug mode and DEBUG logging | + +--- +## Docker +Below are the basic commands for building and running an application in a container. + +### Local image build +```bash +cd app_python +docker build -t app_python:1.0 . 
+``` + +### Starting a container +The container listens on port 5000 internally, so we forward it to the host port (in the case bellow 8080) +```bash +docker run --rm -p 8080:5000 app_python:1.0 +``` +Example of running with variables: +```bash +docker run --rm -p 8080:5000 -e PORT=5000 -e DEBUG=false app_python:1.0 +``` +After that you can check endpoints in browser or in defferent terminal: +```bash +curl http://localhost:8080/ +curl http://localhost:8080/health +``` + +### Docker Hub (pull/run) +The image of this container publlished in Docker Hub. Image can be download with the following method: +```bash +docker pull sergey173/app_python:1.0.0 +docker run --rm -p 8080:5000 sergey173/app_python:1.0.0 +``` + +### Local Testing: +1. Install dependencies: +```bash +python3 -m venv venv +source venv/bin/activate +pip install -r requirements.txt +pip install -r requirements-dev.txt +``` +2. Lint: +```bash +flake8 app.py tests +``` +3. Unit tests: +```bash +pytest -q +``` \ No newline at end of file diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..1a152e338a --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,205 @@ +""" +DevOps Info Service + +Small Flask web app for DevOps labs. +Provides basic system/runtime/request information and a health check endpoint. +Configured via environment variables (HOST, PORT, DEBUG). 
+""" + +import logging +import os +import platform +import socket +from datetime import datetime, timezone + +from flask import Flask, jsonify, request + +# Flask application instance +app = Flask(__name__) + +# Runtime configuration (can be overridden via environment variables) +HOST = os.getenv("HOST", "0.0.0.0") +PORT = int(os.getenv("PORT", "5000")) +DEBUG = os.getenv("DEBUG", "False").lower() == "true" + +# Timestamp captured at startup (used to calculate uptime) +START_TIME = datetime.now(timezone.utc) + +SERVICE = { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Flask", + } + +# Logging configuration (stdout; suitable for Docker/Kubernetes) +logging.basicConfig( + level=logging.DEBUG if DEBUG else logging.INFO, + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", +) + + +# General functions of application +def system_info(): + """Return basic host and Python runtime information.""" + + return { + "hostname": socket.gethostname(), + "platform": platform.system(), + "platform_version": platform.release(), + "architecture": platform.machine(), + "cpu_count": os.cpu_count(), + "python_version": platform.python_version(), + } + + +def runtime_info(): + """Return uptime and current UTC timestamp for the running application.""" + + current_time = datetime.now(timezone.utc) + delta = current_time - START_TIME + timestamp = current_time.strftime("%Y-%m-%dT%H:%M:%SZ") + seconds = int(delta.total_seconds()) + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + return { + "uptime_seconds": seconds, + "uptime_human": f"{hours} hour, {minutes} minutes", + "current_time": timestamp, + "timezone": "UTC", + } + + +def request_info(): + """ + Extract request metadata. + + If the app is behind a reverse proxy, the client IP may be passed via + X-Forwarded-For header (first IP in the list). Fallback to remote_addr. 
+ """ + + xff = request.headers.get("X-Forwarded-For", "") + client_ip = xff.split(",")[0].strip() if xff else request.remote_addr + + return { + "client_ip": client_ip, + "user_agent": request.headers.get("User-Agent"), + "method": request.method, + "path": request.path, + } + + +def endpoints_info(): + """ + Build an API endpoints list dynamically from Flask URL map. + Description is taken from the first line of each handler's docstring. + """ + + endpoints = [] + for rule in app.url_map.iter_rules(): + if rule.endpoint == "static": + continue + + view_func = app.view_functions.get(rule.endpoint) + doc = getattr(view_func, "__doc__", None) if view_func else None + desc = (doc or "").strip().splitlines()[0] if doc else "No description" + + methods = sorted((rule.methods or set()) - {"HEAD", "OPTIONS"}) + for m in methods: + endpoints.append({ + "method": m, + "path": rule.rule, + "description": desc, + }) + + endpoints.sort(key=lambda e: (e["path"], e["method"])) + return endpoints + + +# General endpoints +@app.get("/") +def index(): + """Root endpoint: returns service metadata and diagnostic information.""" + + payload = { + "service": SERVICE, + "system": system_info(), + "runtime": runtime_info(), + "request": request_info(), + "endpoints": endpoints_info(), + } + return jsonify(payload) + + +@app.get("/health") +def health(): + """Health check endpoint for monitoring and Kubernetes probes.""" + rt = runtime_info() + payload = { + "status": "healthy", + "timestamp": rt["current_time"], + "uptime_seconds": rt["uptime_seconds"], + } + return jsonify(payload) + + +# Test-only endpoint to trigger HTTP 500 (uncomment to verify error handler) +# @app.get("/crash") +# def crash(): +# """Intentional error to test 500 handler.""" +# 1 / 0 + + +# Error Handlers +@app.errorhandler(404) +def not_found(error): + """Return JSON error for unknown endpoints.""" + + app.logger.warning("404 Not Found: %s %s", request.method, request.path) + return jsonify({ + "error": "Not 
Found", + "message": "Endpoint does not exist", + }), 404 + + +@app.errorhandler(500) +def internal_error(error): + """Return JSON error for unhandled server exceptions.""" + + app.logger.exception("500 Internal Server Error") + return jsonify({ + "error": "Internal Server Error", + "message": "An unexpected error occurred", + }), 500 + + +# Logging endpoints +@app.before_request +def log_requests(): + """Log basic request metadata before handling.""" + + app.logger.info( + "Request %s %s from %s UA=%s", + request.method, + request.path, + request.headers.get("X-Forwarded-For", request.remote_addr), + request.headers.get("User-Agent"), + ) + + +@app.after_request +def log_response(response): + """Log response status code after handling.""" + + app.logger.info( + "Response %s %s -> %s", + request.method, + request.path, + response.status_code, + ) + + return response + + +if __name__ == "__main__": + app.run(host=HOST, port=PORT, debug=DEBUG) diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md new file mode 100644 index 0000000000..6bf344aaff --- /dev/null +++ b/app_python/docs/LAB01.md @@ -0,0 +1,358 @@ +# LAB01 — DevOps Info Service (Python) + +This document describes the **Python/Flask** implementation of **DevOps Info Service** for Lab 01. +The service is a small HTTP JSON API that exposes system/runtime/request metadata and a health check endpoint. + +--- + +## Framework Selection + +### Choice: Flask + +This implementation uses **Flask** as a lightweight WSGI web framework to build a small JSON API with minimal overhead. + +**Rationale** +- **Small scope fit:** Flask is well-suited for 2–3 endpoints without extra abstractions. +- **Simple request/response model:** easy access to request metadata (`request.method`, `request.path`, headers). +- **Built-in routing + hooks:** `@app.get`, `@app.before_request`, `@app.after_request`, and error handlers reduce boilerplate. 
+- **Container-friendly logging:** logs can be emitted to stdout and captured by Docker/Kubernetes. + +### Comparison table with alternatives + +| Option | Pros | Cons | Decision | +|---|---|---|---| +| **Flask** | Minimal API, easy routing, simple hooks | Not async-first | **Selected** (best fit for small lab service) | +| FastAPI | Automatic OpenAPI, type hints, async support | More dependencies, more setup | Not needed for Lab 01 | +| Django | Full-featured framework | Heavy for a tiny JSON service | Overkill | + +--- + +## Best Practices Applied + +Below is a list of practices applied in the implementation, with short code excerpts and why each matters. + +### 1) Configuration via environment variables +**Example** +```python +HOST = os.getenv("HOST", "0.0.0.0") +PORT = int(os.getenv("PORT", "5000")) +DEBUG = os.getenv("DEBUG", "False").lower() == "true" +``` + +**Why it matters** +- Matches Docker/Kubernetes configuration conventions. +- The same code runs in different environments without changes. + +### 2) Consistent JSON error responses (404 / 500) +**Examples** +```python +@app.errorhandler(404) +def not_found(error): + return jsonify({"error": "Not Found", "message": "Endpoint does not exist"}), 404 +``` + +```python +@app.errorhandler(500) +def internal_error(error): + return jsonify({"error": "Internal Server Error", "message": "An unexpected error occurred"}), 500 +``` + +**Why it matters** +- Clients always receive machine-readable responses. +- Prevents HTML error pages, which are inconvenient for API consumers. + +### 3) Request/response logging to stdout +**Examples** +```python +@app.before_request +def log_requests(): + app.logger.info("Request %s %s ...", request.method, request.path) +``` + +```python +@app.after_request +def log_response(response): + app.logger.info("Response %s %s -> %s", request.method, request.path, response.status_code) + return response +``` + +**Why it matters** +- Stdout logging is the standard for containers. 
+- Helps verify probes and debug behavior without extra tooling. + +### 4) Proxy-aware client IP extraction +**Example** +```python +xff = request.headers.get("X-Forwarded-For", "") +client_ip = xff.split(",")[0].strip() if xff else request.remote_addr +``` + +**Why it matters** +- Preserves the real client IP when the service runs behind an ingress/reverse proxy. + +--- + +## API Documentation + +### `GET /` +Returns service metadata, system/runtime details, request metadata, and a list of available endpoints. + +**Request** +```bash +curl -s http://127.0.0.1:5000/ +``` + +**Response (schema)** +```json +{ + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.0.0" + }, + "system": { + "architecture": "x86_64", + "cpu_count": 24, + "hostname": "SerggAidd", + "platform": "Linux", + "platform_version": "6.18.5-arch1-1", + "python_version": "3.14.2" + }, + "runtime": { + "current_time": "2026-01-23T18:16:16Z", + "timezone": "UTC", + "uptime_human": "0 hour, 0 minutes", + "uptime_seconds": 4 + }, + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.18.0" + }, + "endpoints": [ + { + "description": "Root endpoint: returns service metadata and diagnostic information.", + "method": "GET", + "path": "/" + }, + { + "description": "Health check endpoint for monitoring and Kubernetes probes.", + "method": "GET", + "path": "/health" + } + ] +} +``` + +### `GET /health` +Health endpoint for monitoring / Kubernetes probes. Returns HTTP **200** when the service is running. 
+ +**Request** +```bash +curl -s http://127.0.0.1:5000/health +``` + +**Response (example)** +```json +{ + "status": "healthy", + "timestamp": "2026-01-23T19:08:22Z", + "uptime_seconds": 8 +} +``` + +--- + +### Error handling + +**404 Not Found** (unknown routes) +```bash +curl -s http://127.0.0.1:5000/does-not-exist +``` + +**Response** +```json +{"error":"Not Found","message":"Endpoint does not exist"} +``` + +**500 Internal Server Error** (unhandled exceptions) +- A test endpoint can be temporarily enabled by uncommenting the `/crash` handler in the code. + +```bash +curl -s http://127.0.0.1:5000/crash +``` + +**Response** +```json +{"error":"Internal Server Error","message":"An unexpected error occurred"} +``` + +> Note: JSON object key ordering is not guaranteed. Use `python -m json.tool` or `jq` only for pretty-printing. + +--- + +## Testing Commands + +### Setup and run + +Create venv and install dependencies: +```bash +python -m venv venv +source venv/bin/activate +pip install -r requirements.txt +``` + +Run the service: +```bash +HOST=0.0.0.0 PORT=5000 DEBUG=false python app.py +``` + +### Endpoint checks + +```bash +curl -s http://127.0.0.1:5000/ +curl -s http://127.0.0.1:5000/health +curl -s http://127.0.0.1:5000/does-not-exist +# Optional (if /crash is enabled): +# curl -s http://127.0.0.1:5000/crash +``` + +Pretty-print JSON: +```bash +curl -s http://127.0.0.1:5000/ | python -m json.tool +``` + +--- + +## Testing Evidence + +### Screenshots showing endpoints work + +Required screenshots should be stored in `docs/screenshots/`: + +1) **Main endpoint showing complete JSON** +- `docs/screenshots/01_root_complete_json.png` +![GET / — complete JSON](./screenshots/LAB01/01_root_complete_json.png) + +2) **Health check response** +- `docs/screenshots/02_health_check.png` +![GET /health — health probe](./screenshots/LAB01/02_health_check.png) + +3) **Formatted/pretty-printed output** +- `docs/screenshots/03_pretty_print_command.png` +![Pretty-print 
example](./screenshots/LAB01/03_pretty_print_command.png) + + +### Terminal output + +Include terminal output demonstrating: +```text +curl -s http://127.0.0.1:5000/ | jq '{service, system, runtime, request, endpoints}' +{ + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.0.0" + }, + "system": { + "architecture": "x86_64", + "cpu_count": 24, + "hostname": "SerggAidd", + "platform": "Linux", + "platform_version": "6.18.5-arch1-1", + "python_version": "3.14.2" + }, + "runtime": { + "current_time": "2026-01-23T21:25:29Z", + "timezone": "UTC", + "uptime_human": "0 hour, 0 minutes", + "uptime_seconds": 32 + }, + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.18.0" + }, + "endpoints": [ + { + "description": "Root endpoint: returns service metadata and diagnostic information.", + "method": "GET", + "path": "/" + }, + { + "description": "Health check endpoint for monitoring and Kubernetes probes.", + "method": "GET", + "path": "/health" + } + ] +} +``` + +```text +curl -s http://127.0.0.1:5000/health | python -m json.tool +{ + "status": "healthy", + "timestamp": "2026-01-23T21:25:39Z", + "uptime_seconds": 43 +} +``` + +```text +curl -s http://127.0.0.1:5000/does-not-exist | python -m json.tool +{ + "error": "Not Found", + "message": "Endpoint does not exist" +} +``` + +```text +curl -s http://127.0.0.1:5000/crash | python -m json.tool +{ + "error": "Internal Server Error", + "message": "An unexpected error occurred" +} +``` + +--- + +## Challenges & Solutions + +### 1) Deterministic endpoint list ordering +**Problem:** Flask’s URL map iteration order is not guaranteed to match a desired display order. +**Solution:** Collected endpoint entries and sorted by `(path, method)` before returning. + +### 2) Correct client IP behind reverse proxies +**Problem:** `request.remote_addr` may show only the proxy address. 
+**Solution:** Prefer the first value from `X-Forwarded-For` and fall back to `remote_addr`. + +### 3) Consistent 500 responses for exceptions +**Problem:** Unhandled exceptions can result in default HTML error pages. +**Solution:** Added a `500` error handler returning a JSON payload. A test-only `/crash` endpoint can be enabled to demonstrate this behavior during validation. + +--- +## GitHub Community Engagement +Starring repositories is a lightweight way to bookmark useful projects and also signals community interest, which improves discovery and encourages maintainers. Following developers and classmates helps track relevant updates, learn from real code activity, and makes collaboration easier by keeping your team’s work visible in one place. + +### My Stars: +- Star the course repository +![Course repository star](./screenshots/LAB01/04_star_for_course.png) +- Star simple-container-com/api +![Simple-container-com/api repository star](./screenshots/LAB01/05_star_for_simple-container-com.png) + +### My Follows: +- Following to Dmitriy Creed (Professor) +![Follow to Professor](./screenshots/LAB01/06_prof_follow.png) +- Following to Du Tham Lieu (TA) +![Follow to TA](./screenshots/LAB01/07_ta1_follow.png) +- Following to Marat Biriushev (TA) +![Follow to TA](./screenshots/LAB01/08_ta2_follow.png) +- Following to Alexander Rozanov (classmate) +![Follow to CM](./screenshots/LAB01/09_cm1_follow.png) +- Following to Ilvina Akhmetzyanova (classmate) +![Follow to CM](./screenshots/LAB01/10_cm2_follow.png) +- Following to Klimentii Chistyakov (classmate) +![Follow to CM](./screenshots/LAB01/11_cm3_follow.png) \ No newline at end of file diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md new file mode 100644 index 0000000000..31e6fa3a8b --- /dev/null +++ b/app_python/docs/LAB02.md @@ -0,0 +1,145 @@ +# LAB02 — Docker Containerization (app_python) + +This document describes the **Python/Flask** implementation of **DevOps Info Service** for Lab 01. 
+The service is a small HTTP JSON API that exposes system/runtime/request metadata and a health check endpoint. + +--- + +## Best Practices Applied +### 1) Base image pin and minimal base +**Example** +```dockerfile +FROM python:3.13-slim +``` + +**Why it matters** +- "Pinning" the version makes the build reproducible: you'll get the same environment today and in a month. +- `-slim` is significantly smaller than the "full" image, then faster pull/push, smaller attack surface. + +### 2) Non-root runtime +**Example** +```dockerfile +RUN useradd --create-home --uid 1001 --shell /usr/sbin/nologin appuser +... +USER appuser +``` + +**Why it matters** +- If a process in a container is compromised, the attacker will not gain root privileges. +- Important for Kubernetes/PodSecurity (root containers are often prohibited by policy) + +### 3) Correct layer order (layer caching) +**Example** +First, `requirements.txt` is copied and dependencies are installed, and only then the code is copied: +```dockerfile +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY app.py . +``` + +**Why it matters** +- Docker caches layers. +- If only `app.py` changes, the build runs faster because the dependency layer is reused. + +### 4) Installing dependencies is done with `--no-cache-dir` +**Example** +```dockerfile +RUN pip install --no-cache-dir -r requirements.txt +``` + +**Why it matters** +- The cache increases the final image size and is not required at runtime. +- Faster delivery due to a smaller file size. +- Smaller attack surface. + +### 5) `.dockerignore` file +**Example** + +Added `.dockerignore`.. + +**Why it matters** +- The build context is smaller, meaning faster builds. +- The risk of leaking sensitive files (e.g., .env) is reduced if they are accidentally stored in the directory. 
+ +## Image Information & Decisions +### Base image +`python:3.13-slim` was selected: +- Sufficiently "complete" for most Python web apps; +- Significantly smaller than the full `python:3.13`; +- Compatible with most wheel packages and the glibc environment (unlike Alpine/musl, which sometimes have build/dependency compatibility issues). + +### Final image size and assessment +**Final image size:** 129 MB. + +**Assessment:** For a Python web application based on python:3.13-slim, this is a reasonable size because the image includes not only my code but also the Python interpreter and system libraries. This size ensures fast pull/push and a smaller attack surface compared to a "full" python:3.13 image (around 1 GB). + +### Layer structure +1. `FROM python:3.13-slim` — minimal base +2. `ENV ...` — configure Python behavior in the container. +3. `RUN useradd ...` — create an unprivileged user +4. `WORKDIR /app` — working directory. +5. `COPY requirements.txt` — depends on (cached layer) +6. `RUN pip install ...` — install dependencies +7. `COPY app.py .` — copy application code. +8. `RUN chown -R appuser:appuser /app` — directory and file permissions +9. `USER appuser` — run as an unprivileged user. +10. `EXPOSE 5000` — document the port +11. `CMD["python", "app.py"]` — start command + +### Optimization Choices: +>This section almost completely replicates the Best Practices guidelines, so key patterns will be described here + +- The minimum required for the application to work is used (a slim image is used, only the necessary files are copied, the cache is cleared) +- The correct layer order eliminates unnecessary actions (no reinstallation of dependencies, etc.) 
+ + + +## Build & Run Process + +- Complete terminal output from build process: ![Complete terminal output from build process](./screenshots/LAB02/01_complit_build.png) +- Terminal output showing container running: ![Terminal output showing container running](./screenshots/LAB02/02_container_running.png) +- Terminal output from testing endpoints (curl/httpie): ![Terminal output from testing endpoints with curl](./screenshots/LAB02/03_endpoint_check.png) +- Docker Hub repository URL: https://hub.docker.com/repository/docker/sergey173/app_python + + + + +## Technical Analysis +1. **Why does your Dockerfile work the way it does?** + + **Answer:** Because it pins the runtime environment (base image + dependencies), uses efficient layer caching, and runs the app as a non-root user, which makes the container reproducible, faster to rebuild, and safer to run. + +2. **What would happen if you changed the layer order?** + + **Answer:** This is a complex question, depending on what you're changing. I'll give a couple of examples. + - If I copied the application code before installing dependencies, any code change would invalidate the cache and force pip to reinstall dependencies on every build, making rebuilds much slower (bad for CI/CD). + - If I switched to a non-root user too early, I could run into file permission issues (e.g., the app might not be able to write files under /app if ownership/permissions were not set correctly). + - If I moved ENV instructions to the very end, the app would still work, but it could reduce caching efficiency depending on what changes, and the Dockerfile would be less structured. ENV variables are meant to define runtime behavior early and clearly. + +3. **What security considerations did you implement?** + + **Answer:** + - Run as non-root user (USER appuser) to reduce impact if the application is compromised. + - Use a minimal base image (python:3.13-slim) to reduce attack surface and vulnerability exposure compared to full images. 
+ - Avoid pip cache in the final image (--no-cache-dir) to keep the image smaller and reduce unnecessary files. + - Use .dockerignore to avoid accidentally shipping development artifacts or secrets into the build context/image. + +4. **How does `.dockerignore` improve your build?** + + **Answer:** This file reduces build context and the risk of using unnecessary files and secrets during the build. +--- + +## Challenges & Solutions + +### 1) BuildKit/buildx on Arch +**Problem:** I encountered a problem where `DOCKER_BUILDKIT=1` didn't work due to a buildx call/break. + +**Solution:** Install `docker-buildx` or build using the legacy builder (without `--check`). + +### 2) Unexpected requests in logs +**Problem:** When launching a containerized application, regular requests to a non-existent endpoint appeared in the logs. + +**Solution:** Using diagnostics (command `sudo lsof -nP -iTCP:8080 -sTCP:ESTABLISHED`) it was revealed that the problem was not in the container, but in the cache of the browser being used. + + diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md new file mode 100644 index 0000000000..d2f19b63ea --- /dev/null +++ b/app_python/docs/LAB03.md @@ -0,0 +1,70 @@ +# LAB03 — Continuous Integration (CI/CD) (app_python) + +--- + +## Overview + +### Testing framework +Pytest was chosen because it is fairly easy to set up, readable, the de facto standard for Python, and works well with Flask via `app.test_client()` (you can test endpoints without running a real server). + +### Tests cover +The tests cover the key functionality of the application's HTTP layer: +- availability and correct HTTP codes for the main endpoints; +- correct responses (body/format/expected values) to typical requests; +- basic negative scenarios (e.g., invalid/missing parameters where relevant). + +The `app.run(...)` command is not tested - it is the starting point of the dev server; unit tests check it using the Flask test client. 
+ +### CI workflow trigger configuration +**Push** and **pull_request**, but only if files inside `app_python/**` (code/requirements/Dockerfile) or the workflow itself have changed. On PRs, this is necessary to ensure that the linter and tests pass before merging, and to avoid breaking the main branch. Docker builds and image pushes are performed only on pushes to `main/master` or the `lab*` branch (after successful tests). + +### Versioning strategy chosen and rationale (**CalVer + SHA**) +- `YYYY.MM.DD` (CalVer) — convenient and transparent for study/frequent builds: it is immediately clear "when it was built"; +- `sha-` — a guaranteed-unique tag for every commit, for precise rollback/debugging; +- `latest` — a convenient "latest stable build" tag for a quick `docker pull/run`. + +--- + +## Workflow Evidence +- **Successful workflow run** - https://github.com/SerggAidd/DevOps-Core-Course/actions/runs/21884873193/job/63177056944 +- **Tests passing locally (terminal output):** ![Tests passing locally](./screenshots/LAB03/01_local_test_passing.png) +- **Docker images on Docker Hub:** ![Docker images on Docker Hub](./screenshots/LAB03/02_Docker_hub.png) + +- **Status badge working in README:** ![Status badge](./screenshots/LAB03/03_status_badge_working.png) + +--- + +## Best Practices Implemented +- **Practice 1: Job Dependencies** - docker-job depends on `test` (`needs: [test]`), so the image is never pushed if the linter/tests fail. +- **Practice 2: Pull Request Checks** — A `test` (flake8 + pytest) is run on `pull_request` to catch errors before merging and avoid pulling broken code into the main branch. +- **Practice 3: Workflow Concurrency** — `concurrency` with `cancel-in-progress: true` cancels old runs if you push quickly in a row, saving minutes and eliminating "races" in results. + +- **Caching:** — `setup-python` pip cache and buildx cache (GHA) are enabled. 
On subsequent pipeline runs, dependencies/layers are pulled from the cache, and repeat runs are faster due to the elimination of re-downloads/builds. + +- **Snyk:** No vulnerabilities were found and no corrective actions were required. ![Snyk scan results](./screenshots/LAB03/04_snyk_results.png) + +--- + +## Key Decisions +- **Question:** SemVer or CalVer? Why did you choose it for your app? + + **Answer:** CalVer was chosen because it's a learning application with no public API and no release process. Clear traceability and a stable tagging scheme are more important for CI, and CalVer immediately displays the build date. + +- **Question:** What tags does your CI create? (e.g., latest, version number, etc.) + + **Answer:** CI pushes three tags to the same image: + 1. `latest` — the most recent successful build; + 2. `YYYY.MM.DD` — CalVer, build date in UTC; + 3. `sha-` — the "traced" tag for the commit. + +- **Question:** Why did you choose those triggers? + + **Answer:** The main goal is to ensure that the pipeline is only triggered by changes that impact applications (`app_python/**/*.py`, `requirements*.txt`, `Dockerfile`, and `python-ci.yml` itself), that it doesn't pollute Docker Hub (thus, pushing only to the `main/master` or `lab*` branches can trigger the process), and that basic security checks are run at startup (for example, on pull requests, a quality check/test is run specifically before merging to prevent issues from being passed to the main branch). + +- **Question:** What's tested vs not tested? + + **Answer:** Test coverage is 98%; only the server launch (`app.run(...)`) is not covered, as testing it would be redundant. 
+ + + + diff --git a/app_python/docs/screenshots/LAB01/01_root_complete_json.png b/app_python/docs/screenshots/LAB01/01_root_complete_json.png new file mode 100644 index 0000000000..0fdb582e1d Binary files /dev/null and b/app_python/docs/screenshots/LAB01/01_root_complete_json.png differ diff --git a/app_python/docs/screenshots/LAB01/02_health_check.png b/app_python/docs/screenshots/LAB01/02_health_check.png new file mode 100644 index 0000000000..92a1b3c4ef Binary files /dev/null and b/app_python/docs/screenshots/LAB01/02_health_check.png differ diff --git a/app_python/docs/screenshots/LAB01/03_pretty_print_command.png b/app_python/docs/screenshots/LAB01/03_pretty_print_command.png new file mode 100644 index 0000000000..341f696aa1 Binary files /dev/null and b/app_python/docs/screenshots/LAB01/03_pretty_print_command.png differ diff --git a/app_python/docs/screenshots/LAB01/04_star_for_course.png b/app_python/docs/screenshots/LAB01/04_star_for_course.png new file mode 100644 index 0000000000..39126d7366 Binary files /dev/null and b/app_python/docs/screenshots/LAB01/04_star_for_course.png differ diff --git a/app_python/docs/screenshots/LAB01/05_star_for_simple-container-com.png b/app_python/docs/screenshots/LAB01/05_star_for_simple-container-com.png new file mode 100644 index 0000000000..4292b6a506 Binary files /dev/null and b/app_python/docs/screenshots/LAB01/05_star_for_simple-container-com.png differ diff --git a/app_python/docs/screenshots/LAB01/06_prof_follow.png b/app_python/docs/screenshots/LAB01/06_prof_follow.png new file mode 100644 index 0000000000..d6aedbde4c Binary files /dev/null and b/app_python/docs/screenshots/LAB01/06_prof_follow.png differ diff --git a/app_python/docs/screenshots/LAB01/07_ta1_follow.png b/app_python/docs/screenshots/LAB01/07_ta1_follow.png new file mode 100644 index 0000000000..58d8181edc Binary files /dev/null and b/app_python/docs/screenshots/LAB01/07_ta1_follow.png differ diff --git 
a/app_python/docs/screenshots/LAB01/08_ta2_follow.png b/app_python/docs/screenshots/LAB01/08_ta2_follow.png new file mode 100644 index 0000000000..dde8ddba47 Binary files /dev/null and b/app_python/docs/screenshots/LAB01/08_ta2_follow.png differ diff --git a/app_python/docs/screenshots/LAB01/09_cm1_follow.png b/app_python/docs/screenshots/LAB01/09_cm1_follow.png new file mode 100644 index 0000000000..a9048265d4 Binary files /dev/null and b/app_python/docs/screenshots/LAB01/09_cm1_follow.png differ diff --git a/app_python/docs/screenshots/LAB01/10_cm2_follow.png b/app_python/docs/screenshots/LAB01/10_cm2_follow.png new file mode 100644 index 0000000000..36ab874f38 Binary files /dev/null and b/app_python/docs/screenshots/LAB01/10_cm2_follow.png differ diff --git a/app_python/docs/screenshots/LAB01/11_cm3_follow.png b/app_python/docs/screenshots/LAB01/11_cm3_follow.png new file mode 100644 index 0000000000..a1e0951b2f Binary files /dev/null and b/app_python/docs/screenshots/LAB01/11_cm3_follow.png differ diff --git a/app_python/docs/screenshots/LAB02/01_complit_build.png b/app_python/docs/screenshots/LAB02/01_complit_build.png new file mode 100644 index 0000000000..92b33686b2 Binary files /dev/null and b/app_python/docs/screenshots/LAB02/01_complit_build.png differ diff --git a/app_python/docs/screenshots/LAB02/02_container_running.png b/app_python/docs/screenshots/LAB02/02_container_running.png new file mode 100644 index 0000000000..ccafa6815a Binary files /dev/null and b/app_python/docs/screenshots/LAB02/02_container_running.png differ diff --git a/app_python/docs/screenshots/LAB02/03_endpoint_check.png b/app_python/docs/screenshots/LAB02/03_endpoint_check.png new file mode 100644 index 0000000000..01a82015c6 Binary files /dev/null and b/app_python/docs/screenshots/LAB02/03_endpoint_check.png differ diff --git a/app_python/docs/screenshots/LAB03/01_local_test_passing.png b/app_python/docs/screenshots/LAB03/01_local_test_passing.png new file mode 100644 index 
0000000000..ccdb911a04 Binary files /dev/null and b/app_python/docs/screenshots/LAB03/01_local_test_passing.png differ diff --git a/app_python/docs/screenshots/LAB03/02_Docker_hub.png b/app_python/docs/screenshots/LAB03/02_Docker_hub.png new file mode 100644 index 0000000000..fcfc7d91dc Binary files /dev/null and b/app_python/docs/screenshots/LAB03/02_Docker_hub.png differ diff --git a/app_python/docs/screenshots/LAB03/03_status_badge_working.png b/app_python/docs/screenshots/LAB03/03_status_badge_working.png new file mode 100644 index 0000000000..8701070c9e Binary files /dev/null and b/app_python/docs/screenshots/LAB03/03_status_badge_working.png differ diff --git a/app_python/docs/screenshots/LAB03/04_snyk_results.png b/app_python/docs/screenshots/LAB03/04_snyk_results.png new file mode 100644 index 0000000000..0b67538ffe Binary files /dev/null and b/app_python/docs/screenshots/LAB03/04_snyk_results.png differ diff --git a/app_python/requirements-dev.txt b/app_python/requirements-dev.txt new file mode 100644 index 0000000000..0f0aa493a1 --- /dev/null +++ b/app_python/requirements-dev.txt @@ -0,0 +1,5 @@ +-r requirements.txt +pytest +pytest-cov +flake8 + diff --git a/app_python/requirements.txt b/app_python/requirements.txt new file mode 100644 index 0000000000..5fc39647c3 --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,15 @@ +blinker==1.9.0 +certifi==2026.1.4 +charset-normalizer==3.4.4 +click==8.3.1 +DateTime==6.0 +Flask==3.1.2 +idna==3.11 +itsdangerous==2.2.0 +Jinja2==3.1.6 +MarkupSafe==3.0.3 +pytz==2025.2 +requests==2.32.5 +urllib3==2.6.3 +Werkzeug==3.1.5 +zope.interface==8.2 diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/app_python/tests/test_app.py b/app_python/tests/test_app.py new file mode 100644 index 0000000000..6e3cad9978 --- /dev/null +++ b/app_python/tests/test_app.py @@ -0,0 +1,129 @@ +# app_python/tests/test_app.py +import re +import pytest +from app 
import app + + +def _crash(): + """Test-only endpoint to trigger 500.""" + 1 / 0 + + +if "__test_crash__" not in app.view_functions: + app.add_url_rule( + "/__test__/crash", + endpoint="__test_crash__", + view_func=_crash, + methods=["GET"], + ) + + +@pytest.fixture() +def client(): + """ + Flask test client (no real server). + Important: disable exception propagation so errorhandler(500) + returns JSON instead of raising. + """ + app.config["TESTING"] = True + app.config["PROPAGATE_EXCEPTIONS"] = False + with app.test_client() as c: + yield c + + +def test_index_payload_structure(client): + """GET / returns service + system + runtime + request + endpoints.""" + resp = client.get("/") + assert resp.status_code == 200 + + data = resp.get_json() + assert isinstance(data, dict) + + # Root keys + for key in ("service", "system", "runtime", "request", "endpoints"): + assert key in data + + # Service section + assert data["service"]["name"] == "devops-info-service" + assert "version" in data["service"] + assert isinstance(data["service"]["version"], str) + + # System section + assert isinstance(data["system"]["hostname"], str) + assert isinstance(data["system"]["python_version"], str) + + # Runtime section + # values change over time => check type/range, not equality) + assert isinstance(data["runtime"]["uptime_seconds"], int) + assert data["runtime"]["uptime_seconds"] >= 0 + assert isinstance(data["runtime"]["current_time"], str) + assert data["runtime"]["timezone"] == "UTC" + + # Endpoints list contains / and /health + endpoints = data["endpoints"] + assert isinstance(endpoints, list) + paths = {e["path"] for e in endpoints} + assert "/" in paths + assert "/health" in paths + + +def test_health_payload(client): + """GET /health returns healthy + timestamp + uptime.""" + resp = client.get("/health") + assert resp.status_code == 200 + + data = resp.get_json() + assert data["status"] == "healthy" + + assert isinstance(data["uptime_seconds"], int) + assert 
data["uptime_seconds"] >= 0 + + assert isinstance(data["timestamp"], str) + # ISO-like UTC format: 2026-02-10T12:34:56Z + assert re.match( + r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$", data["timestamp"] + ) + + +def test_unknown_route_returns_404_json(client): + """GET unknown path returns JSON error via 404 handler.""" + resp = client.get("/no-such-endpoint") + assert resp.status_code == 404 + + data = resp.get_json() + assert data == { + "error": "Not Found", + "message": "Endpoint does not exist", + } + + +def test_x_forwarded_for_sets_client_ip(client): + """X-Forwarded-For first IP should be used as client_ip.""" + resp = client.get( + "/", + headers={"X-Forwarded-For": "203.0.113.10, 10.0.0.1"}) + assert resp.status_code == 200 + + data = resp.get_json() + assert data["request"]["client_ip"] == "203.0.113.10" + + +def test_method_not_allowed_returns_405(client): + """POST /health should be rejected (only GET is allowed).""" + resp = client.post("/health") + assert resp.status_code == 405 + + +def test_500_handler_returns_json(client): + """ + Internal error returns JSON via 500 handler + (triggered by test-only endpoint). 
+ """ + resp = client.get("/__test__/crash") + assert resp.status_code == 500 + + data = resp.get_json() + assert data == { + "error": "Internal Server Error", + "message": "An unexpected error occurred", + } diff --git a/pulumi/.gitignore b/pulumi/.gitignore new file mode 100644 index 0000000000..a3807e5bdb --- /dev/null +++ b/pulumi/.gitignore @@ -0,0 +1,2 @@ +*.pyc +venv/ diff --git a/pulumi/Pulumi.lab04-dev.yaml b/pulumi/Pulumi.lab04-dev.yaml new file mode 100644 index 0000000000..e23a38baa0 --- /dev/null +++ b/pulumi/Pulumi.lab04-dev.yaml @@ -0,0 +1,5 @@ +config: + lab04:sshAllowCidr: 193.32.178.182/32 + lab04:sshUser: ubuntu + lab04:sshPublicKeyPath: /home/serggaidd/.ssh/yc-lab04.pub + lab04:sshPrivateKeyPath: /home/serggaidd/.ssh/yc-lab04 diff --git a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml new file mode 100644 index 0000000000..0df015538e --- /dev/null +++ b/pulumi/Pulumi.yaml @@ -0,0 +1,11 @@ +name: lab04 +description: IaC with Pulumi for YC +runtime: + name: python + options: + toolchain: pip + virtualenv: venv +config: + pulumi:tags: + value: + pulumi:template: python diff --git a/pulumi/__main__.py b/pulumi/__main__.py new file mode 100644 index 0000000000..3412df9c30 --- /dev/null +++ b/pulumi/__main__.py @@ -0,0 +1,106 @@ +import pulumi +import pulumi_yandex as yandex + +from pathlib import Path + +cfg = pulumi.Config() + +# === Terraform parity defaults === +zone = cfg.get("zone") or "ru-central1-a" +subnet_cidr = cfg.get("subnetCidr") or "10.10.0.0/24" + +platform_id = cfg.get("platformId") or "standard-v2" +cores = int(cfg.get("cores") or 2) +memory_gb = int(cfg.get("memoryGb") or 1) +core_fraction = int(cfg.get("coreFraction") or 20) + +disk_gb = int(cfg.get("diskGb") or 10) +disk_type = cfg.get("diskType") or "network-hdd" + +ssh_user = cfg.get("sshUser") or "ubuntu" +ssh_allow_cidr = cfg.require("sshAllowCidr") + +# public key path (for metadata) +ssh_pubkey_path = cfg.get("sshPublicKeyPath") or str(Path.home() / ".ssh/yc-lab04.pub") +# private 
key path (for ssh -i). If not provided, infer by stripping ".pub" +ssh_privkey_path = cfg.get("sshPrivateKeyPath") or ssh_pubkey_path.removesuffix(".pub") + +image_family = cfg.get("imageFamily") or "ubuntu-2204-lts" + +pubkey = Path(ssh_pubkey_path).expanduser().read_text(encoding="utf-8").strip() +img = yandex.get_compute_image(family=image_family) + +net = yandex.VpcNetwork("lab04-net") + +subnet = yandex.VpcSubnet( + "lab04-subnet", + network_id=net.id, + zone=zone, + v4_cidr_blocks=[subnet_cidr], +) + +sg = yandex.VpcSecurityGroup( + "lab04-sg", + network_id=net.id, + ingresses=[ + yandex.VpcSecurityGroupIngressArgs( + protocol="TCP", + description="SSH from my IP", + v4_cidr_blocks=[ssh_allow_cidr], + port=22, + ), + yandex.VpcSecurityGroupIngressArgs( + protocol="TCP", + description="HTTP", + v4_cidr_blocks=["0.0.0.0/0"], + port=80, + ), + yandex.VpcSecurityGroupIngressArgs( + protocol="TCP", + description="App port 5000", + v4_cidr_blocks=["0.0.0.0/0"], + port=5000, + ), + ], + egresses=[ + yandex.VpcSecurityGroupEgressArgs( + protocol="ANY", + description="Allow all egress", + v4_cidr_blocks=["0.0.0.0/0"], + ) + ], +) + +vm = yandex.ComputeInstance( + "lab04-vm", + zone=zone, + platform_id=platform_id, + resources=yandex.ComputeInstanceResourcesArgs( + cores=cores, + memory=memory_gb, + core_fraction=core_fraction, + ), + boot_disk=yandex.ComputeInstanceBootDiskArgs( + initialize_params=yandex.ComputeInstanceBootDiskInitializeParamsArgs( + image_id=img.id, + size=disk_gb, + type=disk_type, + ) + ), + network_interfaces=[ + yandex.ComputeInstanceNetworkInterfaceArgs( + subnet_id=subnet.id, + nat=True, + security_group_ids=[sg.id], + ) + ], + metadata={ + "ssh-keys": f"{ssh_user}:{pubkey}", + }, +) + +public_ip = vm.network_interfaces.apply(lambda nis: nis[0]["nat_ip_address"] if nis else None) + +pulumi.export("public_ip", public_ip) +pulumi.export("ssh_cmd", pulumi.Output.concat("ssh -i ", ssh_privkey_path, " ", ssh_user, "@", public_ip)) 
+pulumi.export("http_url", pulumi.Output.concat("http://", public_ip)) diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt new file mode 100644 index 0000000000..6f210cdb04 --- /dev/null +++ b/pulumi/requirements.txt @@ -0,0 +1,3 @@ +pulumi>=3,<4 +pulumi-yandex==0.13.0 +setuptools<82 diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000000..5962e0b88a --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,12 @@ +.terraform/ +*.tfstate +*.tfstate.* +crash.log +*.tfvars +*.tfvars.json +override.tf +override.tf.json +*_override.tf +*_override.tf.json +.terraformrc +terraform.rc \ No newline at end of file diff --git a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl new file mode 100644 index 0000000000..30a72a7d8a --- /dev/null +++ b/terraform/.terraform.lock.hcl @@ -0,0 +1,9 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/yandex-cloud/yandex" { + version = "0.187.0" + hashes = [ + "h1:wHAYDfBUlXMx1CmVwNWCr/SA7+CWO8aNC914WXUXNRQ=", + ] +} diff --git a/terraform/.tflint.hcl b/terraform/.tflint.hcl new file mode 100644 index 0000000000..427121c3ef --- /dev/null +++ b/terraform/.tflint.hcl @@ -0,0 +1,4 @@ +plugin "terraform" { + enabled = true + preset = "recommended" +} diff --git a/terraform/docs/LAB04.md b/terraform/docs/LAB04.md new file mode 100644 index 0000000000..6d9c6f859d --- /dev/null +++ b/terraform/docs/LAB04.md @@ -0,0 +1,383 @@ +# LAB04 — Infrastructure as Code (Terraform & Pulumi) + +## 1. Cloud Provider & Infrastructure + +### Cloud provider chosen and rationale +**Yandex Cloud** was chosen as the provider, as it is the simplest and most accessible option in the Russian Federation with a free trial period. 
+ +### Instance type and size +- **Instance (YC Compute)**: standard-v2 +- **vCPU**: 2 +- **RAM**: 1 GB +- **Guaranteed CPU (core_fraction)**: 20% +- **Boot disk**: 10 GB, network-hdd +- **OS image**: ubuntu-2204-lts + +### Selected zone: +The `ru-central1-a` zone was chosen because resources are available there and it is the standard zone recommended by the documentation. + +### Total cost: +The trial period provides free use of the system for 60 days, plus 4,000 rubles for the capacity used. Therefore, the total cost is 0 rubles. + +### Resources created +- ***VPC Network*** +- ***Subnet*** +- ***Security Group*** (firewall rules): + - SSH 22/tcp — allowed **only from my public IP** (`/32`) + - HTTP 80/tcp — permitted from outside + - Custom 5000/tcp — permitted from outside +- ***Compute Instance (VM)*** — has a public IP (NAT) for connecting via SSH + +--- + +## 2. Terraform Implementation + +### Terraform version used: +![Terraform version](./screenshots/LAB04/01_terraform_version.png) + +### Project structure explanation +Structure of the `terraform/` directory: +```text +├── .gitignore +├── main.tf +├── outputs.tf +├── .terraform +│ └── ... +├── .terraform.lock.hcl +├── terraform.tfstate +├── terraform.tfstate.backup +├── terraform.tfvars +├── .tflint.hcl +└── variables.tf +``` + +### Key configuration decisions +- A **service account** and authorization key were used to access ***Yandex Cloud***. Connection parameters (cloud/folder/key) were passed via environment variables to avoid hardcoding identifiers and secrets in the code. +- The Terraform configuration is parameterized via variables.tf + terraform.tfvars: the zone, VM properties, subnet CIDR, SSH allowlist, and public key path are set as variables to ensure the setup is reproducible and easily portable. +- Local artifacts were excluded from the repository using the `.gitignore` file. +- The Security Group is configured based on the principle of minimum necessary access. 
- For IaC CI validation, `terraform init -backend=false` is used so that fmt/validate/tflint checks work without access to cloud credentials and without a state backend.
+ + initialize_params { + + block_size = (known after apply) + + description = (known after apply) + + image_id = "***********************" + + name = (known after apply) + + size = 10 + + snapshot_id = (known after apply) + + type = "network-hdd" + } + } + + + metadata_options (known after apply) + + + network_interface { + + index = (known after apply) + + ip_address = (known after apply) + + ipv4 = true + + ipv6 = (known after apply) + + ipv6_address = (known after apply) + + mac_address = (known after apply) + + nat = true + + nat_ip_address = (known after apply) + + nat_ip_version = (known after apply) + + security_group_ids = (known after apply) + + subnet_id = (known after apply) + } + + + placement_policy (known after apply) + + + resources { + + core_fraction = 20 + + cores = 2 + + memory = 1 + } + + + scheduling_policy (known after apply) + } + + # yandex_vpc_network.net will be created + + resource "yandex_vpc_network" "net" { + + created_at = (known after apply) + + default_security_group_id = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = { + + "lab" = "lab04" + } + + name = "lab04-net" + + subnet_ids = (known after apply) + } + + # yandex_vpc_security_group.sg will be created + + resource "yandex_vpc_security_group" "sg" { + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = { + + "lab" = "lab04" + } + + name = "lab04-sg" + + network_id = (known after apply) + + status = (known after apply) + + + egress { + + description = "Allow all egress" + + from_port = 0 + + id = (known after apply) + + labels = (known after apply) + + port = -1 + + protocol = "ANY" + + to_port = 65535 + + v4_cidr_blocks = [ + + "0.0.0.0/0", + ] + + v6_cidr_blocks = [] + # (2 unchanged attributes hidden) + } + + + ingress { + + description = "App port 5000" + + from_port = -1 + + id = (known after apply) + + labels = (known after apply) + + port = 5000 + + protocol = 
"TCP" + + to_port = -1 + + v4_cidr_blocks = [ + + "0.0.0.0/0", + ] + + v6_cidr_blocks = [] + # (2 unchanged attributes hidden) + } + + ingress { + + description = "HTTP" + + from_port = -1 + + id = (known after apply) + + labels = (known after apply) + + port = 80 + + protocol = "TCP" + + to_port = -1 + + v4_cidr_blocks = [ + + "0.0.0.0/0", + ] + + v6_cidr_blocks = [] + # (2 unchanged attributes hidden) + } + + ingress { + + description = "SSH from my IP" + + from_port = -1 + + id = (known after apply) + + labels = (known after apply) + + port = 22 + + protocol = "TCP" + + to_port = -1 + + v4_cidr_blocks = [ + + "***.***.***.***/32", + ] + + v6_cidr_blocks = [] + # (2 unchanged attributes hidden) + } + } + + # yandex_vpc_subnet.subnet will be created + + resource "yandex_vpc_subnet" "subnet" { + + created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = (known after apply) + + name = "lab04-subnet" + + network_id = (known after apply) + + v4_cidr_blocks = [ + + "**.**.*.*/24", + ] + + v6_cidr_blocks = (known after apply) + + zone = "ru-central1-a" + } + +Plan: 4 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + public_ip = (known after apply) + + ssh_cmd = (known after apply) +``` +#### Command `terraform apply`: +![`terraform apply` command](./screenshots/LAB04/03_terraform_apply_command.png) + +#### SSH connection to VM: +![SSH connection to VM](./screenshots/LAB04/04_terraform_ssh.png) + +--- + +## 3. Pulumi Implementation + +### Pulumi version and language used +- **Language used**: Python +- **Pulumi version**: `3.215.0` +![Pulumi version](./screenshots/LAB04/05_pulumi_version.png) + +### How code differs from Terraform +While Terraform uses HCL, which declaratively describes resources, Pulumi uses full-fledged Python code, which allows you to use variables, functions, conditional logic, etc. 
+ +### Advantages you discovered +In Pulumi, it is more convenient to “programmatically” collect configurations (for example, generate rules, naming, conditions). + +### Challenges encountered + +- **`setuptools` version error:** `pulumi-yandex` had problems with `pkg_resources` due to the version of `setuptools`. + + **Solution:** Change the entry in `pulumi/requirements.txt` to `setuptools<82` + + +### Terminal output + +#### Command `pulumi preview`: +![`pulumi preview` command](./screenshots/LAB04/06_pulumi_preview_command.png) + +#### Command `pulumi up`: +![`pulumi up` command](./screenshots/LAB04/07_pulumi_up_command.png) + +#### SSH connection to VM: +![SSH connection to VM](./screenshots/LAB04/08_pulumi_ssh.png) + +--- + +## 4. Terraform vs Pulumi Comparison + +### Ease of Learning +Terraform is easier to understand because HCL is short and templated, while Pulumi requires an understanding of the provider and the nuances of types/Output, but is more convenient in the long run with a good knowledge of the language. + +### Code Readability +For understanding the infrastructure, Terraform is simpler; for complex conditions and reusing logic, Pulumi (Python) is clearer. + +### Debugging +Terraform's errors usually directly point to the problem and are easier to manage in terms of resources and planning. In the case of pulumi, you sometimes have to deal with Python errors and its dependencies. + +### Documentation +While Terraform provides a large number of ready-made examples and standards for almost any case, Pulumi, although it provides good examples, has a smaller number of ready-made templates. + +### Use Case +- **Terraform** — when you need a standard IaC with ready-made modules. +- **Pulumi** — when you need complex logic, configuration generation, and custom modules. + +--- + +## 5. 
Lab 5 Preparation & Cleanup + +### VM for Lab 5 +- **Keeping VM for Lab 5:** `NO` +- **VM:** Will recreate cloud VM with terraform + +### Cleanup Status +- Terraform: +![Terraform destroy](./screenshots/LAB04/09_terraform_destroy_command.png) +- Pulumi: +![Pulumi destroy](./screenshots/LAB04/10_pulumi_destroy_command.png) + +--- + +# Bonus Task — IaC CI/CD + Infrastructure Import + +## Part 1: GitHub Actions for IaC Validation + +### Workflow file implementation +Pipeline `.github/workflows/terraform-ci.yml` executes the following commands: +- `terraform fmt -check -recursive` +- `terraform init -backend=false` +- `terraform validate` +- `tflint --init` +- `tflint` + +### Path filter configuration +The workflow is triggered only on changes in `terraform/**`, excluding `docs`. + +### tflint results and workflow running +In the first CI run, tflint found two issues: +- Missing terraform.required_version — the minimum Terraform version wasn't specified in the config. +- Missing version constraint for provider Yandex — the provider version wasn't specified in required_providers. + +These errors led to the fall of ci: +![Failed CI](./screenshots/LAB04/11_failed_ci.png) + +After correcting the file `/terraform/main.tf`, the execution was successful: +![Sucess CI](./screenshots/LAB04/12_success_ci.png) + +## Part 2: Import GitHub Repository to Terraform + +### GitHub repository import process +1. Created a Terraform configuration for the GitHub provider (`terraform/github`) +2. Define the `github_repository` resource for the existing course repository +3. Run `terraform import`, after which the repository is added to the state +4. `terraform plan` shows drift, adjusting the configuration to reality +5. 
After alignment, `plan` should show "No changes" + +### Terminal output + +#### Command `terraform import`: +![Terminal output of import command](./screenshots/LAB04/13_terraform_import_command.png) + +#### Command `terraform plan`: +![Terminal output of plan command](./screenshots/LAB04/14_terraform_plan_command.png) + + +### Why importing matters (brief explanation) +Importing allows you to bring an existing resource (created manually) under IaC control without recreating it. This reduces manual changes, minimizes drift, and makes the configuration "living documentation". + +### Benefits of managing repos with IaC +- Unified repository settings via code +- Change control via PR review +- Preventing manual drift +- Quickly re-create settings for new repositories/projects diff --git a/terraform/docs/screenshots/LAB04/01_terraform_version.png b/terraform/docs/screenshots/LAB04/01_terraform_version.png new file mode 100644 index 0000000000..da0f966276 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/01_terraform_version.png differ diff --git a/terraform/docs/screenshots/LAB04/02_terraform_init_command.png b/terraform/docs/screenshots/LAB04/02_terraform_init_command.png new file mode 100644 index 0000000000..36cb01cf00 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/02_terraform_init_command.png differ diff --git a/terraform/docs/screenshots/LAB04/03_terraform_apply_command.png b/terraform/docs/screenshots/LAB04/03_terraform_apply_command.png new file mode 100644 index 0000000000..7a74e5a105 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/03_terraform_apply_command.png differ diff --git a/terraform/docs/screenshots/LAB04/04_terraform_ssh.png b/terraform/docs/screenshots/LAB04/04_terraform_ssh.png new file mode 100644 index 0000000000..b4d62914a7 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/04_terraform_ssh.png differ diff --git a/terraform/docs/screenshots/LAB04/05_pulumi_version.png 
b/terraform/docs/screenshots/LAB04/05_pulumi_version.png new file mode 100644 index 0000000000..91a5831aa6 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/05_pulumi_version.png differ diff --git a/terraform/docs/screenshots/LAB04/06_pulumi_preview_command.png b/terraform/docs/screenshots/LAB04/06_pulumi_preview_command.png new file mode 100644 index 0000000000..3257e76066 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/06_pulumi_preview_command.png differ diff --git a/terraform/docs/screenshots/LAB04/07_pulumi_up_command.png b/terraform/docs/screenshots/LAB04/07_pulumi_up_command.png new file mode 100644 index 0000000000..33fd2948cd Binary files /dev/null and b/terraform/docs/screenshots/LAB04/07_pulumi_up_command.png differ diff --git a/terraform/docs/screenshots/LAB04/08_pulumi_ssh.png b/terraform/docs/screenshots/LAB04/08_pulumi_ssh.png new file mode 100644 index 0000000000..fb673c5fa3 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/08_pulumi_ssh.png differ diff --git a/terraform/docs/screenshots/LAB04/09_terraform_destroy_command.png b/terraform/docs/screenshots/LAB04/09_terraform_destroy_command.png new file mode 100644 index 0000000000..d891556290 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/09_terraform_destroy_command.png differ diff --git a/terraform/docs/screenshots/LAB04/10_pulumi_destroy_command.png b/terraform/docs/screenshots/LAB04/10_pulumi_destroy_command.png new file mode 100644 index 0000000000..062fa873da Binary files /dev/null and b/terraform/docs/screenshots/LAB04/10_pulumi_destroy_command.png differ diff --git a/terraform/docs/screenshots/LAB04/11_failed_ci.png b/terraform/docs/screenshots/LAB04/11_failed_ci.png new file mode 100644 index 0000000000..a76aff4d96 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/11_failed_ci.png differ diff --git a/terraform/docs/screenshots/LAB04/12_success_ci.png b/terraform/docs/screenshots/LAB04/12_success_ci.png new file mode 100644 
index 0000000000..73e33521a2 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/12_success_ci.png differ diff --git a/terraform/docs/screenshots/LAB04/13_terraform_import_command.png b/terraform/docs/screenshots/LAB04/13_terraform_import_command.png new file mode 100644 index 0000000000..1dbcd3acc2 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/13_terraform_import_command.png differ diff --git a/terraform/docs/screenshots/LAB04/14_terraform_plan_command.png b/terraform/docs/screenshots/LAB04/14_terraform_plan_command.png new file mode 100644 index 0000000000..b650597840 Binary files /dev/null and b/terraform/docs/screenshots/LAB04/14_terraform_plan_command.png differ diff --git a/terraform/github/.gitignore b/terraform/github/.gitignore new file mode 100644 index 0000000000..59e7c20f13 --- /dev/null +++ b/terraform/github/.gitignore @@ -0,0 +1,5 @@ +.terraform/ +*.tfstate +*.tfstate.* +crash.log +*.tfvars \ No newline at end of file diff --git a/terraform/github/.terraform.lock.hcl b/terraform/github/.terraform.lock.hcl new file mode 100644 index 0000000000..5fa98c5aa7 --- /dev/null +++ b/terraform/github/.terraform.lock.hcl @@ -0,0 +1,10 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/integrations/github" { + version = "6.11.1" + constraints = "~> 6.0" + hashes = [ + "h1:Hqvebe3Zc19DxRCHHLIByBvxCm+WJqGyAyYCbJDuHGE=", + ] +} diff --git a/terraform/github/main.tf b/terraform/github/main.tf new file mode 100644 index 0000000000..dbf8f9ff75 --- /dev/null +++ b/terraform/github/main.tf @@ -0,0 +1,24 @@ +terraform { + required_version = ">= 1.5.0" + required_providers { + github = { + source = "integrations/github" + version = "~> 6.0" + } + } +} + +provider "github" { + token = var.github_token + owner = var.github_owner +} + +resource "github_repository" "course_repo" { + name = var.repo_name + description = "DevOps course lab assignments" + visibility = "public" + + has_issues = true + has_wiki = false + has_projects = false +} diff --git a/terraform/github/variables.tf b/terraform/github/variables.tf new file mode 100644 index 0000000000..a08fbe8d27 --- /dev/null +++ b/terraform/github/variables.tf @@ -0,0 +1,13 @@ +variable "github_token" { + type = string + sensitive = true +} + +variable "github_owner" { + type = string +} + +variable "repo_name" { + type = string + default = "DevOps-Core-Course" +} \ No newline at end of file diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000000..a0cefc5599 --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,112 @@ +# Provider init +terraform { + required_version = ">= 1.5.0" + + required_providers { + yandex = { + source = "yandex-cloud/yandex" + version = "~> 0.130" + } + } +} + +provider "yandex" {} + +# Actual Ubuntu по family +data "yandex_compute_image" "ubuntu" { + family = var.image_family +} + +# Network/VPC +resource "yandex_vpc_network" "net" { + name = "${var.prefix}-net" + labels = { + lab = "lab04" + } +} + +# Subnet +resource "yandex_vpc_subnet" "subnet" { + name = "${var.prefix}-subnet" + zone = var.zone + network_id = yandex_vpc_network.net.id + v4_cidr_blocks = [var.subnet_cidr] +} + +# Security Group / Firewall rules 
+resource "yandex_vpc_security_group" "sg" { + name = "${var.prefix}-sg" + network_id = yandex_vpc_network.net.id + + # SSH for personal ip only IP/32 + ingress { + protocol = "TCP" + description = "SSH from my IP" + port = 22 + v4_cidr_blocks = [var.ssh_allowed_cidr] + } + + # HTTP 80 + ingress { + protocol = "TCP" + description = "HTTP" + port = 80 + v4_cidr_blocks = ["0.0.0.0/0"] + } + + # Port 5000 + ingress { + protocol = "TCP" + description = "App port 5000" + port = 5000 + v4_cidr_blocks = ["0.0.0.0/0"] + } + + # Outgoing traffic + egress { + protocol = "ANY" + description = "Allow all egress" + v4_cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + to_port = 65535 + } + + labels = { + lab = "lab04" + } +} + +# VM / Compute Instance +resource "yandex_compute_instance" "vm" { + name = var.vm_name + platform_id = var.platform_id + zone = var.zone + + resources { + cores = var.cores + memory = var.memory_gb + core_fraction = var.core_fraction + } + + boot_disk { + initialize_params { + image_id = data.yandex_compute_image.ubuntu.id + size = var.disk_gb + type = var.disk_type + } + } + + network_interface { + subnet_id = yandex_vpc_subnet.subnet.id + nat = true + security_group_ids = [yandex_vpc_security_group.sg.id] + } + + metadata = { + ssh-keys = "${var.ssh_username}:${trimspace(file(var.ssh_public_key_path))}" + } + + labels = { + lab = "lab04" + } +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 0000000000..efebcba083 --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,13 @@ +locals { + public_ip = yandex_compute_instance.vm.network_interface[0].nat_ip_address +} + +# Public IP of the virtual machine for remote access +output "public_ip" { + value = local.public_ip +} + +# SSH command for quick connection to VM +output "ssh_cmd" { + value = "ssh ${var.ssh_username}@${local.public_ip}" +} diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 0000000000..c217f4e2c4 --- /dev/null +++ 
b/terraform/variables.tf @@ -0,0 +1,67 @@ +variable "prefix" { + type = string + default = "lab04" +} + +variable "zone" { + type = string + default = "ru-central1-a" +} + +variable "subnet_cidr" { + type = string + default = "10.10.0.0/24" +} + +variable "platform_id" { + type = string + default = "standard-v2" +} + +variable "cores" { + type = number + default = 2 +} + +variable "memory_gb" { + type = number + default = 1 +} + +variable "core_fraction" { + type = number + default = 20 +} + +variable "disk_gb" { + type = number + default = 10 +} + +variable "disk_type" { + type = string + default = "network-hdd" +} + +variable "image_family" { + type = string + default = "ubuntu-2204-lts" +} + +variable "vm_name" { + type = string + default = "lab04-vm" +} + +variable "ssh_username" { + type = string + default = "ubuntu" +} + +variable "ssh_public_key_path" { + type = string +} + +variable "ssh_allowed_cidr" { + type = string +}