diff --git a/.github/workflows/ansible-deploy.yml b/.github/workflows/ansible-deploy.yml new file mode 100644 index 0000000000..0b5bf9aecd --- /dev/null +++ b/.github/workflows/ansible-deploy.yml @@ -0,0 +1,61 @@ +name: Ansible Deployment + +on: + push: + branches: [main, master, lab06] + paths: + - 'ansible/**' + - '!ansible/docs/**' + - '.github/workflows/ansible-deploy.yml' + pull_request: + branches: [main, master, lab06] + paths: + - 'ansible/**' + +jobs: + lint: + name: Ansible Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install ansible-lint + run: pip install ansible ansible-lint + + - name: Run ansible-lint + run: ansible-lint playbooks/*.yml + working-directory: ansible + + deploy: + name: Deploy Application + needs: lint + runs-on: self-hosted + if: github.event_name == 'push' + steps: + - uses: actions/checkout@v4 + + - name: Install Ansible and dependencies + run: | + pip install --user ansible docker + echo "$HOME/.local/bin" >> $GITHUB_PATH + ansible-galaxy collection install community.docker + + - name: Deploy with Ansible + run: | + cd ansible + echo "${{ secrets.ANSIBLE_VAULT_PASSWORD }}" > /tmp/vault_pass + ansible-playbook playbooks/deploy.yml \ + --vault-password-file /tmp/vault_pass \ + -i inventory/hosts.ini \ + -e "ansible_host=localhost" + rm /tmp/vault_pass + + - name: Verify Deployment + run: | + sleep 10 + curl -f http://localhost:5000 || exit 1 + curl -f http://localhost:5000/health || exit 1 diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..446a765531 --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,73 @@ +name: Python CI + +on: + push: + branches: [main, lab03] + pull_request: + branches: [main, lab03] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: "3.13" + 
+ - name: Cache pip packages + id: pip-cache + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('app_python/requirements.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + + - name: Install dependencies + run: | + echo "Cache hit: ${{ steps.pip-cache.outputs.cache-hit }}" + pip install -r app_python/requirements.txt + + - name: Lint + run: flake8 . + working-directory: app_python + + - name: Test + run: pytest tests/ -v + working-directory: app_python + + - name: Install Snyk CLI + run: npm install -g snyk + + - name: Run Snyk test + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + run: snyk test --file=requirements.txt --severity-threshold=high + working-directory: app_python + + docker: + needs: test + runs-on: ubuntu-latest + if: github.event_name == 'push' + steps: + - uses: actions/checkout@v4 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Generate version + run: echo "VERSION=$(date +%Y.%m.%d)" >> $GITHUB_ENV + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: ./app_python + push: true + tags: | + roma3213/info_service:${{ env.VERSION }} + roma3213/info_service:latest diff --git a/.gitignore b/.gitignore index 30d74d2584..cdb4650bf2 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,25 @@ -test \ No newline at end of file +test + +# Terraform +*.tfstate +*.tfstate.* +.terraform/ +terraform.tfvars +*.tfvars +.terraform.lock.hcl + +# Pulumi +pulumi/venv/ +Pulumi.*.yaml + +# Credentials +*.pem +*.key +*-credentials.json +credentials + +# Ansible +*.retry +.vault_pass +ansible/inventory/*.pyc +__pycache__/ \ No newline at end of file diff --git a/README.md b/README.md index a66ee3dc20..e9a0a6161b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # DevOps Engineering: Core Practices +[![Ansible 
Deployment](https://github.com/TurikRoma/DevOps-Core-Course/actions/workflows/ansible-deploy.yml/badge.svg)](https://github.com/TurikRoma/DevOps-Core-Course/actions/workflows/ansible-deploy.yml) [![Labs](https://img.shields.io/badge/Labs-18-blue)](#labs) [![Exam](https://img.shields.io/badge/Exam-Optional-green)](#exam-alternative) [![Duration](https://img.shields.io/badge/Duration-18%20Weeks-lightgrey)](#course-roadmap) diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..0ddcbf1672 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,11 @@ +[defaults] +inventory = inventory/hosts.ini +roles_path = roles +host_key_checking = False +remote_user = ubuntu +retry_files_enabled = False + +[privilege_escalation] +become = True +become_method = sudo +become_user = root diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md new file mode 100644 index 0000000000..735bd38df7 --- /dev/null +++ b/ansible/docs/LAB05.md @@ -0,0 +1,161 @@ +# Lab 05 — Ansible Fundamentals + +## 1. Architecture Overview + +**Ansible version:** 2.16.3 + +**Target VM:** Ubuntu 22.04 LTS on GCP (e2-micro, us-central1-a) + +**Control node:** WSL Ubuntu on Windows (MINGW64) + +**Role structure:** + +``` +ansible/ +├── inventory/ +│ └── hosts.ini +├── roles/ +│ ├── common/ +│ │ ├── tasks/main.yml +│ │ └── defaults/main.yml +│ ├── docker/ +│ │ ├── tasks/main.yml +│ │ ├── handlers/main.yml +│ │ └── defaults/main.yml +│ └── app_deploy/ +│ ├── tasks/main.yml +│ ├── handlers/main.yml +│ └── defaults/main.yml +├── playbooks/ +│ ├── site.yml +│ ├── provision.yml +│ └── deploy.yml +├── group_vars/ +│ └── all.yml (encrypted) +└── ansible.cfg +``` + +**Why roles instead of monolithic playbooks?** Reusability — each role is independent and can be used across projects. Clear separation of concerns: common setup, Docker installation, and app deployment are logically separate tasks. + +--- + +## 2. 
Roles Documentation + +### common + +**Purpose:** Basic system setup — update apt cache, install essential packages. + +**Variables:** +- `common_packages` — list of packages (python3-pip, curl, git, vim, htop, wget, unzip) + +**Handlers:** None. + +**Dependencies:** None. + +### docker + +**Purpose:** Install Docker CE from official repository, configure service, add user to docker group. + +**Variables:** +- `docker_user` — user to add to docker group (default: ubuntu) +- `docker_version` — Docker version constraint (default: latest) + +**Handlers:** +- `restart docker` — restarts Docker service when configuration changes + +**Dependencies:** None (but should run after common). + +### app_deploy + +**Purpose:** Deploy containerized application — pull image from Docker Hub, run container, verify health. + +**Variables:** +- `app_port` — application port (default: 5000) +- `app_restart_policy` — container restart policy (default: unless-stopped) + +**Handlers:** +- `restart app container` — restarts application container + +**Dependencies:** docker role must be applied first. + +--- + +## 3. Idempotency Demonstration + +**First run** (`ansible-playbook playbooks/provision.yml`): + +![First provision run](screenshots/04-provision-first-run.png) + +Result: `ok=11, changed=1` — only apt cache update triggered "changed" (expected behavior, cache was stale). All other tasks already in desired state from previous runs. + +**Second run:** + +![Second provision run](screenshots/05-provision-second-run.png) + +Result: `ok=10, changed=0` — zero changes. All tasks report "ok" (green). Cache is still fresh (`cache_valid_time: 3600`). + +**Analysis:** Roles are idempotent because they use stateful Ansible modules (`apt` with `state: present`, `service` with `state: started`, `user` with `append: yes`). These modules check current state before making changes. Running playbooks multiple times is safe — only applies changes when state drifts from desired. + +--- + +## 4. 
Ansible Vault Usage + +**What is stored:** Docker Hub credentials (`dockerhub_username`, `dockerhub_password`), application configuration (`app_name`, `docker_image`, `app_port`). + +**Vault file:** `group_vars/all.yml` — encrypted with `ansible-vault encrypt`. + +**Password management:** `--ask-vault-pass` flag on each run. No `.vault_pass` file committed. + +**Encrypted file (proof):** + +![Vault encrypted](screenshots/09-vault-encrypted.png) + +**Why Vault is important:** Credentials must never be stored in plaintext in version control. Vault encrypts sensitive data with AES256, allowing safe commits while keeping secrets protected. + +--- + +## 5. Deployment Verification + +**Deploy run** (`ansible-playbook playbooks/deploy.yml --ask-vault-pass`): + +![Deploy output](screenshots/06-deploy-run.png) + +Result: `ok=9, changed=2` — pulled image and started container. Health check passed: `"status": "healthy"`. + +**Container status** (`ansible webservers -a "docker ps"`): + +![Docker ps](screenshots/07-docker-ps.png) + +``` +CONTAINER ID IMAGE STATUS PORTS NAMES +75ac7dc24d34 roma3213/info_service:latest Up 0.0.0.0:5000->5000/tcp info_service +``` + +**Health check verification:** + +![Curl output](screenshots/08-curl-health.png) + +- `curl http://104.197.249.40:5000/health` → `{"status":"healthy","timestamp":"2026-03-05T00:13:57"}` +- `curl http://104.197.249.40:5000/` → Full service info (name, version, system, runtime, endpoints) + +--- + +## 6. Key Decisions + +**Why use roles instead of plain playbooks?** Reusability, organization, maintainability. Use same role across projects, share via Ansible Galaxy. + +**How do roles improve reusability?** Self-contained with own tasks, handlers, defaults. Mix and match in different playbooks without duplication. + +**What makes a task idempotent?** Stateful modules: `apt: state=present`, `service: state=started`, `file: state=directory`. Check current state before acting. 
+ +**How do handlers improve efficiency?** Only run when notified, execute once at end of play. Prevent unnecessary service restarts. + +**Why is Ansible Vault necessary?** Encrypts sensitive data (AES256) for safe storage in version control. `no_log: true` hides credentials from task output. + +--- + +## 7. Challenges + +- **WSL world writable directory:** Ansible ignores `ansible.cfg` in `/mnt/c/` (Windows filesystem). Fix: `export ANSIBLE_CONFIG=/mnt/c/DevOps-Core-Course/ansible/ansible.cfg` +- **Vault variables not loaded by deploy.yml:** `group_vars/all.yml` not picked up automatically. Fix: explicit `-e @group_vars/all.yml` flag +- **WSL terminal inconvenience:** vim opens incorrectly in WSL on Windows, `ansible-vault create` unusable. Fix: created file with `echo` and encrypted with `ansible-vault encrypt` diff --git a/ansible/docs/LAB06.md b/ansible/docs/LAB06.md new file mode 100644 index 0000000000..0d6ded2d8a --- /dev/null +++ b/ansible/docs/LAB06.md @@ -0,0 +1,294 @@ +# Lab 06 — Advanced Ansible & CI/CD + +## 1. Architecture Overview + +**Ansible version:** 2.16.3 + +**Target VM:** Ubuntu 22.04 LTS on GCP (e2-micro, us-central1-a) + +**Control node:** WSL Ubuntu on Windows (MINGW64) + +**CI/CD:** GitHub Actions (self-hosted runner on target VM) + +**Role structure:** + +``` +ansible/ +├── inventory/ +│ └── hosts.ini +├── roles/ +│ ├── common/ +│ │ ├── tasks/main.yml # blocks: packages, users +│ │ └── defaults/main.yml +│ ├── docker/ +│ │ ├── tasks/main.yml # blocks: docker_install, docker_config +│ │ ├── handlers/main.yml +│ │ └── defaults/main.yml +│ └── web_app/ # renamed from app_deploy +│ ├── tasks/main.yml # wipe + compose deploy +│ ├── tasks/wipe.yml # wipe logic (variable + tag gate) +│ ├── templates/docker-compose.yml.j2 +│ ├── handlers/main.yml +│ ├── defaults/main.yml +│ └── meta/main.yml # dependency: docker +├── playbooks/ +│ ├── provision.yml +│ └── deploy.yml +├── group_vars/ +│ └── all.yml (encrypted) +└── ansible.cfg +``` + +--- + +## 2. 
Blocks & Tags (Task 1) + +### common role + +**Blocks:** + +- `packages` — apt update + install, rescue (apt update retry), always (touch `/tmp/common_role_complete`) +- `users` — create application user + +**Tags:** `packages`, `users`, `common` (role-level) + +### docker role + +**Blocks:** + +- `docker_install` — prerequisites, GPG key, repo, install. Rescue: wait 10s + retry. Always: ensure docker service enabled +- `docker_config` — add user to docker group, install python3-docker + +**Tags:** `docker_install`, `docker_config`, `docker` (role-level) + +### Tag listing + +```bash +ansible-playbook playbooks/provision.yml --list-tags +``` + +![List tags](screenshots/lab06/01-task1-list-tags.png) + +TASK TAGS: `[common, docker, docker_config, docker_install, packages, users]` + +### Selective execution + +**Run only docker tasks:** + +![Tags docker](screenshots/lab06/02-task1-tags-docker.png) + +**Skip common role:** + +![Skip common](screenshots/lab06/03-task1-skip-tags-common.png) + +Both runs: `ok=8, changed=0` — only docker tasks executed, common skipped. + +### Research answers + +**Q: What happens if rescue block also fails?** Playbook fails entirely. Rescue is last chance — no nested rescue. + +**Q: Can you have nested blocks?** Yes, blocks can contain other blocks. + +**Q: How do tags inherit to tasks within blocks?** Tags on a block apply to all tasks inside it (block, rescue, always). + +--- + +## 3. Docker Compose Migration (Task 2) + +### Role rename + +`app_deploy` → `web_app`. More descriptive, aligns with wipe logic naming (`web_app_wipe`). 
+ +### Docker Compose template + +`roles/web_app/templates/docker-compose.yml.j2`: + +```yaml +services: + {{ web_app_name }}: + image: {{ web_app_image }}:{{ web_app_tag }} + container_name: {{ web_app_name }} + ports: + - "{{ web_app_port }}:{{ web_app_internal_port }}" + restart: unless-stopped +``` + +**Variables (defaults/main.yml):** + +- `web_app_name: devops-app` +- `web_app_image: roma3213/info_service` +- `web_app_tag: latest` +- `web_app_port: 5000` +- `web_app_internal_port: 5000` +- `web_app_compose_project_dir: /opt/{{ web_app_name }}` + +### Role dependencies + +`meta/main.yml` declares dependency on `docker` role. Running `deploy.yml` automatically provisions Docker first. + +### Deployment + +**First deploy:** + +![Deploy success](screenshots/lab06/04-task2-deploy-success.png) + +Result: `ok=12, changed=1` — compose file templated, container deployed. + +**Idempotency (second run):** + +![Idempotency](screenshots/lab06/05-task2-idempotency.png) + +Result: `ok=12, changed=1` — only `docker_compose_v2` shows changed due to `pull: always`. + +**Application response:** + +![Curl app](screenshots/lab06/06-task2-curl-app.png) + +`curl http://localhost:5000` returns full service info JSON. + +**Templated file on VM:** + +![Compose file](screenshots/lab06/07-task2-compose-file.png) + +### Research answers + +**Q: `restart: always` vs `restart: unless-stopped`?** Both restart on crash. Difference: `unless-stopped` won't auto-start after manual `docker stop` + daemon restart. `always` will. + +**Q: Docker Compose networks vs Docker bridge?** Default bridge: containers see each other by IP only. Compose creates project-scoped bridge with DNS — containers resolve by service name. Isolation between projects. + +**Q: Can you reference Vault variables in templates?** Yes. Ansible decrypts vault before Jinja2 rendering. Variables appear as plaintext in the rendered file on target — manage file permissions accordingly. + +--- + +## 4. 
Wipe Logic (Task 3) + +### Implementation + +**Double safety mechanism:** + +1. **Variable gate:** `web_app_wipe: false` (default) +2. **Tag gate:** `--tags web_app_wipe` must be specified + +Both must be true for wipe to execute. + +**File:** `roles/web_app/tasks/wipe.yml` — stops containers (`docker compose down`), removes compose file, removes app directory. + +**Included at top of `main.yml`** (before deploy) to support clean reinstall: wipe → deploy. + +### Test scenarios + +**Scenario 1 — Normal deploy (wipe skipped):** + +![Normal deploy](screenshots/lab06/08-task3-normal-deploy-wipe-skipped.png) + +`skipped=4` — all wipe tasks skipped (variable is false). + +**Scenario 2 — Wipe only:** + +![Wipe only](screenshots/lab06/09-task3-wipe-only.png) + +```bash +ansible-playbook deploy.yml -e "web_app_wipe=true" --tags web_app_wipe +``` + +![Verify empty](screenshots/lab06/10-task3-wipe-verify-empty.png) + +`docker ps` — empty. `/opt` — no app directory. + +**Scenario 3 — Clean reinstall (wipe → deploy):** + +![Clean reinstall](screenshots/lab06/11-task3-clean-reinstall.png) + +```bash +ansible-playbook deploy.yml -e "web_app_wipe=true" +``` + +Wipe runs first, then deploy. App running after: + +![Docker ps](screenshots/lab06/12-task3-reinstall-docker-ps.png) + +**Scenario 4a — Tag specified, variable false (blocked):** + +![Wipe blocked](screenshots/lab06/13-task3-tag-only-wipe-blocked.png) + +`skipped=4` — `when: web_app_wipe | bool` blocks execution even with tag. + +### Research answers + +**Q: Why use both variable AND tag?** Double safety. Variable alone could accidentally trigger on normal runs if set. Tag alone might run if someone uses `--tags all`. Both together = explicit intent required. + +**Q: Difference from `never` tag?** `never` tag is ansible built-in — tasks with it never run unless explicitly tagged. Our approach is more flexible: allows clean reinstall scenario (wipe + deploy in one run) which `never` tag would prevent. 
+ +**Q: Why wipe before deployment in main.yml?** Enables clean reinstall: old app removed, then fresh deploy. Logical flow: remove old → install new. + +**Q: When clean reinstall vs rolling update?** Clean reinstall for major version changes, config structure changes, or debugging. Rolling update for minor updates where state can be preserved. + +**Q: How to extend wipe to images and volumes?** Add `docker image prune -f` and `docker volume rm` tasks to wipe.yml. + +--- + +## 5. CI/CD with GitHub Actions (Task 4) + +### Workflow architecture + +``` +Push to ansible/** → Lint (ubuntu-latest) → Deploy (self-hosted on VM) +``` + +**File:** `.github/workflows/ansible-deploy.yml` + +**Jobs:** + +1. `lint` — `ansible-lint playbooks/*.yml` on GitHub-hosted runner +2. `deploy` — `ansible-playbook deploy.yml` on self-hosted runner (target VM) + +### Self-hosted runner + +Installed on target VM (`104.197.249.40`). Runner executes ansible locally, connects to localhost via SSH. + +### Secrets + +- `ANSIBLE_VAULT_PASSWORD` — vault decryption password + +### Path filters + +```yaml +paths: + - "ansible/**" + - "!ansible/docs/**" + - ".github/workflows/ansible-deploy.yml" +``` + +Docs changes don't trigger deployment. + +### Successful run + +![Workflow success](screenshots/lab06/15-task4-workflow-success.png) + +Ansible Lint (47s) + Deploy Application (6m 43s). Status: **Success**. + +### Status badge + +Added to `README.md`: + +```markdown +[![Ansible Deployment](https://github.com/TurikRoma/DevOps-Core-Course/actions/workflows/ansible-deploy.yml/badge.svg)] +``` + +### Research answers + +**Q: Security implications of SSH keys in GitHub Secrets?** Secrets are encrypted, only exposed during workflow runs. Risk: compromised repo = compromised VM access. Mitigation: use deploy keys with limited scope, rotate regularly. Self-hosted runner avoids this — no SSH key needed. 
+ +**Q: Staging → production pipeline?** Add environments in GitHub: staging deploys on push, production requires manual approval. Separate inventory files per environment. + +**Q: Rollbacks?** Deploy previous image tag: `-e "web_app_tag=previous_version"`. Or git revert + CI/CD auto-deploys. + +**Q: Self-hosted vs GitHub-hosted security?** Self-hosted: no secrets leave your infrastructure, faster, but runner must be secured. GitHub-hosted: secrets transmitted to external runner, but ephemeral (destroyed after run). + +--- + +## 6. Challenges + +- **Self-hosted runner on e2-micro — slow deploys:** First CI/CD run took 17+ minutes — `apt-get update` and package installation very slow on 1GB RAM VM. Subsequent runs faster since packages already cached. +- **Self-hosted runner SSH to localhost:** Runner runs on the same VM it deploys to. Ansible connects via SSH — needed to generate SSH key and add to `authorized_keys` for self-connection: `ssh-keygen` + `cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys`. Workflow passes `-e "ansible_host=localhost"` so inventory stays unchanged for WSL. +- **ansible-lint 54 errors:** FQCN (`apt` → `ansible.builtin.apt`), truthy (`yes/no` → `true/false`), key ordering, variable prefix (`app_name` → `web_app_name`), `ignore_errors` → `failed_when: false`, empty `site.yml`. All fixed in one pass. 
diff --git a/ansible/docs/screenshots/01-ansible-version.png b/ansible/docs/screenshots/01-ansible-version.png new file mode 100644 index 0000000000..4e44ae83e9 Binary files /dev/null and b/ansible/docs/screenshots/01-ansible-version.png differ diff --git a/ansible/docs/screenshots/02-ping-test.png b/ansible/docs/screenshots/02-ping-test.png new file mode 100644 index 0000000000..cd6646248c Binary files /dev/null and b/ansible/docs/screenshots/02-ping-test.png differ diff --git a/ansible/docs/screenshots/03-uname-test.png b/ansible/docs/screenshots/03-uname-test.png new file mode 100644 index 0000000000..503cd41c49 Binary files /dev/null and b/ansible/docs/screenshots/03-uname-test.png differ diff --git a/ansible/docs/screenshots/04-provision-first-run.png b/ansible/docs/screenshots/04-provision-first-run.png new file mode 100644 index 0000000000..ad9a7eaa57 Binary files /dev/null and b/ansible/docs/screenshots/04-provision-first-run.png differ diff --git a/ansible/docs/screenshots/05-provision-second-run.png b/ansible/docs/screenshots/05-provision-second-run.png new file mode 100644 index 0000000000..60ad4bf8dd Binary files /dev/null and b/ansible/docs/screenshots/05-provision-second-run.png differ diff --git a/ansible/docs/screenshots/06-deploy-run.png b/ansible/docs/screenshots/06-deploy-run.png new file mode 100644 index 0000000000..fbfefd0639 Binary files /dev/null and b/ansible/docs/screenshots/06-deploy-run.png differ diff --git a/ansible/docs/screenshots/07-docker-ps.png b/ansible/docs/screenshots/07-docker-ps.png new file mode 100644 index 0000000000..918df77b50 Binary files /dev/null and b/ansible/docs/screenshots/07-docker-ps.png differ diff --git a/ansible/docs/screenshots/08-curl-health.png b/ansible/docs/screenshots/08-curl-health.png new file mode 100644 index 0000000000..9b03a20db0 Binary files /dev/null and b/ansible/docs/screenshots/08-curl-health.png differ diff --git a/ansible/docs/screenshots/09-vault-encrypted.png 
b/ansible/docs/screenshots/09-vault-encrypted.png new file mode 100644 index 0000000000..1f3f443006 Binary files /dev/null and b/ansible/docs/screenshots/09-vault-encrypted.png differ diff --git a/ansible/docs/screenshots/lab06/01-task1-list-tags.png b/ansible/docs/screenshots/lab06/01-task1-list-tags.png new file mode 100644 index 0000000000..8bb67c573d Binary files /dev/null and b/ansible/docs/screenshots/lab06/01-task1-list-tags.png differ diff --git a/ansible/docs/screenshots/lab06/02-task1-tags-docker.png b/ansible/docs/screenshots/lab06/02-task1-tags-docker.png new file mode 100644 index 0000000000..6d145db756 Binary files /dev/null and b/ansible/docs/screenshots/lab06/02-task1-tags-docker.png differ diff --git a/ansible/docs/screenshots/lab06/03-task1-skip-tags-common.png b/ansible/docs/screenshots/lab06/03-task1-skip-tags-common.png new file mode 100644 index 0000000000..56f657f477 Binary files /dev/null and b/ansible/docs/screenshots/lab06/03-task1-skip-tags-common.png differ diff --git a/ansible/docs/screenshots/lab06/04-task2-deploy-success.png b/ansible/docs/screenshots/lab06/04-task2-deploy-success.png new file mode 100644 index 0000000000..4e058a3f6a Binary files /dev/null and b/ansible/docs/screenshots/lab06/04-task2-deploy-success.png differ diff --git a/ansible/docs/screenshots/lab06/05-task2-idempotency.png b/ansible/docs/screenshots/lab06/05-task2-idempotency.png new file mode 100644 index 0000000000..018140f7e9 Binary files /dev/null and b/ansible/docs/screenshots/lab06/05-task2-idempotency.png differ diff --git a/ansible/docs/screenshots/lab06/06-task2-curl-app.png b/ansible/docs/screenshots/lab06/06-task2-curl-app.png new file mode 100644 index 0000000000..dd6df01bf2 Binary files /dev/null and b/ansible/docs/screenshots/lab06/06-task2-curl-app.png differ diff --git a/ansible/docs/screenshots/lab06/07-task2-compose-file.png b/ansible/docs/screenshots/lab06/07-task2-compose-file.png new file mode 100644 index 0000000000..0aab2ccc76 Binary files 
/dev/null and b/ansible/docs/screenshots/lab06/07-task2-compose-file.png differ diff --git a/ansible/docs/screenshots/lab06/08-task3-normal-deploy-wipe-skipped.png b/ansible/docs/screenshots/lab06/08-task3-normal-deploy-wipe-skipped.png new file mode 100644 index 0000000000..e0bf9f8c34 Binary files /dev/null and b/ansible/docs/screenshots/lab06/08-task3-normal-deploy-wipe-skipped.png differ diff --git a/ansible/docs/screenshots/lab06/09-task3-wipe-only.png b/ansible/docs/screenshots/lab06/09-task3-wipe-only.png new file mode 100644 index 0000000000..7d7ae66b3a Binary files /dev/null and b/ansible/docs/screenshots/lab06/09-task3-wipe-only.png differ diff --git a/ansible/docs/screenshots/lab06/10-task3-wipe-verify-empty.png b/ansible/docs/screenshots/lab06/10-task3-wipe-verify-empty.png new file mode 100644 index 0000000000..13d01d536a Binary files /dev/null and b/ansible/docs/screenshots/lab06/10-task3-wipe-verify-empty.png differ diff --git a/ansible/docs/screenshots/lab06/11-task3-clean-reinstall.png b/ansible/docs/screenshots/lab06/11-task3-clean-reinstall.png new file mode 100644 index 0000000000..bcbc0736fd Binary files /dev/null and b/ansible/docs/screenshots/lab06/11-task3-clean-reinstall.png differ diff --git a/ansible/docs/screenshots/lab06/12-task3-reinstall-docker-ps.png b/ansible/docs/screenshots/lab06/12-task3-reinstall-docker-ps.png new file mode 100644 index 0000000000..8b56f4f64c Binary files /dev/null and b/ansible/docs/screenshots/lab06/12-task3-reinstall-docker-ps.png differ diff --git a/ansible/docs/screenshots/lab06/13-task3-tag-only-wipe-blocked.png b/ansible/docs/screenshots/lab06/13-task3-tag-only-wipe-blocked.png new file mode 100644 index 0000000000..e455984388 Binary files /dev/null and b/ansible/docs/screenshots/lab06/13-task3-tag-only-wipe-blocked.png differ diff --git a/ansible/docs/screenshots/lab06/14-task3-wipe-only-repeat.png b/ansible/docs/screenshots/lab06/14-task3-wipe-only-repeat.png new file mode 100644 index 
0000000000..1a937a49a4 Binary files /dev/null and b/ansible/docs/screenshots/lab06/14-task3-wipe-only-repeat.png differ diff --git a/ansible/docs/screenshots/lab06/15-task4-workflow-success.png b/ansible/docs/screenshots/lab06/15-task4-workflow-success.png new file mode 100644 index 0000000000..f021aed97d Binary files /dev/null and b/ansible/docs/screenshots/lab06/15-task4-workflow-success.png differ diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml new file mode 100644 index 0000000000..666e34b53f --- /dev/null +++ b/ansible/group_vars/all.yml @@ -0,0 +1,18 @@ +$ANSIBLE_VAULT;1.1;AES256 +63623637306130363633643234646638393262633734366538373332363037346661633566346232 +3330333237613732636334373332646565323035656238350a363064353562616334653863626533 +63333537376231356666383231333136363662326139643132616332626666363030396135613966 +6534626466396538630a373635643361356165326638376435666230633339323833666231396237 +35353966653833393866643834613831626130383332323233396564356634363439613062356561 +62326564663637363262616664383062386366656630326566386330636535656139623337643664 +64613038313864333638356235633636313837643664666364383465626466633862383030643532 +31626437626430353066343034346138353439666331623639393263306234653036376431613632 +31366662363436626466376132366561316166313237616664316665303364316562313537313163 +64643139396134303166393930373037376362363562356362396232336538343434373538366464 +35323162613030373264626664653433326338356136643338653635323239626439623534663830 +39636262653133636330333464333736353637373936646230373435646237343636653337643162 +33393935366630663765386134303132353764323630363031323337323063303338653837336437 +64376539303964636530376165653162613230626234633965653834616439386335646466373933 +33343034623462343065336161336634626664386634653836383138616262333932366462356536 +30623463373233313365623338363034343561393934656339643437343038636435623336303231 +6164 diff --git a/ansible/group_vars/all.yml~ 
b/ansible/group_vars/all.yml~ new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini new file mode 100644 index 0000000000..901d591319 --- /dev/null +++ b/ansible/inventory/hosts.ini @@ -0,0 +1,2 @@ +[webservers] +lab04-vm ansible_host=104.197.249.40 ansible_user=ubuntu diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml new file mode 100644 index 0000000000..95174b9e0e --- /dev/null +++ b/ansible/playbooks/deploy.yml @@ -0,0 +1,7 @@ +--- +- name: Deploy application + hosts: webservers + become: true + + roles: + - web_app diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml new file mode 100644 index 0000000000..362e19a8b2 --- /dev/null +++ b/ansible/playbooks/provision.yml @@ -0,0 +1,10 @@ +--- +- name: Provision web servers + hosts: webservers + become: true + + roles: + - role: common + tags: [common] + - role: docker + tags: [docker] diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 0000000000..734d2a51c5 --- /dev/null +++ b/ansible/roles/common/defaults/main.yml @@ -0,0 +1,8 @@ +common_packages: + - python3-pip + - curl + - git + - vim + - htop + - wget + - unzip diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 0000000000..4843af2385 --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Install common packages + become: true + tags: + - packages + block: + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + + - name: Install packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + + rescue: + - name: Fix apt cache and retry + ansible.builtin.apt: + update_cache: true + + - name: Retry package installation + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + + always: + - name: Log completion + 
ansible.builtin.file: + path: /tmp/common_role_complete + state: touch + mode: "0644" + +- name: Create application user + become: true + tags: + - users + block: + - name: Ensure app user exists + ansible.builtin.user: + name: "{{ app_user | default('appuser') }}" + shell: /bin/bash + create_home: true diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..5bf13c3441 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,3 @@ +--- +docker_user: ubuntu +docker_version: latest diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..07aa0eb290 --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..3e6c04b8bd --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,73 @@ +--- +- name: Install Docker + become: true + tags: + - docker_install + block: + - name: Install Docker prerequisites + ansible.builtin.apt: + name: + - apt-transport-https + - ca-certificates + - curl + state: present + + - name: Add Docker GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + + - name: Add Docker repository + ansible.builtin.apt_repository: + repo: "deb https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + state: present + + - name: Install Docker packages + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + state: present + update_cache: true + notify: Restart docker + + rescue: + - name: Wait before retry + ansible.builtin.pause: + seconds: 10 + + - name: Retry apt update + ansible.builtin.apt: + update_cache: true + + - name: Retry Docker installation + 
---
# web_app role: optional wipe, then deploy the container via Compose and
# verify the health endpoint before declaring success.
- name: Include wipe tasks
  ansible.builtin.include_tasks: wipe.yml
  tags:
    - web_app_wipe

- name: Deploy application with Docker Compose
  become: true
  tags:
    - app_deploy
    - compose
  block:
    - name: Create app directory
      ansible.builtin.file:
        path: "{{ web_app_compose_project_dir }}"
        state: directory
        mode: "0755"

    - name: Template docker-compose file
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ web_app_compose_project_dir }}/docker-compose.yml"
        mode: "0644"

    - name: Deploy with Docker Compose
      community.docker.docker_compose_v2:
        project_src: "{{ web_app_compose_project_dir }}"
        state: present
        pull: always

    # BUG FIX: `retries` is only honoured when paired with `until` —
    # without it Ansible forces retries to 1, so the original task gave
    # the app no time to start. Register the result and loop until the
    # health endpoint answers 200 (up to 5 tries, 10 s apart).
    - name: Verify application is running
      ansible.builtin.uri:
        url: "http://localhost:{{ web_app_port }}/health"
        status_code: 200
      register: web_app_health
      until: web_app_health.status == 200
      retries: 5
      delay: 10

  rescue:
    - name: Log deployment failure
      ansible.builtin.debug:
        msg: "Deployment of {{ web_app_name }} failed!"
ports: + - "{{ web_app_port }}:{{ web_app_internal_port }}" + restart: unless-stopped diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..44fa25304b --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,22 @@ +# 🐙 Version control +.git +.gitignore + +# 🐍 Python +__pycache__ +*.pyc +*.pyo +venv/ +.venv/ + +# 🔐 Secrets (NEVER include!) +.env +*.pem +secrets/ + +# 📝 Documentation +*.md +docs/ + +# 🧪 Tests (if not needed in container) +tests/ \ No newline at end of file diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..4de420a8f7 --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,12 @@ +# Python +__pycache__/ +*.py[cod] +venv/ +*.log + +# IDE +.vscode/ +.idea/ + +# OS +.DS_Store \ No newline at end of file diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..60e104ebef --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.13-slim + +WORKDIR /app + +RUN useradd --create-home --shell /bin/bash appuser + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +USER appuser + +EXPOSE 5000 + +CMD ["python", "app.py"] \ No newline at end of file diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..acf5db6d04 --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,113 @@ +# DevOps Info Service + +[![Python CI](https://github.com/TurikRoma/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg)](https://github.com/TurikRoma/DevOps-Core-Course/actions) + +## Overview + +Web service that reports system information and health status. Provides API endpoints for service info, hostname, platform, uptime, and request details. 
+ +## Prerequisites + +- Python 3.11+ +- pip + +## Installation + +```bash +python -m venv venv +venv\Scripts\activate +pip install -r requirements.txt +``` + +## Testing + +```bash +pytest tests/ -v +``` + +Run from `app_python/`. Dependencies in `requirements.txt`. + +## Running + +```bash +python app.py +``` + +Default: `http://0.0.0.0:5000` + +**Custom config:** + +```bash +PORT=8080 python app.py +HOST=127.0.0.1 PORT=3000 python app.py +``` + +## API Endpoints + +### `GET /` + +Returns service and system information. + +```bash +curl http://localhost:5000/ +``` + +### `GET /health` + +Health check endpoint. + +```bash +curl http://localhost:5000/health +``` + +## Configuration + +| Variable | Default | Description | +| -------- | --------- | ------------ | +| `HOST` | `0.0.0.0` | Host address | +| `PORT` | `5000` | Port number | +| `DEBUG` | `False` | Debug mode | + +## Docker + +### Build the image + +```bash +docker build -t roma3213/info_service:1.0 . +``` + +### Run a container + +```bash +docker run -p 5000:5000 roma3213/info_service:1.0 +``` + +With custom port: + +```bash +docker run -p 5000:5000 roma3213/info_service:1.0 +``` + +### Pull from Docker Hub + +```bash +docker pull roma3213/info_service:1.0 +docker run -p 5000:5000 roma3213/info_service:1.0 +``` + +## Project Structure + +``` +app_python/ +├── app.py # Main app +├── config.py # Config +├── routes/ # API routes +├── services/ # Business logic +├── tests/ +├── docs/ # Lab docs, screenshots +├── requirements.txt +├── Dockerfile # Container image +├── .dockerignore +├── .gitignore +└── README.md +``` diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..f480eca123 --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,42 @@ +import logging +from fastapi import FastAPI, Request, HTTPException +from fastapi.responses import JSONResponse +from routes.system_info import router as system_info_router +from config import HOST, PORT +import uvicorn + +logging.basicConfig( 
"""Application configuration, sourced from environment variables.

Every setting has a safe default so the service runs with no env set:
HOST (bind address), PORT (listen port), DEBUG (truthy only for the
literal string "true", case-insensitive).
"""
import os

# String defaults used when the corresponding env var is absent.
_DEFAULTS = {'HOST': '0.0.0.0', 'PORT': '5000', 'DEBUG': 'False'}

HOST = os.environ.get('HOST', _DEFAULTS['HOST'])
PORT = int(os.environ.get('PORT', _DEFAULTS['PORT']))
DEBUG = os.environ.get('DEBUG', _DEFAULTS['DEBUG']).lower() == 'true'
**Clear Function Names** - get_system_info(), get_uptime(), etc. + +## API Documentation + +### `GET /` + +Returns service info, system info, runtime, request details, and endpoints list. + +```bash +curl http://localhost:5000/ +``` + +### `GET /health` + +Returns health status and uptime. + +```bash +curl http://localhost:5000/health +``` + +### Error Responses + +```bash +curl http://localhost:5000/something +# {"error": "Not Found", "message": "Endpoint does not exist"} +``` + +## Testing Evidence + +Screenshots in `docs/screenshots/`: + +- `01-main-endpoint.png` +- `02-health-check.png` +- `03-formatted-output.png` + +## Challenges & Solutions + +1. **Function name conflict** — Named the route handler `get_system_info()` which conflicted with the imported function from `services.system_info`. When calling `get_system_info()` inside the route handler, Python was calling the route handler itself instead of the imported function, causing recursion errors. Fixed by importing the entire module as `import services.system_info as system_info_service` and accessing functions via `system_info_service.get_system_info()`. + +2. **Timezone method call error** — Used `timezone.utc.tzname()` without arguments, but `tzname()` method requires a datetime object as parameter. This caused `TypeError: timezone.tzname() takes exactly one argument (0 given)`. Fixed by calling `tzname()` on a datetime object: `datetime.now(timezone.utc).tzname()`. + +## GitHub Community + +\*Starring repositories helps with discovery and bookmarking — it signals project quality to the community and encourages maintainers. Following developers builds professional connections and keeps you informed about relevant projects and industry trends. diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md new file mode 100644 index 0000000000..1af6c7542f --- /dev/null +++ b/app_python/docs/LAB02.md @@ -0,0 +1,177 @@ +# Lab 02 — Docker Containerization + +## Docker Best Practices Applied + +### 1. 
Non-root user + +```dockerfile +RUN useradd --create-home --shell /bin/bash appuser +USER appuser +``` + +**Why:** Running as root means container escape = root on host. Non-root user limits damage if compromised. + +### 2. Specific base image version + +```dockerfile +FROM python:3.13-slim +``` + +**Why:** `python:latest` can change unexpectedly. Pinning `3.13-slim` ensures reproducible builds. Slim is ~150 MB vs ~1 GB for full image. + +### 3. Layer ordering (dependencies before code) + +```dockerfile +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +COPY . . +``` + +**Why:** Dependencies change rarely, code changes often. This order caches dependency layer, making rebuilds fast (seconds vs minutes). + +### 4. .dockerignore + +**Why:** Excludes `venv/`, `.git/`, docs from build context. Faster builds, prevents secrets from entering image. + +### 5. --no-cache-dir for pip + +```dockerfile +RUN pip install --no-cache-dir -r requirements.txt +``` + +**Why:** No reinstalls in Docker, cache just wastes space. Reduces image size. + +--- + +## Image Information & Decisions + +**Base image:** `python:3.13-slim` — Debian-based minimal Python image. Chosen over Alpine (musl libc compatibility issues). Good balance of size (~150 MB) and compatibility. + +**Final image size:** ~170-200 MB (check with `docker images roma3213/info_service:1.0`) + +**Layer structure:** + +1. Base image (python:3.13-slim) +2. Create user +3. Set working directory +4. Copy requirements.txt +5. Install dependencies (cached separately) +6. Copy application code +7. Switch to non-root user + +--- + +## Build & Run Process + +### Build + +```bash +cd app_python +docker build -t roma3213/info_service:1.0 . 
+``` + +**Terminal output:** + +``` +[+] Building 31.3s (11/11) FINISHED docker:desktop-linux + => [internal] load build definition from Dockerfile 0.2s + => => transferring dockerfile: 281B 0.0s + => [internal] load metadata for docker.io/library/python:3.13-slim 2.0s + => [internal] load .dockerignore 0.1s + => => transferring context: 289B 0.0s + => [1/6] FROM docker.io/library/python:3.13-slim@sha256:3de9a8d7aedbb7984dc18f2dff178a7850f16c1ae7c34ba9d7ecc23d0755e35f 10.1s + => => resolve docker.io/library/python:3.13-slim@sha256:3de9a8d7aedbb7984dc18f2dff178a7850f16c1ae7c34ba9d7ecc23d0755e35f 0.1s + => => sha256:03af238a5946948d06e8485bb27b05831c5d13f0b3781a01fe347aaf847c2400 1.29MB / 1.29MB 0.8s + => => sha256:0c8d55a45c0dc58de60579b9cc5b708de9e7957f4591fc7de941b67c7e245da0 29.78MB / 29.78MB 8.8s + => => sha256:f1cadbd7abd229d3d8c50b4aa381724025f6bfe89783a8d2bfd6fa751a75946b 252B / 252B 1.0s + => => sha256:686599c79c8709aa5d9f1abf19c75b1760ae0a0ea0335206fe1db9a8793e09f6 11.80MB / 11.80MB 6.2s + => => extracting sha256:0c8d55a45c0dc58de60579b9cc5b708de9e7957f4591fc7de941b67c7e245da0 0.7s + => => extracting sha256:03af238a5946948d06e8485bb27b05831c5d13f0b3781a01fe347aaf847c2400 0.1s + => => extracting sha256:686599c79c8709aa5d9f1abf19c75b1760ae0a0ea0335206fe1db9a8793e09f6 0.4s + => => extracting sha256:f1cadbd7abd229d3d8c50b4aa381724025f6bfe89783a8d2bfd6fa751a75946b 0.0s + => [internal] load build context 0.1s + => => transferring context: 4.89kB 0.0s + => [2/6] WORKDIR /app 0.3s + => [3/6] RUN useradd --create-home --shell /bin/bash appuser 1.0s + => [4/6] COPY requirements.txt . 0.1s + => [5/6] RUN pip install --no-cache-dir -r requirements.txt 13.1s + => [6/6] COPY . . 
0.1s + => exporting to image 3.8s + => => exporting layers 2.4s + => => exporting manifest sha256:b218227291f74e8761d0df79e23c22fae99f0311107901b5e76cb89c72a1a55e 0.1s + => => exporting config sha256:f2ef2715b0f7271a2011dcc48f2375f243776b5a3330bbdfaba7f34b8ebc3b7d 0.1s + => => exporting attestation manifest sha256:9c55cad9161240ac44ab4ca9a11105878271afcfe5245d94f27ef097c0effa65 0.1s + => => exporting manifest list sha256:9fb3c79f5a1e50a7a91bb55d089095581954ffc37b3236163b3b76b037bf8ab5 0.1s + => => naming to docker.io/library/info_service:1.0 0.0s + => => unpacking to docker.io/library/info_service:1.0 0.9s +``` + +### Run + +```bash +docker run -d -p 5000:5000 roma3213/info_service:1.0 +``` + +**Check container status:** +```bash +docker ps +``` + +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +91128e9d6039 roma3213/info_service:1.0 "python app.py" 2 minutes ago Up 2 minutes 0.0.0.0:5000->5000/tcp, [::]:5000->5000/tcp epic_elbakyan +``` + +### Testing + +```bash +curl http://localhost:5000/ +``` + +**Output:** +```json +{"service":{"name":"devops-info-service","version":"1.0.0","description":"DevOps course info service","framework":"FastAPI"},"system":{"hostname":"91128e9d6039","platform":"Linux","platform_version":"Linux-5.15.167.4-microsoft-standard-WSL2-x86_64-with-glibc2.41","architecture":"x86_64","cpu_count":12,"python_version":"3.13.12"},"runtime":{"uptime_seconds":186,"uptime_human":"0 hours, 3 minutes","current_time":"2026-02-13T11:01:43.559091+00:00","timezone":"UTC"},"request":{"client_ip":"172.17.0.1","user_agent":"curl/8.8.0","method":"GET","path":"/"},"endpoints":[{"path":"/","method":"GET","description":"Service information"},{"path":"/health","method":"GET","description":"Health check"}]} +``` + +```bash +curl http://localhost:5000/health +``` + +**Output:** +```json +{"status":"healthy","timestamp":"2026-02-13T11:02:11.626478+00:00","uptime_seconds":214} +``` + +### Docker Hub + +```bash +docker tag roma3213/info_service:1.0 
roma3213/info_service:1.0 +docker login +docker push roma3213/info_service:1.0 +``` + +**Docker Hub repository URL:** https://hub.docker.com/r/roma3213/info_service + +--- + +## Technical Analysis + +**Why layer order works:** Code changes frequently, dependencies don't. By copying requirements first, dependency layer is cached. Changing code only rebuilds from `COPY . .` onwards. + +**If order changed:** `COPY . .` before `pip install` would invalidate cache on every code change → slow rebuilds. + +**Security:** + +- Non-root user prevents privilege escalation +- Slim image = smaller attack surface +- `.dockerignore` prevents secrets in image + +**How .dockerignore helps:** Excludes `venv/`, `.git/` from build context → faster builds, smaller context. + +--- + +## Challenges & Solutions + +**Port mapping:** Forgot `-p 5000:5000` flag initially → app wasn't accessible. Always specify port mapping explicitly. + +**Docker Hub tagging:** Must use full name `roma3213/info_service:1.0`, not just `info_service:1.0`, otherwise Docker looks in official repo. diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md new file mode 100644 index 0000000000..3a6a5f68d5 --- /dev/null +++ b/app_python/docs/LAB03.md @@ -0,0 +1,53 @@ +# Lab 03 — CI/CD + +## 1. Overview + +**Testing framework:** pytest. Simple syntax, built-in TestClient for FastAPI, Python standard. + +**Test structure:** Classes TestRootEndpoint, TestHealthEndpoint, TestErrorCases. Each endpoint has status, structure, and type checks. + +**Endpoints covered:** `GET /` (JSON structure, service/system/runtime/request/endpoints), `GET /health` (status, timestamp, uptime_seconds), `GET /nonexistent` → 404. + +**CI triggers:** push and pull_request on main, lab03. + +**Versioning:** CalVer (YYYY.MM.DD). Suits web service, continuous deployment. + +**Actions used:** actions/checkout, setup-python, cache, docker/login-action, build-push-action — standard, well-maintained, pinned versions. + +--- + +## 2. 
Workflow Evidence + +- **Successful workflow run:** [GitHub Actions](https://github.com/TurikRoma/DevOps-Core-Course/actions) +- **Tests passing locally:** ![pytest](screenshots/04-pytest-passed.png) +- **Docker image:** [roma3213/info_service](https://hub.docker.com/r/roma3213/info_service) +- **Status badge:** in README (clickable, links to Actions) + +--- + +## 3. Best Practices Implemented + +- **Job dependencies:** Docker push only after tests pass +- **Conditional push:** image only on push, not on PR +- **Caching:** actions/cache for ~/.cache/pip. With only 5 deps, no noticeable time improvement — kept for consistency and future scaling. +- **Snyk:** --severity-threshold=high. 2 vulnerabilities in starlette (ReDoS, Throttling) — upgraded fastapi to 0.129+ + +--- + +## 4. Key Decisions + +**Versioning Strategy:** CalVer. Service, not library — date matters more than breaking changes. + +**Docker Tags:** `roma3213/info_service:YYYY.MM.DD` and `roma3213/info_service:latest` + +**Workflow Triggers:** push/PR on main, lab03 — catch issues before merge, validate on feature branches. + +**Test Coverage:** endpoints / and /health, JSON structure, 404. Not covered: config, exception handlers. + +--- + +## 5. Challenges + +- Snyk 401: snyk/actions/setup auth error. Fix: `npm install -g snyk` +- Snyk "Required packages missing": snyk/actions/python runs in Docker. 
Fix: run in same job where deps are installed +- starlette vulnerabilities: Fix: fastapi>=0.129.0 diff --git a/app_python/docs/screenshots/01-main-endpoint.png b/app_python/docs/screenshots/01-main-endpoint.png new file mode 100644 index 0000000000..6d0a41efdd Binary files /dev/null and b/app_python/docs/screenshots/01-main-endpoint.png differ diff --git a/app_python/docs/screenshots/02-health-check.png b/app_python/docs/screenshots/02-health-check.png new file mode 100644 index 0000000000..88f95b7b67 Binary files /dev/null and b/app_python/docs/screenshots/02-health-check.png differ diff --git a/app_python/docs/screenshots/03-formatted-output.png b/app_python/docs/screenshots/03-formatted-output.png new file mode 100644 index 0000000000..0cca35f0bd Binary files /dev/null and b/app_python/docs/screenshots/03-formatted-output.png differ diff --git a/app_python/docs/screenshots/04-pytest-passed.png b/app_python/docs/screenshots/04-pytest-passed.png new file mode 100644 index 0000000000..64a0717b99 Binary files /dev/null and b/app_python/docs/screenshots/04-pytest-passed.png differ diff --git a/app_python/docs/screenshots/05-workflow-success.png b/app_python/docs/screenshots/05-workflow-success.png new file mode 100644 index 0000000000..2cb49f016f Binary files /dev/null and b/app_python/docs/screenshots/05-workflow-success.png differ diff --git a/app_python/docs/screenshots/06-dockerhub-tags.png b/app_python/docs/screenshots/06-dockerhub-tags.png new file mode 100644 index 0000000000..6641f25ea9 Binary files /dev/null and b/app_python/docs/screenshots/06-dockerhub-tags.png differ diff --git a/app_python/requirements.txt b/app_python/requirements.txt new file mode 100644 index 0000000000..d985e95697 --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,5 @@ +fastapi>=0.129.0 +uvicorn[standard]==0.32.0 +httpx>=0.27.0 +pytest>=8.0.0 +flake8>=7.0.0 diff --git a/app_python/routes/system_info.py b/app_python/routes/system_info.py new file mode 100644 index 
"""Service-layer helpers that gather host, runtime and request metadata."""
from fastapi import Request
import os
import platform
import socket
from datetime import datetime, timezone

# Moment the module was imported; reference point for uptime reporting.
start_time = datetime.now(timezone.utc)


def get_system_info():
    """Return host and interpreter details as a plain dict."""
    return {
        "hostname": socket.gethostname(),
        "platform": platform.system(),
        "platform_version": platform.platform(),
        "architecture": platform.machine(),
        "cpu_count": os.cpu_count(),
        "python_version": platform.python_version(),
    }


def get_uptime():
    """Return elapsed time since import as total seconds plus a label."""
    total = int((datetime.now(timezone.utc) - start_time).total_seconds())
    hours, remainder = divmod(total, 3600)
    minutes = remainder // 60
    return {
        'seconds': total,
        'human': f"{hours} hours, {minutes} minutes",
    }


def get_runtime_info():
    """Combine uptime with the current UTC timestamp and timezone name."""
    uptime = get_uptime()
    now = datetime.now(timezone.utc)
    return {
        "uptime_seconds": uptime['seconds'],
        "uptime_human": uptime['human'],
        'current_time': now.isoformat(),
        'timezone': now.tzname(),
    }


def get_service_info():
    """Return static metadata describing this service."""
    return {
        "name": "devops-info-service",
        "version": "1.0.0",
        "description": "DevOps course info service",
        "framework": "FastAPI",
    }


def get_request_info(request: Request):
    """Extract client-facing details from an incoming request."""
    return {
        "client_ip": request.client.host,
        "user_agent": request.headers.get('user-agent'),
        "method": request.method,
        "path": request.url.path,
    }


def get_endpoints_info():
    """Return the list of routes the service exposes."""
    return [
        {"path": "/", "method": "GET", "description": "Service information"},
        {"path": "/health", "method": "GET", "description": "Health check"},
    ]
"description" in service + assert "framework" in service + assert isinstance(service["name"], str) + assert isinstance(service["framework"], str) + + def test_system_fields(self): + """Verify system object has required fields.""" + response = client.get("/") + system = response.json()["system"] + assert "hostname" in system + assert "platform" in system + assert "architecture" in system + assert "python_version" in system + assert isinstance(system["hostname"], str) + + def test_runtime_fields(self): + """Verify runtime object has required fields.""" + response = client.get("/") + runtime = response.json()["runtime"] + assert "uptime_seconds" in runtime + assert "uptime_human" in runtime + assert isinstance(runtime["uptime_seconds"], int) + assert runtime["uptime_seconds"] >= 0 + + def test_request_fields(self): + """Verify request info is present.""" + response = client.get("/") + request_info = response.json()["request"] + assert "client_ip" in request_info + assert "method" in request_info + assert "path" in request_info + assert request_info["method"] == "GET" + assert request_info["path"] == "/" + + def test_endpoints_is_list(self): + """Verify endpoints is a non-empty list.""" + response = client.get("/") + endpoints = response.json()["endpoints"] + assert isinstance(endpoints, list) + assert len(endpoints) >= 2 + assert any(ep["path"] == "/" for ep in endpoints) + assert any(ep["path"] == "/health" for ep in endpoints) + + +class TestHealthEndpoint: + """Tests for GET /health endpoint.""" + + def test_status_code(self): + """Verify health endpoint returns 200.""" + response = client.get("/health") + assert response.status_code == 200 + + def test_status_healthy(self): + """Verify health check returns healthy status.""" + response = client.get("/health") + data = response.json() + assert data["status"] == "healthy" + + def test_required_fields(self): + """Verify health response has required fields.""" + response = client.get("/health") + data = 
response.json() + assert "timestamp" in data + assert "uptime_seconds" in data + assert isinstance(data["uptime_seconds"], int) + assert data["uptime_seconds"] >= 0 + + +class TestErrorCases: + """Tests for error handling.""" + + def test_404_nonexistent_endpoint(self): + """Verify 404 for non-existent endpoint.""" + response = client.get("/nonexistent") + assert response.status_code == 404 + + def test_404_response_structure(self): + """Verify 404 response has expected structure.""" + response = client.get("/nonexistent") + data = response.json() + assert "error" in data + assert "message" in data + assert "Not Found" in data["error"] or "Not Found" in data["message"] diff --git a/docs/LAB04.md b/docs/LAB04.md new file mode 100644 index 0000000000..7241cc2d5f --- /dev/null +++ b/docs/LAB04.md @@ -0,0 +1,98 @@ +# Lab 04 — Infrastructure as Code (Terraform & Pulumi) + +## 1. Cloud Provider & Infrastructure + +**Cloud provider:** Google Cloud Platform (GCP) + +**Rationale:** Already had an account and familiar interface. GCP offers free tier (e2-micro), good documentation, and integration with Terraform/Pulumi. + +**Instance type:** e2-micro (free tier) — 2 vCPU (shared), 1 GB RAM + +**Region/Zone:** us-central1-a — zone is part of GCP always-free tier + +**Cost:** $0 (free tier) + +**Resources created:** + +- VM instance (lab04-vm) +- Firewall (lab04-vm-firewall) — ports 22, 80, 5000 +- Public IP (via access_config) + +--- + +## 2. Terraform Implementation + +**Terraform version:** v1.14.5 + +**Project structure:** + +``` +terraform/ +├── main.tf # Provider, firewall, VM +├── variables.tf # project_id, region, zone, machine_type, ssh_public_key_path +├── outputs.tf # instance_public_ip, ssh_command +├── terraform.tfvars # Values (gitignored) +└── Outputs/ # Command outputs +``` + +**Key decisions:** Variables for project_id, zone, machine_type. Firewall with source_ranges 0.0.0.0/0 (dynamic IP). SSH key via metadata. + +**Challenges:** None. 
+ +**Public IP :** 35.193.180.39 + +**Terminal output:** + +- [terraform init](terraform/Outputs/terraform-init.txt) +- [terraform plan](terraform/Outputs/terraform-plan.txt) +- [terraform apply](terraform/Outputs/terraform-apply.txt) +- [SSH connection](terraform/Outputs/ssh-connect.png) + +--- + +## 3. Pulumi Implementation + +**Pulumi version:** v3.222.0 + +**Language:** Python + +**How code differs from Terraform:** Imperative approach — Python instead of HCL. Config via `pulumi config set`. Resources created by function calls (gcp.compute.Firewall, gcp.compute.Instance). + +**Advantages:** Familiar language, can use loops and functions. Pulumi Cloud for state (free tier). + +**Challenges:** CLI installation — Chocolatey did not see pulumi in PATH, had to download archive and specify full path (or add to PATH). Pulumi Cloud login on first config set. + +**Public IP:** 136.119.173.134 + +**Terminal output:** + +- [pulumi preview](pulumi/Outputs/pului-preview.txt) +- [pulumi up](pulumi/Outputs/pulumi-up.rxt) +- [SSH connection](pulumi/Outputs/ssh-conntection.png) + +--- + +## 4. Terraform vs Pulumi Comparison + +**Ease of Learning:** Terraform — installation via choco/download. Pulumi — requires CLI, login, pip install. HCL is easier for simple infrastructure. + +**Code Readability:** Terraform HCL — declarative, structured. Pulumi Python — familiar for developers. For this task both are readable. + +**Debugging:** Roughly the same. Terraform — plan shows changes, clear errors. Pulumi — Python traceback, preview is similar. Both tools provide enough information for debugging. + +**Documentation:** Roughly the same. Terraform — Registry, many examples. Pulumi — good official documentation, Registry. For basic tasks both are well documented. + +**Use Case:** Terraform — standard choice for IaC, large teams, multi-cloud. Pulumi — when Python/TS logic, tests, or complex dynamics are needed. + +--- + +## 5. 
Lab 5 Preparation & Cleanup + +**VM for Lab 5:** No + +**Plan:** Recreate cloud VM in Lab 5 via Terraform or Pulumi (code is ready). + +**Cleanup status:** Terraform and Pulumi resources destroyed. + +- [terraform destroy](terraform/Outputs/terraform-destroy.txt) +- [pulumi destroy](pulumi/Outputs/pulumi-destroy.txt) diff --git a/pulumi/Outputs/instance-on-GCP.png b/pulumi/Outputs/instance-on-GCP.png new file mode 100644 index 0000000000..4c8ad62b34 Binary files /dev/null and b/pulumi/Outputs/instance-on-GCP.png differ diff --git a/pulumi/Outputs/pului-preview.txt b/pulumi/Outputs/pului-preview.txt new file mode 100644 index 0000000000..373e60cdb4 --- /dev/null +++ b/pulumi/Outputs/pului-preview.txt @@ -0,0 +1,28 @@ + +Admin@DESKTOP-5I9FNA3 MINGW64 /c/DevOps-Core-Course/pulumi (lab04) +$ pulumi preview +Previewing update (dev) + +View in Browser (Ctrl+O): https://app.pulumi.com/TurikRoma-org/lab04-vm/dev/previews/fda84f3b-5e11-4d8a-bd63-cf6b3ef6b708 + Type Name Plan Info + + pulumi:pulumi:Stack lab04-vm-dev create + ├─ pulumi:providers:gcp default_9_12_0 1 warning +Downloads +Installing plugin gcp-9.12.0 [------------------------------------------------------------------------------>___] 46.98 MiB/48.98 MiB + + ├─ gcp:compute:Firewall lab04-vm-firewall create + + └─ gcp:compute:Instance lab04-vm create +Diagnostics: + pulumi:providers:gcp (default_9_12_0): + warning: unable to detect a global setting for GCP Project. + Pulumi will rely on per-resource settings for this operation. 
+ Set the GCP Project by using: + `pulumi config set gcp:project ` + If you would like to disable this warning use: + `pulumi config set gcp:disableGlobalProjectWarning true` + +Outputs: + instance_public_ip: [unknown] + ssh_command : [unknown] + +Resources: + + 3 to create \ No newline at end of file diff --git a/pulumi/Outputs/pulumi-destroy.txt b/pulumi/Outputs/pulumi-destroy.txt new file mode 100644 index 0000000000..71393878a4 --- /dev/null +++ b/pulumi/Outputs/pulumi-destroy.txt @@ -0,0 +1,37 @@ +Admin@DESKTOP-5I9FNA3 MINGW64 /c/DevOps-Core-Course/pulumi (lab04) +$ pulumi destroy +Previewing destroy (dev) + +View in Browser (Ctrl+O): https://app.pulumi.com/TurikRoma-org/lab04-vm/dev/previews/a56e72c3-6e32-4f22-a3f2-f478db656c88 + + Type Name Plan + - pulumi:pulumi:Stack lab04-vm-dev delete + - ├─ gcp:compute:Firewall lab04-vm-firewall delete + - └─ gcp:compute:Instance lab04-vm delete +Outputs: + - instance_public_ip: "136.119.173.134" + - ssh_command : "ssh ubuntu@136.119.173.134" + +Resources: + - 3 to delete + +Do you want to perform this destroy? yes +Destroying (dev) + +View in Browser (Ctrl+O): https://app.pulumi.com/TurikRoma-org/lab04-vm/dev/updates/2 + + Type Name Status + - pulumi:pulumi:Stack lab04-vm-dev deleted (0.23s) + - ├─ gcp:compute:Firewall lab04-vm-firewall deleted (13s) + - └─ gcp:compute:Instance lab04-vm deleted (66s) +Outputs: + - instance_public_ip: "136.119.173.134" + - ssh_command : "ssh ubuntu@136.119.173.134" + +Resources: + - 3 deleted + +Duration: 1m9s + +The resources in the stack have been deleted, but the history and configuration associated with the stack are still maintained. +If you want to remove the stack completely, run `pulumi stack rm dev`. 
\ No newline at end of file diff --git a/pulumi/Outputs/pulumi-up.rxt b/pulumi/Outputs/pulumi-up.rxt new file mode 100644 index 0000000000..93ca141d66 --- /dev/null +++ b/pulumi/Outputs/pulumi-up.rxt @@ -0,0 +1,60 @@ +Admin@DESKTOP-5I9FNA3 MINGW64 /c/DevOps-Core-Course/pulumi (lab04) +$ pulumi up +Previewing update (dev) + +View in Browser (Ctrl+O): https://app.pulumi.com/TurikRoma-org/lab04-vm/dev/previews/e80a63d3-85f3-45ae-903e-cbd90313e326 + + Type Name Plan Info + + pulumi:pulumi:Stack lab04-vm-dev create + ├─ pulumi:providers:gcp default_9_12_0 1 warning + + ├─ gcp:compute:Instance lab04-vm create + + └─ gcp:compute:Firewall lab04-vm-firewall create +Diagnostics: + pulumi:providers:gcp (default_9_12_0): + warning: unable to detect a global setting for GCP Project. + Pulumi will rely on per-resource settings for this operation. + Set the GCP Project by using: + `pulumi config set gcp:project ` + If you would like to disable this warning use: + `pulumi config set gcp:disableGlobalProjectWarning true` + + [Pulumi Neo] Would you like help with these diagnostics? + https://app.pulumi.com/TurikRoma-org/lab04-vm/dev/previews/e80a63d3-85f3-45ae-903e-cbd90313e326?explainFailure + +Outputs: + instance_public_ip: [unknown] + ssh_command : [unknown] + +Resources: + + 3 to create + +Do you want to perform this update? yes +Updating (dev) + +View in Browser (Ctrl+O): https://app.pulumi.com/TurikRoma-org/lab04-vm/dev/updates/1 + + Type Name Status Info + + pulumi:pulumi:Stack lab04-vm-dev created (35s) + ├─ pulumi:providers:gcp default_9_12_0 1 warning + + ├─ gcp:compute:Instance lab04-vm created (31s) + + └─ gcp:compute:Firewall lab04-vm-firewall created (12s) +Diagnostics: + pulumi:providers:gcp (default_9_12_0): + warning: unable to detect a global setting for GCP Project. + Pulumi will rely on per-resource settings for this operation. 
+ Set the GCP Project by using: + `pulumi config set gcp:project ` + If you would like to disable this warning use: + `pulumi config set gcp:disableGlobalProjectWarning true` + + [Pulumi Neo] Would you like help with these diagnostics? + https://app.pulumi.com/TurikRoma-org/lab04-vm/dev/updates/1?explainFailure + +Outputs: + instance_public_ip: "136.119.173.134" + ssh_command : "ssh ubuntu@136.119.173.134" + +Resources: + + 3 created + +Duration: 37s \ No newline at end of file diff --git a/pulumi/Outputs/ssh-conntection.png b/pulumi/Outputs/ssh-conntection.png new file mode 100644 index 0000000000..b1b2783d07 Binary files /dev/null and b/pulumi/Outputs/ssh-conntection.png differ diff --git a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml new file mode 100644 index 0000000000..88572a4447 --- /dev/null +++ b/pulumi/Pulumi.yaml @@ -0,0 +1,3 @@ +name: lab04-vm +runtime: python +description: Lab 4 - GCP VM (same as Terraform) diff --git a/pulumi/__main__.py b/pulumi/__main__.py new file mode 100644 index 0000000000..ba8da87903 --- /dev/null +++ b/pulumi/__main__.py @@ -0,0 +1,60 @@ +"""Lab 4 - Pulumi: GCP VM (same infrastructure as Terraform).""" +import pulumi +import pulumi_gcp as gcp + +config = pulumi.Config() +project_id = config.require("project_id") +zone = config.get("zone") or "us-central1-a" +ssh_public_key_path = config.require("ssh_public_key_path") + +# Read SSH public key +with open(ssh_public_key_path, "r", encoding="utf-8") as f: + ssh_public_key = f.read().strip() + +# Firewall: SSH (22), HTTP (80), app port (5000) +firewall = gcp.compute.Firewall( + "lab04-vm-firewall", + name="lab04-vm-firewall", + network="default", + allows=[{"protocol": "tcp", "ports": ["22", "80", "5000"]}], + source_ranges=["0.0.0.0/0"], + target_tags=["lab04-vm"], + project=project_id, +) + +# VM instance (e2-micro free tier) +vm = gcp.compute.Instance( + "lab04-vm", + name="lab04-vm", + machine_type="e2-micro", + zone=zone, + tags=["lab04-vm"], + 
boot_disk=gcp.compute.InstanceBootDiskArgs( + initialize_params=gcp.compute.InstanceBootDiskInitializeParamsArgs( + image="ubuntu-os-cloud/ubuntu-2204-lts", + size=10, + ) + ), + network_interfaces=[ + gcp.compute.InstanceNetworkInterfaceArgs( + network="default", + access_configs=[gcp.compute.InstanceNetworkInterfaceAccessConfigArgs()], + ) + ], + metadata={ + "ssh-keys": f"ubuntu:{ssh_public_key}", + }, + labels={ + "lab": "lab04", + }, + project=project_id, +) + +# Outputs +pulumi.export("instance_public_ip", vm.network_interfaces[0].access_configs[0].nat_ip) +pulumi.export( + "ssh_command", + vm.network_interfaces[0].access_configs[0].nat_ip.apply( + lambda ip: f"ssh ubuntu@{ip}" + ), +) diff --git a/pulumi/__pycache__/__main__.cpython-311.pyc b/pulumi/__pycache__/__main__.cpython-311.pyc new file mode 100644 index 0000000000..0be61b35bf Binary files /dev/null and b/pulumi/__pycache__/__main__.cpython-311.pyc differ diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt new file mode 100644 index 0000000000..b8a8caa539 --- /dev/null +++ b/pulumi/requirements.txt @@ -0,0 +1,2 @@ +pulumi>=3.0.0 +pulumi-gcp>=7.0.0 diff --git a/terraform/Outputs/ssh-connect.png b/terraform/Outputs/ssh-connect.png new file mode 100644 index 0000000000..581b7f2313 Binary files /dev/null and b/terraform/Outputs/ssh-connect.png differ diff --git a/terraform/Outputs/terraform-apply.txt b/terraform/Outputs/terraform-apply.txt new file mode 100644 index 0000000000..30cda56d63 --- /dev/null +++ b/terraform/Outputs/terraform-apply.txt @@ -0,0 +1,146 @@ +Admin@DESKTOP-5I9FNA3 MINGW64 /c/DevOps-Core-Course/terraform (lab04) +$ terraform apply + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated with the following +symbols: + + create + +Terraform will perform the following actions: + + # google_compute_firewall.allow_ssh_http will be created + + resource "google_compute_firewall" "allow_ssh_http" { + + creation_timestamp = (known after apply) + + destination_ranges = (known after apply) + + direction = (known after apply) + + enable_logging = (known after apply) + + id = (known after apply) + + name = "lab04-vm-firewall" + + network = "default" + + priority = 1000 + + project = "idyllic-adviser-475019-t8" + + self_link = (known after apply) + + source_ranges = [ + + "0.0.0.0/0", + ] + + target_tags = [ + + "lab04-vm", + ] + + + allow { + + ports = [ + + "22", + + "80", + + "5000", + ] + + protocol = "tcp" + } + } + + # google_compute_instance.vm will be created + + resource "google_compute_instance" "vm" { + + can_ip_forward = false + + cpu_platform = (known after apply) + + current_status = (known after apply) + + deletion_protection = false + + effective_labels = { + + "goog-terraform-provisioned" = "true" + + "lab" = "lab04" + } + + id = (known after apply) + + instance_id = (known after apply) + + label_fingerprint = (known after apply) + + labels = { + + "lab" = "lab04" + } + + machine_type = "e2-micro" + + metadata = { + + "ssh-keys" = <<-EOT + ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICH1MG1Gf2NeFNZi5UVIZu+0vOJfcolnb0fC3/0MRMaN turikroma15@gmail.com + EOT + } + + metadata_fingerprint = (known after apply) + + min_cpu_platform = (known after apply) + + name = "lab04-vm" + + project = "idyllic-adviser-475019-t8" + + self_link = (known after apply) + + tags = [ + + "lab04-vm", + ] + + tags_fingerprint = (known after apply) + + terraform_labels = { + + "goog-terraform-provisioned" = "true" + + "lab" = "lab04" + } + + zone = "us-central1-a" + + + boot_disk { + + auto_delete = true + + device_name = (known after apply) + + disk_encryption_key_sha256 = (known after apply) + + kms_key_self_link = (known after apply) + + 
mode = "READ_WRITE" + + source = (known after apply) + + + initialize_params { + + image = "ubuntu-os-cloud/ubuntu-2204-lts" + + labels = (known after apply) + + provisioned_iops = (known after apply) + + provisioned_throughput = (known after apply) + + resource_policies = (known after apply) + + size = 10 + + type = (known after apply) + } + } + + + confidential_instance_config (known after apply) + + + guest_accelerator (known after apply) + + + network_interface { + + internal_ipv6_prefix_length = (known after apply) + + ipv6_access_type = (known after apply) + + ipv6_address = (known after apply) + + name = (known after apply) + + network = "default" + + network_ip = (known after apply) + + stack_type = (known after apply) + + subnetwork = (known after apply) + + subnetwork_project = (known after apply) + + + access_config { + + nat_ip = (known after apply) + + network_tier = (known after apply) + } + } + + + reservation_affinity (known after apply) + + + scheduling (known after apply) + } + +Plan: 2 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + instance_public_ip = (known after apply) + + ssh_command = (known after apply) + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +google_compute_firewall.allow_ssh_http: Creating... +google_compute_instance.vm: Creating... +google_compute_firewall.allow_ssh_http: Still creating... [00m10s elapsed] +google_compute_instance.vm: Still creating... [00m10s elapsed] +google_compute_firewall.allow_ssh_http: Creation complete after 12s [id=projects/idyllic-adviser-475019-t8/global/firewalls/lab04-vm-firewall] +google_compute_instance.vm: Still creating... [00m20s elapsed] +google_compute_instance.vm: Creation complete after 26s [id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm] + +Apply complete! Resources: 2 added, 0 changed, 0 destroyed. 
+ +Outputs: + +instance_public_ip = "35.193.180.39" +ssh_command = "ssh ubuntu@35.193.180.39" \ No newline at end of file diff --git a/terraform/Outputs/terraform-destroy.txt b/terraform/Outputs/terraform-destroy.txt new file mode 100644 index 0000000000..db010b42b8 --- /dev/null +++ b/terraform/Outputs/terraform-destroy.txt @@ -0,0 +1,163 @@ +Admin@DESKTOP-5I9FNA3 MINGW64 /c/DevOps-Core-Course/terraform (lab04) +$ terraform destroy +google_compute_firewall.allow_ssh_http: Refreshing state... [id=projects/idyllic-adviser-475019-t8/global/firewalls/lab04-vm-firewall] +google_compute_instance.vm: Refreshing state... [id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm] + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following +symbols: + - destroy + +Terraform will perform the following actions: + + # google_compute_firewall.allow_ssh_http will be destroyed + - resource "google_compute_firewall" "allow_ssh_http" { + - creation_timestamp = "2026-02-19T06:36:43.382-08:00" -> null + - destination_ranges = [] -> null + - direction = "INGRESS" -> null + - disabled = false -> null + - id = "projects/idyllic-adviser-475019-t8/global/firewalls/lab04-vm-firewall" -> null + - name = "lab04-vm-firewall" -> null + - network = "https://www.googleapis.com/compute/v1/projects/idyllic-adviser-475019-t8/global/networks/default" -> null + - priority = 1000 -> null + - project = "idyllic-adviser-475019-t8" -> null + - self_link = "https://www.googleapis.com/compute/v1/projects/idyllic-adviser-475019-t8/global/firewalls/lab04-vm-firewall" -> null + - source_ranges = [ + - "0.0.0.0/0", + ] -> null + - source_service_accounts = [] -> null + - source_tags = [] -> null + - target_service_accounts = [] -> null + - target_tags = [ + - "lab04-vm", + ] -> null + # (1 unchanged attribute hidden) + + - allow { + - ports = [ + - "22", + - "80", + - "5000", + ] -> null + - protocol = "tcp" -> null + 
} + } + + # google_compute_instance.vm will be destroyed + - resource "google_compute_instance" "vm" { + - can_ip_forward = false -> null + - cpu_platform = "AMD Rome" -> null + - current_status = "RUNNING" -> null + - deletion_protection = false -> null + - effective_labels = { + - "goog-terraform-provisioned" = "true" + - "lab" = "lab04" + } -> null + - enable_display = false -> null + - id = "projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm" -> null + - instance_id = "2914385374995980563" -> null + - label_fingerprint = "AOJZ1qn684A=" -> null + - labels = { + - "lab" = "lab04" + } -> null + - machine_type = "e2-micro" -> null + - metadata = { + - "ssh-keys" = <<-EOT + ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICH1MG1Gf2NeFNZi5UVIZu+0vOJfcolnb0fC3/0MRMaN turikroma15@gmail.com + EOT + } -> null + - metadata_fingerprint = "jS4nPNqOIDI=" -> null + - name = "lab04-vm" -> null + - project = "idyllic-adviser-475019-t8" -> null + - resource_policies = [] -> null + - self_link = "https://www.googleapis.com/compute/v1/projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm" -> null + - tags = [ + - "lab04-vm", + ] -> null + - tags_fingerprint = "5G2YeVT3u9Y=" -> null + - terraform_labels = { + - "goog-terraform-provisioned" = "true" + - "lab" = "lab04" + } -> null + - zone = "us-central1-a" -> null + # (3 unchanged attributes hidden) + + - boot_disk { + - auto_delete = true -> null + - device_name = "persistent-disk-0" -> null + - mode = "READ_WRITE" -> null + - source = "https://www.googleapis.com/compute/v1/projects/idyllic-adviser-475019-t8/zones/us-central1-a/disks/lab04-vm" -> null + # (4 unchanged attributes hidden) + + - initialize_params { + - enable_confidential_compute = false -> null + - image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2204-jammy-v20260218" -> null + - labels = {} -> null + - provisioned_iops = 0 -> null + - provisioned_throughput = 0 -> null + - 
resource_manager_tags = {} -> null + - resource_policies = [] -> null + - size = 10 -> null + - type = "pd-standard" -> null + # (1 unchanged attribute hidden) + } + } + + - network_interface { + - internal_ipv6_prefix_length = 0 -> null + - name = "nic0" -> null + - network = "https://www.googleapis.com/compute/v1/projects/idyllic-adviser-475019-t8/global/networks/default" -> null + - network_ip = "10.128.0.3" -> null + - queue_count = 0 -> null + - stack_type = "IPV4_ONLY" -> null + - subnetwork = "https://www.googleapis.com/compute/v1/projects/idyllic-adviser-475019-t8/regions/us-central1/subnetworks/default" -> null + - subnetwork_project = "idyllic-adviser-475019-t8" -> null + # (3 unchanged attributes hidden) + + - access_config { + - nat_ip = "35.193.180.39" -> null + - network_tier = "PREMIUM" -> null + # (1 unchanged attribute hidden) + } + } + + - scheduling { + - automatic_restart = true -> null + - min_node_cpus = 0 -> null + - on_host_maintenance = "MIGRATE" -> null + - preemptible = false -> null + - provisioning_model = "STANDARD" -> null + # (1 unchanged attribute hidden) + } + + - shielded_instance_config { + - enable_integrity_monitoring = true -> null + - enable_secure_boot = false -> null + - enable_vtpm = true -> null + } + } + +Plan: 0 to add, 0 to change, 2 to destroy. + +Changes to Outputs: + - instance_public_ip = "35.193.180.39" -> null + - ssh_command = "ssh ubuntu@35.193.180.39" -> null + +Do you really want to destroy all resources? + Terraform will destroy all your managed infrastructure, as shown above. + There is no undo. Only 'yes' will be accepted to confirm. + + Enter a value: yes + +google_compute_firewall.allow_ssh_http: Destroying... [id=projects/idyllic-adviser-475019-t8/global/firewalls/lab04-vm-firewall] +google_compute_instance.vm: Destroying... [id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm] +google_compute_instance.vm: Still destroying... 
[id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm, 00m10s elapsed] +google_compute_firewall.allow_ssh_http: Still destroying... [id=projects/idyllic-adviser-475019-t8/global/firewalls/lab04-vm-firewall, 00m10s elapsed] +google_compute_firewall.allow_ssh_http: Destruction complete after 12s +google_compute_instance.vm: Still destroying... [id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm, 00m20s elapsed] +google_compute_instance.vm: Still destroying... [id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm, 00m30s elapsed] +google_compute_instance.vm: Still destroying... [id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm, 00m40s elapsed] +google_compute_instance.vm: Still destroying... [id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm, 00m50s elapsed] +google_compute_instance.vm: Still destroying... [id=projects/idyllic-adviser-475019-t8/zones/us-central1-a/instances/lab04-vm, 01m00s elapsed] +google_compute_instance.vm: Destruction complete after 1m6s + +Destroy complete! Resources: 2 destroyed. diff --git a/terraform/Outputs/terraform-init.txt b/terraform/Outputs/terraform-init.txt new file mode 100644 index 0000000000..c0dea30328 --- /dev/null +++ b/terraform/Outputs/terraform-init.txt @@ -0,0 +1,21 @@ +Admin@DESKTOP-5I9FNA3 MINGW64 /c/DevOps-Core-Course/terraform (lab04) +$ terraform init +Initializing the backend... +Initializing provider plugins... +- Finding hashicorp/google versions matching "6.8.0"... +- Installing hashicorp/google v6.8.0... +- Installed hashicorp/google v6.8.0 (signed by HashiCorp) +Terraform has created a lock file .terraform.lock.hcl to record the provider +selections it made above. Include this file in your version control repository +so that Terraform can guarantee to make the same selections by default when +you run "terraform init" in the future. + +Terraform has been successfully initialized! 
+ +You may now begin working with Terraform. Try running "terraform plan" to see +any changes that are required for your infrastructure. All Terraform commands +should now work. + +If you ever set or change modules or backend configuration for Terraform, +rerun this command to reinitialize your working directory. If you forget, other +commands will detect it and remind you to do so if necessary. \ No newline at end of file diff --git a/terraform/Outputs/terraform-plan.txt b/terraform/Outputs/terraform-plan.txt new file mode 100644 index 0000000000..16ddcd2b8d --- /dev/null +++ b/terraform/Outputs/terraform-plan.txt @@ -0,0 +1,130 @@ +dmin@DESKTOP-5I9FNA3 MINGW64 /c/DevOps-Core-Course/terraform (lab04) +$ terraform plan + +Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following +symbols: + + create + +Terraform will perform the following actions: + + # google_compute_firewall.allow_ssh_http will be created + + resource "google_compute_firewall" "allow_ssh_http" { + + creation_timestamp = (known after apply) + + destination_ranges = (known after apply) + + direction = (known after apply) + + enable_logging = (known after apply) + + id = (known after apply) + + name = "lab04-vm-firewall" + + network = "default" + + priority = 1000 + + project = "idyllic-adviser-475019-t8" + + self_link = (known after apply) + + source_ranges = [ + + "0.0.0.0/0", + ] + + target_tags = [ + + "lab04-vm", + ] + + + allow { + + ports = [ + + "22", + + "80", + + "5000", + ] + + protocol = "tcp" + } + } + + # google_compute_instance.vm will be created + + resource "google_compute_instance" "vm" { + + can_ip_forward = false + + cpu_platform = (known after apply) + + current_status = (known after apply) + + deletion_protection = false + + effective_labels = { + + "goog-terraform-provisioned" = "true" + + "lab" = "lab04" + } + + id = (known after apply) + + instance_id = (known after apply) + + label_fingerprint = (known 
after apply) + + labels = { + + "lab" = "lab04" + } + + machine_type = "e2-micro" + + metadata = { + + "ssh-keys" = <<-EOT + ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICH1MG1Gf2NeFNZi5UVIZu+0vOJfcolnb0fC3/0MRMaN turikroma15@gmail.com + EOT + } + + metadata_fingerprint = (known after apply) + + min_cpu_platform = (known after apply) + + name = "lab04-vm" + + project = "idyllic-adviser-475019-t8" + + self_link = (known after apply) + + tags = [ + + "lab04-vm", + ] + + tags_fingerprint = (known after apply) + + terraform_labels = { + + "goog-terraform-provisioned" = "true" + + "lab" = "lab04" + } + + zone = "us-central1-a" + + + boot_disk { + + auto_delete = true + + device_name = (known after apply) + + disk_encryption_key_sha256 = (known after apply) + + kms_key_self_link = (known after apply) + + mode = "READ_WRITE" + + source = (known after apply) + + + initialize_params { + + image = "ubuntu-os-cloud/ubuntu-2204-lts" + + labels = (known after apply) + + provisioned_iops = (known after apply) + + provisioned_throughput = (known after apply) + + resource_policies = (known after apply) + + size = 10 + + type = (known after apply) + } + } + + + confidential_instance_config (known after apply) + + + guest_accelerator (known after apply) + + + network_interface { + + internal_ipv6_prefix_length = (known after apply) + + ipv6_access_type = (known after apply) + + ipv6_address = (known after apply) + + name = (known after apply) + + network = "default" + + network_ip = (known after apply) + + stack_type = (known after apply) + + subnetwork = (known after apply) + + subnetwork_project = (known after apply) + + + access_config { + + nat_ip = (known after apply) + + network_tier = (known after apply) + } + } + + + reservation_affinity (known after apply) + + + scheduling (known after apply) + } + +Plan: 2 to add, 0 to change, 0 to destroy. 
+ +Changes to Outputs: + + instance_public_ip = (known after apply) + + ssh_command = (known after apply) + +──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────── + +Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run +"terraform apply" now. \ No newline at end of file diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000000..79c5b2c9c4 --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,57 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google" + version = "6.8.0" + } + } +} + +provider "google" { + project = var.project_id + region = var.region + zone = var.zone +} + +# Firewall rules: SSH (22), HTTP (80), app port (5000) +resource "google_compute_firewall" "allow_ssh_http" { + name = "lab04-vm-firewall" + network = "default" + + allow { + protocol = "tcp" + ports = ["22", "80", "5000"] + } + + source_ranges = ["0.0.0.0/0"] + target_tags = ["lab04-vm"] +} + +# VM instance (e2-micro free tier) +resource "google_compute_instance" "vm" { + name = "lab04-vm" + machine_type = var.machine_type + zone = var.zone + + tags = ["lab04-vm"] + + boot_disk { + initialize_params { + image = "ubuntu-os-cloud/ubuntu-2204-lts" + size = 10 + } + } + + network_interface { + network = "default" + access_config {} + } + + metadata = { + ssh-keys = "ubuntu:${file(var.ssh_public_key_path)}" + } + + labels = { + lab = "lab04" + } +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 0000000000..3e3669115b --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,9 @@ +output "instance_public_ip" { + description = "Public IP address of the VM" + value = google_compute_instance.vm.network_interface[0].access_config[0].nat_ip +} + +output "ssh_command" { + description = "SSH command to connect to the VM" + value = "ssh 
ubuntu@${google_compute_instance.vm.network_interface[0].access_config[0].nat_ip}" +} diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 0000000000..4f4b62f659 --- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,27 @@ +variable "project_id" { + description = "GCP project ID" + type = string +} + +variable "region" { + description = "GCP region" + type = string + default = "us-central1" +} + +variable "zone" { + description = "GCP zone" + type = string + default = "us-central1-a" +} + +variable "machine_type" { + description = "Machine type (e2-micro for free tier)" + type = string + default = "e2-micro" +} + +variable "ssh_public_key_path" { + description = "Path to SSH public key file" + type = string +}