diff --git a/.github/workflows/ansible-deploy.yml b/.github/workflows/ansible-deploy.yml new file mode 100644 index 0000000000..e9abe948ed --- /dev/null +++ b/.github/workflows/ansible-deploy.yml @@ -0,0 +1,89 @@ +name: Ansible Deployment + +on: + push: + branches: [main, master] + paths: + - 'ansible/**' + - '!ansible/docs/**' + - '.github/workflows/ansible-deploy.yml' + pull_request: + branches: [main, master] + paths: + - 'ansible/**' + - '!ansible/docs/**' + +jobs: + lint: + name: Ansible Lint + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install dependencies + run: | + pip install ansible ansible-lint + + - name: Run ansible-lint + env: + ANSIBLE_VAULT_PASSWORD: ${{ secrets.ANSIBLE_VAULT_PASSWORD }} + run: | + cd ansible + echo "$ANSIBLE_VAULT_PASSWORD" > .vault_pass + ansible-lint playbooks/*.yml + rm -f .vault_pass + + deploy: + name: Deploy Application + needs: lint + if: github.event_name == 'push' + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install Ansible + run: pip install ansible + + - name: Setup SSH + run: | + mkdir -p ~/.ssh + echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_rsa + chmod 600 ~/.ssh/id_rsa + ssh-keyscan -H ${{ secrets.VM_HOST }} >> ~/.ssh/known_hosts 2>/dev/null || true + + - name: Create vault password file + run: echo "${{ secrets.ANSIBLE_VAULT_PASSWORD }}" > /tmp/vault_pass + + - name: Update inventory with CI host + run: | + cd ansible + echo "[webservers]" > inventory/hosts.ini + echo "myvm ansible_host=${{ secrets.VM_HOST }} ansible_port=${{ secrets.VM_PORT }} ansible_user=${{ secrets.VM_USER }} ansible_ssh_private_key_file=~/.ssh/id_rsa ansible_ssh_common_args='-o StrictHostKeyChecking=no'" >> inventory/hosts.ini + + - name: Run Ansible 
playbook + run: | + cd ansible + ansible-playbook playbooks/deploy.yml \ + --vault-password-file /tmp/vault_pass + + - name: Verify Deployment + run: | + sleep 10 + curl -f http://${{ secrets.VM_HOST }}:5000/health || exit 1 + + - name: Cleanup secrets + if: always() + run: | + rm -f /tmp/vault_pass ~/.ssh/id_rsa diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..f62168ba5f --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,109 @@ +name: Python CI/CD + +on: + push: + branches: ["main", "master", "lab03"] + paths: + - "app_python/**" + - ".github/workflows/python-ci.yml" + pull_request: + branches: ["main", "master"] + paths: + - "app_python/**" + - ".github/workflows/python-ci.yml" + workflow_dispatch: + +concurrency: + group: python-ci-${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +env: + IMAGE_NAME: devops-info-service + PYTHON_VERSION: "3.11" + +jobs: + test: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + cache: "pip" + cache-dependency-path: | + app_python/requirements.txt + app_python/requirements-dev.txt + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r app_python/requirements.txt -r app_python/requirements-dev.txt + + - name: Lint (ruff) + run: ruff check app_python + + - name: Run tests + env: + PYTHONPATH: app_python + run: | + pytest app_python/tests \ + --cov=app_python \ + --cov-report=term-missing \ + --cov-report=xml \ + --cov-fail-under=70 + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + with: + files: ./coverage.xml + token: ${{ secrets.CODECOV_TOKEN }} + fail_ci_if_error: false + verbose: true + + - name: Snyk scan + if: ${{ secrets.SNYK_TOKEN != '' }} + uses: snyk/actions/python@master + env: + 
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + with: + args: --severity-threshold=high --file=app_python/requirements.txt + + docker: + needs: test + runs-on: ubuntu-latest + timeout-minutes: 20 + if: > + github.event_name == 'workflow_dispatch' || + (github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master' || github.ref == 'refs/heads/lab03')) + steps: + - uses: actions/checkout@v4 + + - name: Set version (CalVer) + run: echo "VERSION=$(date -u +'%Y.%m.%d')" >> $GITHUB_ENV + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push image + uses: docker/build-push-action@v6 + with: + context: app_python + file: app_python/Dockerfile + push: true + cache-from: type=gha + cache-to: type=gha,mode=max + tags: | + ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:${{ env.VERSION }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:latest diff --git a/.github/workflows/terraform-ci.yml b/.github/workflows/terraform-ci.yml new file mode 100644 index 0000000000..3cdc884bbb --- /dev/null +++ b/.github/workflows/terraform-ci.yml @@ -0,0 +1,50 @@ +name: Terraform CI + +on: + pull_request: + paths: + - 'terraform/**' + - '.github/workflows/terraform-ci.yml' + push: + branches: [main] + paths: + - 'terraform/**' + - '.github/workflows/terraform-ci.yml' + +jobs: + validate: + name: Validate Terraform + runs-on: ubuntu-latest + + defaults: + run: + working-directory: terraform/ + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.9.0" + + - name: Terraform Format Check + run: terraform fmt -check -recursive + + - name: Terraform Init + run: terraform init -backend=false + + - name: Terraform Validate + run: terraform validate + + 
- name: Setup TFLint + uses: terraform-linters/setup-tflint@v4 + with: + tflint_version: latest + + - name: Init TFLint + run: tflint --init + + - name: Run TFLint + run: tflint --format compact diff --git a/.gitignore b/.gitignore index 30d74d2584..10d5f97e5d 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,28 @@ -test \ No newline at end of file +test +/.venv +.coverage* +coverage.xml + +# Terraform +*.tfstate +*.tfstate.* +.terraform/ +.terraform.lock.hcl +terraform.tfvars +*.tfvars +crash.log +*.pem +*.key +*.box +*.ova + +# Pulumi +.pulumi/ +__pycache__/ +venv/ +.pulumi-cache/ + +# Ansible +*.retry +.vault_pass +ansible/inventory/*.pyc diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000..df748dcfbf --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,4 @@ +{ + "python-envs.defaultEnvManager": "ms-python.python:system", + "python-envs.pythonProjects": [] +} \ No newline at end of file diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..452b4f5ff6 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,12 @@ +[defaults] +inventory = inventory/hosts.ini +roles_path = roles +host_key_checking = False +remote_user = vagrant +retry_files_enabled = False +vault_password_file = .vault_pass + +[privilege_escalation] +become = True +become_method = sudo +become_user = root diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md new file mode 100644 index 0000000000..8e4c9f3bae --- /dev/null +++ b/ansible/docs/LAB05.md @@ -0,0 +1,257 @@ +# Lab 05 — Ansible Fundamentals + +## 1. 
Architecture Overview + +**Ansible version:** 2.16+ +**Target VM:** Ubuntu 22.04 LTS (local VirtualBox VM from Lab 04, created via Pulumi) +**Connection:** SSH via NAT port forwarding (`127.0.0.1:2223`, user `vagrant`) + +### Role Structure + +``` +ansible/ +├── ansible.cfg # Ansible configuration +├── inventory/ +│ └── hosts.ini # Static inventory (VM connection details) +├── roles/ +│ ├── common/ # System setup: apt update, essential packages +│ │ ├── tasks/main.yml +│ │ └── defaults/main.yml +│ ├── docker/ # Docker CE installation and configuration +│ │ ├── tasks/main.yml +│ │ ├── handlers/main.yml +│ │ └── defaults/main.yml +│ └── app_deploy/ # Application deployment via Docker +│ ├── tasks/main.yml +│ ├── handlers/main.yml +│ └── defaults/main.yml +├── playbooks/ +│ ├── site.yml # Master playbook (provision + deploy) +│ ├── provision.yml # System provisioning (common + docker) +│ └── deploy.yml # App deployment +├── group_vars/ +│ └── all.yml # Encrypted variables (Ansible Vault) +└── docs/ + └── LAB05.md # This documentation +``` + +**Why roles instead of monolithic playbooks?** +Roles provide reusability, modularity, and clear separation of concerns. Each role handles one specific responsibility (system setup, Docker, app deploy) and can be tested or reused independently. + +--- + +## 2. Roles Documentation + +### Role: `common` + +- **Purpose:** Update apt cache and install essential system packages. +- **Variables:** `common_packages` — list of packages to install (python3-pip, curl, git, vim, htop, etc.). +- **Handlers:** None. +- **Dependencies:** None. + +### Role: `docker` + +- **Purpose:** Install Docker CE from the official repository, enable the service, and add user to the docker group. +- **Variables:** `docker_user` — user to add to the docker group (default: `vagrant`). +- **Handlers:** `restart docker` — triggered when Docker packages are installed or configuration changes. +- **Dependencies:** None (but should run after `common`). 
+ +### Role: `app_deploy` + +- **Purpose:** Pull and run the Python application container from Docker Hub. +- **Variables:** `app_port`, `app_restart_policy` (defaults), plus vaulted variables: `dockerhub_username`, `dockerhub_password`, `docker_image`, `docker_image_tag`, `app_container_name`. +- **Handlers:** `restart app container` — restarts the application container if needed. +- **Dependencies:** Requires Docker to be installed (role `docker`). + +--- + +## 3. Idempotency Demonstration + +### First Run (`ansible-playbook playbooks/provision.yml`) + +``` +PLAY [Provision web servers] ************************************************** + +TASK [Gathering Facts] ******************************************************** +ok: [myvm] + +TASK [common : Update apt cache] ********************************************** +changed: [myvm] + +TASK [common : Install common packages] *************************************** +changed: [myvm] + +TASK [docker : Add Docker GPG key] ******************************************** +changed: [myvm] + +TASK [docker : Add Docker repository] ***************************************** +changed: [myvm] + +TASK [docker : Install Docker CE packages] ************************************ +changed: [myvm] + +TASK [docker : Ensure Docker service is started and enabled] ****************** +ok: [myvm] + +TASK [docker : Add vagrant user to docker group] ****************************** +ok: [myvm] + +TASK [docker : Install python3-docker] **************************************** +ok: [myvm] + +RUNNING HANDLER [docker : restart docker] ************************************* +changed: [myvm] + +PLAY RECAP ******************************************************************** +myvm : ok=12 changed=4 unreachable=0 failed=0 skipped=0 +``` + +Many tasks show **"changed"** (yellow) — packages installed, Docker repo added, service started. 
+ +### Second Run (`ansible-playbook playbooks/provision.yml`) + +``` +PLAY [Provision web servers] ************************************************** + +TASK [Gathering Facts] ******************************************************** +ok: [myvm] + +TASK [common : Update apt cache] ********************************************** +ok: [myvm] + +TASK [common : Install common packages] *************************************** +ok: [myvm] + +TASK [docker : Add Docker GPG key] ******************************************** +ok: [myvm] + +TASK [docker : Add Docker repository] ***************************************** +ok: [myvm] + +TASK [docker : Install Docker CE packages] ************************************ +ok: [myvm] + +TASK [docker : Ensure Docker service is started and enabled] ****************** +ok: [myvm] + +TASK [docker : Add vagrant user to docker group] ****************************** +ok: [myvm] + +TASK [docker : Install python3-docker] **************************************** +ok: [myvm] + +PLAY RECAP ******************************************************************** +myvm : ok=11 changed=0 unreachable=0 failed=0 skipped=0 +``` + +All tasks show **"ok"** (green), zero "changed". This proves idempotency. + +### Analysis + +- **First run:** apt cache updated, packages installed, Docker GPG key added, Docker repo configured, Docker service started, user added to docker group — all new changes. +- **Second run:** All desired states already achieved. Ansible detects no drift, makes no changes. +- **What makes roles idempotent:** Using declarative modules like `apt: state=present`, `service: state=started`, `user: groups=docker append=yes` — they check current state before acting. + +--- + +## 4. Ansible Vault Usage + +Credentials are stored in `group_vars/all.yml`, encrypted with Ansible Vault. + +**How credentials are stored:** The file contains DockerHub username and access token, encrypted at rest. 
+ +**Vault password management:** Password is entered interactively via `--ask-vault-pass`, or stored in `.vault_pass` (excluded from git via `.gitignore`). + +**Encrypted file example:** +``` +$ANSIBLE_VAULT;1.1;AES256 +31396664316237616632386465333739343530653266616435656233653337656365656164346233 +3632633136386562653139376639393739313962626461620a633563366631343438633739653732 +... +``` + +**Why Ansible Vault is important:** It prevents plaintext secrets from being committed to version control. Credentials remain encrypted and are only decrypted in memory during playbook execution. + +--- + +## 5. Deployment Verification + +### Deploy Run (`ansible-playbook playbooks/deploy.yml --vault-password-file .vault_pass`) + +``` +PLAY [Deploy application] ***************************************************** + +TASK [Gathering Facts] ******************************************************** +ok: [myvm] + +TASK [app_deploy : Log in to Docker Hub] ************************************** +changed: [myvm] + +TASK [app_deploy : Pull Docker image] ***************************************** +changed: [myvm] + +TASK [app_deploy : Remove old container (if exists)] ************************** +ok: [myvm] + +TASK [app_deploy : Run application container] ********************************* +changed: [myvm] + +TASK [app_deploy : Wait for application to be ready] ************************** +ok: [myvm] + +TASK [app_deploy : Verify health endpoint] ************************************ +ok: [myvm] + +TASK [app_deploy : Show health check result] ********************************** +ok: [myvm] => { + "health_result.json": { + "status": "healthy", + "timestamp": "2026-02-20T20:14:30.313Z", + "uptime_seconds": 3 + } +} + +PLAY RECAP ******************************************************************** +myvm : ok=8 changed=3 unreachable=0 failed=0 skipped=0 +``` + +### Container Status (`docker ps`) + +``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +4fdc191f5d76 
vladimirzhidkov/devops-info-service:latest "python app.py" 4 minutes ago Up 4 minutes 0.0.0.0:5000->5000/tcp devops-info-service +``` + +### Health Check + +``` +$ curl http://localhost:5000/health +{"status":"healthy","timestamp":"2026-02-20T20:18:43.889Z","uptime_seconds":257} +``` + +--- + +## 6. Key Decisions + +- **Why use roles instead of plain playbooks?** + Roles separate concerns, making each component independently testable and reusable across projects. + +- **How do roles improve reusability?** + A role like `docker` can be dropped into any project that needs Docker. Variables in `defaults/` allow customization without modifying role code. + +- **What makes a task idempotent?** + Using declarative state-based modules (`state: present`, `state: started`) instead of imperative commands. Ansible checks current state before making changes. + +- **How do handlers improve efficiency?** + Handlers only run when notified by a changed task, and only once at the end of the play. This prevents unnecessary service restarts. + +- **Why is Ansible Vault necessary?** + Secrets (passwords, tokens) must not be stored in plaintext in version control. Vault encrypts them, allowing safe commits while keeping secrets accessible during execution. + +--- + +## 7. Challenges + +- Ansible does not run natively on Windows — used WSL2 as the control node. +- VM uses password-based SSH — required `sshpass` package and `ansible_password` in inventory. +- NAT port forwarding means using `127.0.0.1:2223` instead of a direct IP. diff --git a/ansible/docs/LAB06.md b/ansible/docs/LAB06.md new file mode 100644 index 0000000000..7a1a078a81 --- /dev/null +++ b/ansible/docs/LAB06.md @@ -0,0 +1,260 @@ +# Lab 6: Advanced Ansible & CI/CD + +**Name:** Vladimir Zhidkov +**Date:** 2026-02-20 +**Lab Points:** 10 + +--- + +## Task 1: Blocks & Tags (2 pts) + +### Block Usage + +All roles refactored with blocks for logical grouping and error handling. 
+ +#### `common` role + +- **Package block** (`tags: packages`): Groups apt cache update and package installation. Rescue block runs `apt-get update --fix-missing` on failure. Always block logs completion to `/tmp/ansible_common_done.log`. + +#### `docker` role + +- **Install block** (`tags: docker_install`): Groups all Docker installation tasks (prerequisites, GPG key, repo, packages). Rescue block waits 10 seconds and retries on failure. +- **Config block** (`tags: docker_config`): Groups service start, user group, python3-docker. Always block ensures Docker service is enabled. + +#### `web_app` role + +- **Deploy block** (`tags: app_deploy, compose`): Groups Docker login, compose template, pull, deploy, health check. Rescue block logs failure details and fails the play. + +### Tag Strategy + +| Tag | Scope | Description | +|-----|-------|-------------| +| `packages` | common | Package installation | +| `common` | common role | Entire common role | +| `docker` | docker role | Entire docker role | +| `docker_install` | docker | Docker installation tasks | +| `docker_config` | docker | Docker configuration tasks | +| `app_deploy` | web_app | Deployment tasks | +| `compose` | web_app | Docker Compose tasks | +| `web_app_wipe` | web_app | Wipe/cleanup tasks | + +### Tag Execution Examples + +```bash +# Run only docker installation +ansible-playbook playbooks/provision.yml --tags "docker_install" + +# Skip common role +ansible-playbook playbooks/provision.yml --skip-tags "common" + +# List all tags +ansible-playbook playbooks/provision.yml --list-tags +``` + +### Evidence + +![Tags and selective execution](../../screenshots/lab06/task1.png) + +### Research Answers + +- **What happens if rescue block also fails?** The play fails entirely. Ansible does not have a "rescue of rescue" — the always block still runs though. +- **Can you have nested blocks?** Yes, blocks can be nested within other blocks for more granular error handling. 
+- **How do tags inherit to tasks within blocks?** Tags applied at block level are inherited by all tasks inside the block. Tasks can also have their own additional tags. + +--- + +## Task 2: Docker Compose (3 pts) + +### Migration from `docker run` to Docker Compose + +Renamed `app_deploy` → `web_app` role. Replaced `community.docker.docker_container` module with Docker Compose template + `docker compose` CLI. + +### Template Structure + +**`roles/web_app/templates/docker-compose.yml.j2`:** +```yaml +version: '3.8' + +services: + {{ app_name }}: + image: {{ docker_image }}:{{ docker_image_tag }} + container_name: {{ app_name }} + ports: + - "{{ app_port }}:{{ app_internal_port }}" + restart: {{ app_restart_policy }} + environment: + APP_NAME: "{{ app_name }}" +``` + +### Role Dependencies + +**`roles/web_app/meta/main.yml`** declares `docker` as a dependency, so running only `deploy.yml` automatically ensures Docker is installed first. + +### Before/After Comparison + +| Aspect | Before (Lab 5) | After (Lab 6) | +|--------|----------------|----------------| +| Deployment | `docker run` via community.docker | Docker Compose template | +| Config | Ansible variables inline | `docker-compose.yml.j2` template | +| Management | Individual docker commands | `docker compose up/down` | +| Error handling | None | Block/rescue/always | +| Tags | None | `app_deploy`, `compose` | +| Wipe logic | None | `web_app_wipe` variable + tag | + +### Evidence + +![Docker Compose deployment and verification](../../screenshots/lab06/task2.png) + +--- + +## Task 3: Wipe Logic (1 pt) + +### Implementation + +Wipe logic uses **double gating** — both a variable (`web_app_wipe: true`) AND a tag (`web_app_wipe`) must be active for wipe to execute. + +**`roles/web_app/tasks/wipe.yml`** performs: +1. `docker compose down --remove-orphans` +2. Remove docker-compose.yml +3. Remove application directory +4. 
Remove Docker image (optional) + +### Test Scenarios + +**Scenario 1: Normal deployment** — wipe does NOT run (tag not specified, variable false by default). + +**Scenario 2: Wipe only:** +```bash +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe +``` +Result: App removed, no redeployment. + +**Scenario 3: Clean reinstallation:** +```bash +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" +``` +Result: Wipe runs first, then fresh deployment. + +**Scenario 4: Tag without variable:** +```bash +ansible-playbook playbooks/deploy.yml --tags web_app_wipe +``` +Result: Wipe tasks are included but skipped (`when: web_app_wipe | bool` is false). + +### Research Answers + +1. **Why use both variable AND tag?** Double safety — variable prevents accidental execution even if tag is specified, and tag prevents wipe from running during normal deploys. +2. **Difference from `never` tag?** The `never` tag requires `--tags never` to run, while this approach allows combining wipe with deployment (clean reinstall scenario). +3. **Why wipe before deployment?** Enables clean reinstallation workflow: remove old → install new, all in one playbook run. +4. **Clean reinstall vs rolling update?** Clean reinstall ensures no leftover state; rolling update is faster but may carry forward old configs. +5. **Extending wipe to include volumes?** Add `docker volume prune -f` or target specific volumes in the wipe block. + +### Evidence + +![Wipe logic test scenarios](../../screenshots/lab06/task3.png) + +--- + +## Task 4: CI/CD (3 pts) + +### Workflow Architecture + +**`.github/workflows/ansible-deploy.yml`:** + +``` +Push to ansible/** → Lint Job → Deploy Job → Verify +``` + +**Lint job:** Installs `ansible-lint`, checks all playbooks for best practices. + +**Deploy job:** Configures SSH, creates vault password from GitHub Secret, runs `ansible-playbook deploy.yml`, verifies health endpoint. 
+ +### GitHub Secrets Required + +| Secret | Purpose | +|--------|---------| +| `ANSIBLE_VAULT_PASSWORD` | Decrypt vault-encrypted variables | +| `SSH_PRIVATE_KEY` | SSH access to target VM | +| `VM_HOST` | Target VM IP address | +| `VM_PORT` | SSH port (e.g., 2223) | +| `VM_USER` | SSH username | + +### Path Filters + +Workflow triggers only on changes to `ansible/**` (excluding `ansible/docs/**`), preventing unnecessary runs on documentation changes. + +### Security + +- Vault password stored in GitHub Secrets (never in code) +- SSH key cleaned up in `always` block +- Temporary files removed after use + +### Research Answers + +1. **Security of SSH keys in GitHub Secrets:** Encrypted at rest, only available to workflows in the repo. Risk: anyone with push access can exfiltrate them via workflow. Mitigate with branch protection and required reviews. +2. **Staging → production pipeline:** Add environments in GitHub Actions with separate secrets, require manual approval for production. +3. **Rollbacks:** Tag Docker images with commit SHA, keep previous image; add rollback playbook that deploys previous tag. +4. **Self-hosted vs GitHub-hosted:** Self-hosted has direct network access (no SSH needed), secrets don't leave infrastructure, but requires maintenance. + +--- + +## Task 5: Documentation + +This file serves as the complete Lab 6 documentation. 
+ +### File Structure After Lab 6 + +``` +ansible/ +├── ansible.cfg +├── inventory/ +│ └── hosts.ini +├── roles/ +│ ├── common/ +│ │ ├── tasks/main.yml # Refactored with blocks & tags +│ │ └── defaults/main.yml +│ ├── docker/ +│ │ ├── tasks/main.yml # Refactored with blocks & tags +│ │ ├── handlers/main.yml +│ │ └── defaults/main.yml +│ └── web_app/ # Renamed from app_deploy +│ ├── tasks/ +│ │ ├── main.yml # Docker Compose deployment +│ │ └── wipe.yml # Wipe logic +│ ├── handlers/main.yml +│ ├── defaults/main.yml +│ ├── templates/ +│ │ └── docker-compose.yml.j2 +│ └── meta/main.yml # Role dependencies +├── playbooks/ +│ ├── site.yml +│ ├── provision.yml # Tags: common, docker +│ └── deploy.yml # Tags: app_deploy, web_app_wipe +├── group_vars/ +│ └── all.yml # Ansible Vault encrypted +└── docs/ + ├── LAB05.md + └── LAB06.md +.github/ +└── workflows/ + └── ansible-deploy.yml # CI/CD pipeline +``` + +--- + +## Challenges & Solutions + +- **Rename `app_deploy` → `web_app`**: Required updating all playbook role references. +- **Docker Compose on Ubuntu 22.04**: Used `docker-compose-plugin` (v2) instead of standalone `docker-compose` (v1). Commands use `docker compose` (space, not hyphen). +- **Wipe logic safety**: Implemented double gating (variable + tag) to prevent accidental data loss. +- **CI/CD SSH access**: GitHub-hosted runners need SSH key + host scanning; self-hosted runners have direct access. 
+ +--- + +## Summary + +- Refactored all 3 roles with blocks, rescue/always, and comprehensive tag strategy +- Migrated from `docker run` to Docker Compose with Jinja2 templating +- Implemented role dependencies (`web_app` depends on `docker`) +- Created double-gated wipe logic for safe cleanup +- Built CI/CD pipeline with ansible-lint + automated deployment diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml new file mode 100644 index 0000000000..5210850403 --- /dev/null +++ b/ansible/group_vars/all.yml @@ -0,0 +1,22 @@ +$ANSIBLE_VAULT;1.1;AES256 +31396664316237616632386465333739343530653266616435656233653337656365656164346233 +3632633136386562653139376639393739313962626461620a633563366631343438633739653732 +35643934646339356264646364613930363735383333336436616264613335653532613337323361 +3332336533656433330a656164613332396339643736636237623938646665636137323061383734 +33373065336366376531363563383562353732656238306430633338646531626330646262383138 +62663439646532633435623865363835343865386637373464323336343234376639383435663732 +30353435386533646264383862626332663935383666333964656461336132623932326139323935 +61373534616166633862643835373737326165333734323536323261343532646434353333336666 +63373566353738643065393065643466333436633536643164646161396564613361333933343637 +64383039626364633437353436383361303062363166613537333264646261336133623361666463 +63656633613237373230613537383265373837333233316637623735333536323133353236653534 +36343131333866303234353436396239396265306262653239396436306662333861303637363362 +34643962356662646337306562613164636263613266376533333934383766393131636531636264 +30656164626166663061626131323139336333613964346361353765306137663032643631646361 +31623364373835326664633534363333353339323031366334343363333931633934363739316438 +62376339366238653231303137353764386131316632653661656236663363626362663961366365 +35393966386264326631353630363138386265386332346233363834356530316235353036643363 
+34653265396439373463386533303165363534306539306535343764623630323533353437373535 +39643935336430666337323932646539366338376362356138616631346631303566373234653262 +34353839356564393732306339616238393333393330306432383364316665613439616633646233 +3731 diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini new file mode 100644 index 0000000000..f0362364d6 --- /dev/null +++ b/ansible/inventory/hosts.ini @@ -0,0 +1,2 @@ +[webservers] +myvm ansible_host=127.0.0.1 ansible_port=2223 ansible_user=vagrant ansible_password=vagrant ansible_ssh_common_args='-o StrictHostKeyChecking=no' diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml new file mode 100644 index 0000000000..1288a42727 --- /dev/null +++ b/ansible/playbooks/deploy.yml @@ -0,0 +1,13 @@ +--- +- name: Deploy application + hosts: webservers + become: true + vars_files: + - "../group_vars/all.yml" + + roles: + - role: web_app + tags: + - app_deploy + - compose + - web_app_wipe diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml new file mode 100644 index 0000000000..f7250a373a --- /dev/null +++ b/ansible/playbooks/provision.yml @@ -0,0 +1,17 @@ +--- +- name: Provision web servers + hosts: webservers + become: true + vars_files: + - "../group_vars/all.yml" + + roles: + - role: common + tags: + - common + - packages + - role: docker + tags: + - docker + - docker_install + - docker_config diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml new file mode 100644 index 0000000000..cc3d0bf1a2 --- /dev/null +++ b/ansible/playbooks/site.yml @@ -0,0 +1,5 @@ +--- +- name: Provision infrastructure + ansible.builtin.import_playbook: provision.yml +- name: Deploy application + ansible.builtin.import_playbook: deploy.yml diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 0000000000..89be9574dc --- /dev/null +++ b/ansible/roles/common/defaults/main.yml @@ -0,0 +1,12 @@ +--- 
+common_packages: + - python3-pip + - curl + - git + - vim + - htop + - ca-certificates + - gnupg + - lsb-release + - apt-transport-https + - software-properties-common diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 0000000000..a70886ad7c --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,34 @@ +--- +# Package installation block with error handling +- name: Install system packages + become: true + tags: + - packages + block: + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + + - name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + + rescue: + - name: Fix apt cache on failure + ansible.builtin.apt: + update_cache: true + changed_when: true + + - name: Retry package installation + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + + always: + - name: Log package installation completion + ansible.builtin.copy: + content: "Common packages provisioned at {{ ansible_date_time.iso8601 }}\n" + dest: /tmp/ansible_common_done.log + mode: '0644' diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..61a71be1e3 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,2 @@ +--- +docker_user: vagrant diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..55637bda17 --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..f8d7af0698 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,92 @@ +--- +# Docker installation block with error handling +- name: Install Docker Engine 
+ become: true + tags: + - docker_install + block: + - name: Install prerequisites for Docker repository + ansible.builtin.apt: + name: + - ca-certificates + - curl + - gnupg + state: present + + - name: Create keyrings directory + ansible.builtin.file: + path: /etc/apt/keyrings + state: directory + mode: '0755' + + - name: Add Docker GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + + - name: Add Docker repository + ansible.builtin.apt_repository: + repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + state: present + filename: docker + + - name: Install Docker packages + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: present + update_cache: true + notify: Restart docker + + rescue: + - name: Wait before retrying Docker installation + ansible.builtin.pause: + seconds: 10 + + - name: Retry apt update after failure + ansible.builtin.apt: + update_cache: true + + - name: Retry Docker packages installation + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin + state: present + notify: Restart docker + +# Docker configuration block +- name: Configure Docker + become: true + tags: + - docker_config + block: + - name: Ensure Docker service is started and enabled + ansible.builtin.service: + name: docker + state: started + enabled: true + + - name: Add user to docker group + ansible.builtin.user: + name: "{{ docker_user }}" + groups: docker + append: true + + - name: Install python3-docker for Ansible docker modules + ansible.builtin.apt: + name: python3-docker + state: present + + always: + - name: Ensure Docker service is enabled + ansible.builtin.service: + name: docker + enabled: true diff --git a/ansible/roles/web_app/defaults/main.yml b/ansible/roles/web_app/defaults/main.yml new file 
mode 100644 index 0000000000..fa6b790b08 --- /dev/null +++ b/ansible/roles/web_app/defaults/main.yml @@ -0,0 +1,15 @@ +--- +# Application Configuration +web_app_name: devops-info-service +web_app_port: 5000 +web_app_internal_port: 5000 +web_app_restart_policy: unless-stopped + +# Docker Compose +web_app_compose_project_dir: "/opt/{{ web_app_name }}" + +# Wipe Logic Control +# Set to true to remove application completely +# Wipe only: ansible-playbook deploy.yml -e "web_app_wipe=true" --tags web_app_wipe +# Clean install: ansible-playbook deploy.yml -e "web_app_wipe=true" +web_app_wipe: false diff --git a/ansible/roles/web_app/handlers/main.yml b/ansible/roles/web_app/handlers/main.yml new file mode 100644 index 0000000000..228f9ad6fb --- /dev/null +++ b/ansible/roles/web_app/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: Restart app compose + ansible.builtin.command: + cmd: docker compose up -d --force-recreate + chdir: "{{ web_app_compose_project_dir }}" + changed_when: true diff --git a/ansible/roles/web_app/meta/main.yml b/ansible/roles/web_app/meta/main.yml new file mode 100644 index 0000000000..8afff1479c --- /dev/null +++ b/ansible/roles/web_app/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: docker diff --git a/ansible/roles/web_app/tasks/main.yml b/ansible/roles/web_app/tasks/main.yml new file mode 100644 index 0000000000..fb225bc12e --- /dev/null +++ b/ansible/roles/web_app/tasks/main.yml @@ -0,0 +1,74 @@ +--- +# Wipe logic (runs first when explicitly requested) +- name: Include wipe tasks + ansible.builtin.include_tasks: wipe.yml + tags: + - web_app_wipe + +# Deploy application with Docker Compose +- name: Deploy application with Docker Compose + tags: + - app_deploy + - compose + block: + - name: Log in to Docker Hub + ansible.builtin.shell: + cmd: set -o pipefail && echo "$DHPASS" | docker login --username "$DHUSER" --password-stdin + executable: /bin/bash + environment: + DHUSER: "{{ dockerhub_username }}" + DHPASS: "{{ dockerhub_password 
}}" + no_log: true + changed_when: true + + - name: Create application directory + ansible.builtin.file: + path: "{{ web_app_compose_project_dir }}" + state: directory + mode: '0755' + + - name: Template docker-compose file + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ web_app_compose_project_dir }}/docker-compose.yml" + mode: '0644' + notify: Restart app compose + + - name: Pull Docker image + ansible.builtin.command: + cmd: docker compose pull + chdir: "{{ web_app_compose_project_dir }}" + register: web_app_pull_result + changed_when: "'Downloaded' in web_app_pull_result.stderr or 'Pull complete' in web_app_pull_result.stderr" + + - name: Deploy with docker compose + ansible.builtin.command: + cmd: docker compose up -d + chdir: "{{ web_app_compose_project_dir }}" + register: web_app_compose_result + changed_when: "'Started' in web_app_compose_result.stderr or 'Creating' in web_app_compose_result.stderr" + + - name: Wait for application to be ready + ansible.builtin.wait_for: + port: "{{ web_app_port }}" + delay: 3 + timeout: 30 + + - name: Verify health endpoint + ansible.builtin.uri: + url: "http://localhost:{{ web_app_port }}/health" + status_code: 200 + register: web_app_health_result + + - name: Show health check result + ansible.builtin.debug: + var: web_app_health_result.json + + rescue: + - name: Log deployment failure + ansible.builtin.debug: + msg: "Deployment of {{ web_app_name }} failed. 
Check logs with: docker compose -f {{ web_app_compose_project_dir }}/docker-compose.yml logs" + + - name: Fail the play + ansible.builtin.fail: + msg: "Application deployment failed" diff --git a/ansible/roles/web_app/tasks/wipe.yml b/ansible/roles/web_app/tasks/wipe.yml new file mode 100644 index 0000000000..8f013750db --- /dev/null +++ b/ansible/roles/web_app/tasks/wipe.yml @@ -0,0 +1,33 @@ +--- +# Wipe web application +- name: Wipe web application + when: web_app_wipe | bool + tags: + - web_app_wipe + block: + - name: Stop and remove containers via docker compose + ansible.builtin.command: + cmd: docker compose down --remove-orphans + chdir: "{{ web_app_compose_project_dir }}" + failed_when: false + changed_when: true + + - name: Remove docker-compose file + ansible.builtin.file: + path: "{{ web_app_compose_project_dir }}/docker-compose.yml" + state: absent + + - name: Remove application directory + ansible.builtin.file: + path: "{{ web_app_compose_project_dir }}" + state: absent + + - name: Remove Docker image + ansible.builtin.command: + cmd: "docker rmi {{ docker_image }}:{{ docker_image_tag }}" + failed_when: false + changed_when: true + + - name: Log wipe completion + ansible.builtin.debug: + msg: "Application {{ web_app_name }} wiped successfully" diff --git a/ansible/roles/web_app/templates/docker-compose.yml.j2 b/ansible/roles/web_app/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..ca859ddf3c --- /dev/null +++ b/ansible/roles/web_app/templates/docker-compose.yml.j2 @@ -0,0 +1,15 @@ +--- +services: + {{ web_app_name }}: + image: {{ docker_image }}:{{ docker_image_tag }} + container_name: {{ web_app_name }} + ports: + - "{{ web_app_port }}:{{ web_app_internal_port }}" + restart: {{ web_app_restart_policy }} + environment: + APP_NAME: "{{ web_app_name }}" +{% if web_app_env_vars is defined %} +{% for key, value in web_app_env_vars.items() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} diff --git a/app_java/README.md 
b/app_java/README.md new file mode 100644 index 0000000000..ae80418c1f --- /dev/null +++ b/app_java/README.md @@ -0,0 +1,51 @@ +# DevOps Info Service (Java / Spring Boot) + +## Overview + +This is the compiled-language version of the DevOps Info Service implemented with Spring Boot. It mirrors the Python API and prepares the project for multi-stage Docker builds. + +## Prerequisites + +- Java 21+ +- Maven 3.9+ (for build and run commands) + +## Build and Run + +From the `app_java` directory: + +```bash +mvn spring-boot:run +``` + +Or build a runnable JAR: + +```bash +mvn clean package +java -jar target/devops-info-service-1.0.0.jar +``` + +## Configuration + +Environment variables are mapped in `src/main/resources/application.properties`: + +| Variable | Default | Description | +| --- | --- | --- | +| `HOST` | `0.0.0.0` | Host interface to bind (`server.address`) | +| `PORT` | `8080` | Port to listen on (`server.port`) | + +Examples: + +```bash +PORT=9090 mvn spring-boot:run +HOST=127.0.0.1 PORT=3000 mvn spring-boot:run +``` + +## API Endpoints + +- `GET /` - Service and system information +- `GET /health` - Health check + +## Notes on Schema Parity + +The lab requires the same JSON structure as the Python version. To keep schema parity, the `python_version` field is still present but contains the Java runtime version (for example, `java-21`). + diff --git a/app_java/docs/Java.md b/app_java/docs/Java.md new file mode 100644 index 0000000000..350560e6e5 --- /dev/null +++ b/app_java/docs/Java.md @@ -0,0 +1,10 @@ +# Why Java and Spring Boot for the Compiled Version + +Spring Boot is a practical choice for DevOps-oriented services: +- It is widely used in industry and integrates well with enterprise tooling. +- It offers strong defaults for web APIs, JSON serialization, and configuration. +- It scales from small labs to production-ready services without rewrites. +- It works naturally with Docker and Kubernetes (health checks, ports, env vars). 
+ +For this lab, Spring Boot keeps the code clear while still being realistic. + diff --git a/app_java/docs/LAB01.md b/app_java/docs/LAB01.md new file mode 100644 index 0000000000..b11fc64de1 --- /dev/null +++ b/app_java/docs/LAB01.md @@ -0,0 +1,55 @@ +# Lab 01 - DevOps Info Service (Java / Spring Boot) + +## Implementation Notes + +The compiled-language version is implemented with Spring Boot and mirrors the Python API: +- `GET /` +- `GET /health` + +Key implementation files: +- `app_java/src/main/java/com/devopsinfo/DevopsInfoServiceApplication.java` +- `app_java/src/main/java/com/devopsinfo/api/InfoController.java` +- `app_java/src/main/java/com/devopsinfo/service/InfoService.java` +- `app_java/src/main/java/com/devopsinfo/api/RestExceptionHandler.java` + +## Configuration + +Environment variables are wired through `application.properties`: + +```properties +server.address=${HOST:0.0.0.0} +server.port=${PORT:8080} +``` + +This preserves the lab requirement to configure the app via `HOST` and `PORT`. + +## Build and Run + +From the `app_java` directory: + +```bash +mvn spring-boot:run +curl http://127.0.0.1:8080/ +curl http://127.0.0.1:8080/health +``` + +Or build a runnable JAR: + +```bash +mvn clean package +java -jar target/devops-info-service-1.0.0.jar +``` + +## Schema Parity with Python + +The lab asks for the same JSON structure as the Python service. To keep parity, the `python_version` field is still present but contains the Java runtime version (for example, `java-21`). + +## Screenshots + +Screenshots directory: +- `app_java/docs/screenshots/01-main-endpoint.png` +- `app_java/docs/screenshots/02-health-check.png` +- `app_java/docs/screenshots/03-formatted-output.png` + +Replace the placeholder images with real screenshots from your environment. 
+ diff --git a/app_java/docs/screenshots/01-main-endpoint.png b/app_java/docs/screenshots/01-main-endpoint.png new file mode 100644 index 0000000000..39210020c0 Binary files /dev/null and b/app_java/docs/screenshots/01-main-endpoint.png differ diff --git a/app_java/docs/screenshots/02-health-check.png b/app_java/docs/screenshots/02-health-check.png new file mode 100644 index 0000000000..51ce4e8c3a Binary files /dev/null and b/app_java/docs/screenshots/02-health-check.png differ diff --git a/app_java/docs/screenshots/03-formatted-output.png b/app_java/docs/screenshots/03-formatted-output.png new file mode 100644 index 0000000000..597757af1d Binary files /dev/null and b/app_java/docs/screenshots/03-formatted-output.png differ diff --git a/app_java/pom.xml b/app_java/pom.xml new file mode 100644 index 0000000000..cfc5be0196 --- /dev/null +++ b/app_java/pom.xml @@ -0,0 +1,40 @@ + + + 4.0.0 + + + org.springframework.boot + spring-boot-starter-parent + 3.3.0 + + + + com.devopsinfo + devops-info-service + 1.0.0 + devops-info-service + DevOps course info service (Spring Boot) + + + 21 + + + + + org.springframework.boot + spring-boot-starter-web + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + + diff --git a/app_java/src/main/java/com/devopsinfo/DevopsInfoServiceApplication.java b/app_java/src/main/java/com/devopsinfo/DevopsInfoServiceApplication.java new file mode 100644 index 0000000000..77de372f97 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/DevopsInfoServiceApplication.java @@ -0,0 +1,13 @@ +package com.devopsinfo; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class DevopsInfoServiceApplication { + + public static void main(String[] args) { + SpringApplication.run(DevopsInfoServiceApplication.class, args); + } +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/InfoController.java 
b/app_java/src/main/java/com/devopsinfo/api/InfoController.java new file mode 100644 index 0000000000..f435b544f6 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/InfoController.java @@ -0,0 +1,29 @@ +package com.devopsinfo.api; + +import com.devopsinfo.api.dto.HealthResponse; +import com.devopsinfo.api.dto.InfoResponse; +import com.devopsinfo.service.InfoService; +import jakarta.servlet.http.HttpServletRequest; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class InfoController { + + private final InfoService infoService; + + public InfoController(InfoService infoService) { + this.infoService = infoService; + } + + @GetMapping("/") + public InfoResponse index(HttpServletRequest request) { + return infoService.buildInfoResponse(request); + } + + @GetMapping("/health") + public HealthResponse health() { + return infoService.buildHealthResponse(); + } +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/RequestLoggingFilter.java b/app_java/src/main/java/com/devopsinfo/api/RequestLoggingFilter.java new file mode 100644 index 0000000000..dc99564830 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/RequestLoggingFilter.java @@ -0,0 +1,33 @@ +package com.devopsinfo.api; + +import jakarta.servlet.FilterChain; +import jakarta.servlet.ServletException; +import jakarta.servlet.http.HttpServletRequest; +import jakarta.servlet.http.HttpServletResponse; +import java.io.IOException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Component; +import org.springframework.web.filter.OncePerRequestFilter; + +@Component +public class RequestLoggingFilter extends OncePerRequestFilter { + + private static final Logger log = LoggerFactory.getLogger(RequestLoggingFilter.class); + + @Override + protected void doFilterInternal( + HttpServletRequest request, + HttpServletResponse response, + FilterChain 
filterChain + ) throws ServletException, IOException { + String remoteAddr = request.getRemoteAddr(); + if (remoteAddr == null || remoteAddr.isBlank()) { + remoteAddr = "unknown"; + } + + log.info("Request received: {} {} from {}", request.getMethod(), request.getRequestURI(), remoteAddr); + filterChain.doFilter(request, response); + } +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/RestExceptionHandler.java b/app_java/src/main/java/com/devopsinfo/api/RestExceptionHandler.java new file mode 100644 index 0000000000..f573b11d01 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/RestExceptionHandler.java @@ -0,0 +1,44 @@ +package com.devopsinfo.api; + +import jakarta.servlet.http.HttpServletRequest; +import java.util.Map; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.http.HttpStatus; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.ExceptionHandler; +import org.springframework.web.bind.annotation.RestControllerAdvice; +import org.springframework.web.servlet.NoHandlerFoundException; + +@RestControllerAdvice +public class RestExceptionHandler { + + private static final Logger log = LoggerFactory.getLogger(RestExceptionHandler.class); + + @ExceptionHandler(NoHandlerFoundException.class) + public ResponseEntity> handleNotFound( + HttpServletRequest request, + NoHandlerFoundException ex + ) { + Map body = Map.of( + "error", "Not Found", + "message", "Endpoint does not exist", + "path", request.getRequestURI() + ); + return ResponseEntity.status(HttpStatus.NOT_FOUND).body(body); + } + + @ExceptionHandler(Exception.class) + public ResponseEntity> handleUnexpectedError( + HttpServletRequest request, + Exception ex + ) { + log.error("Unhandled exception on {} {}: {}", request.getMethod(), request.getRequestURI(), ex.getMessage(), ex); + Map body = Map.of( + "error", "Internal Server Error", + "message", "An unexpected error occurred" + ); + return 
ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(body); + } +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/dto/EndpointInfo.java b/app_java/src/main/java/com/devopsinfo/api/dto/EndpointInfo.java new file mode 100644 index 0000000000..2094a9c331 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/dto/EndpointInfo.java @@ -0,0 +1,9 @@ +package com.devopsinfo.api.dto; + +public record EndpointInfo( + String path, + String method, + String description +) { +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/dto/HealthResponse.java b/app_java/src/main/java/com/devopsinfo/api/dto/HealthResponse.java new file mode 100644 index 0000000000..ab878212da --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/dto/HealthResponse.java @@ -0,0 +1,12 @@ +package com.devopsinfo.api.dto; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public record HealthResponse( + String status, + String timestamp, + @JsonProperty("uptime_seconds") + long uptimeSeconds +) { +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/dto/InfoResponse.java b/app_java/src/main/java/com/devopsinfo/api/dto/InfoResponse.java new file mode 100644 index 0000000000..83111c0168 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/dto/InfoResponse.java @@ -0,0 +1,13 @@ +package com.devopsinfo.api.dto; + +import java.util.List; + +public record InfoResponse( + ServiceInfo service, + SystemInfo system, + RuntimeInfo runtime, + RequestInfo request, + List endpoints +) { +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/dto/RequestInfo.java b/app_java/src/main/java/com/devopsinfo/api/dto/RequestInfo.java new file mode 100644 index 0000000000..949d7876f2 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/dto/RequestInfo.java @@ -0,0 +1,14 @@ +package com.devopsinfo.api.dto; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public record RequestInfo( + @JsonProperty("client_ip") + String clientIp, + 
@JsonProperty("user_agent") + String userAgent, + String method, + String path +) { +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/dto/RuntimeInfo.java b/app_java/src/main/java/com/devopsinfo/api/dto/RuntimeInfo.java new file mode 100644 index 0000000000..04318ef8c8 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/dto/RuntimeInfo.java @@ -0,0 +1,15 @@ +package com.devopsinfo.api.dto; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public record RuntimeInfo( + @JsonProperty("uptime_seconds") + long uptimeSeconds, + @JsonProperty("uptime_human") + String uptimeHuman, + @JsonProperty("current_time") + String currentTime, + String timezone +) { +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/dto/ServiceInfo.java b/app_java/src/main/java/com/devopsinfo/api/dto/ServiceInfo.java new file mode 100644 index 0000000000..0ed6f64db6 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/dto/ServiceInfo.java @@ -0,0 +1,10 @@ +package com.devopsinfo.api.dto; + +public record ServiceInfo( + String name, + String version, + String description, + String framework +) { +} + diff --git a/app_java/src/main/java/com/devopsinfo/api/dto/SystemInfo.java b/app_java/src/main/java/com/devopsinfo/api/dto/SystemInfo.java new file mode 100644 index 0000000000..5996d9e358 --- /dev/null +++ b/app_java/src/main/java/com/devopsinfo/api/dto/SystemInfo.java @@ -0,0 +1,17 @@ +package com.devopsinfo.api.dto; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public record SystemInfo( + String hostname, + String platform, + @JsonProperty("platform_version") + String platformVersion, + String architecture, + @JsonProperty("cpu_count") + int cpuCount, + @JsonProperty("python_version") + String pythonVersion +) { +} + diff --git a/app_java/src/main/java/com/devopsinfo/service/InfoService.java b/app_java/src/main/java/com/devopsinfo/service/InfoService.java new file mode 100644 index 0000000000..0580ac1495 --- /dev/null +++ 
b/app_java/src/main/java/com/devopsinfo/service/InfoService.java @@ -0,0 +1,163 @@ +package com.devopsinfo.service; + +import com.devopsinfo.api.dto.EndpointInfo; +import com.devopsinfo.api.dto.HealthResponse; +import com.devopsinfo.api.dto.InfoResponse; +import com.devopsinfo.api.dto.RequestInfo; +import com.devopsinfo.api.dto.RuntimeInfo; +import com.devopsinfo.api.dto.ServiceInfo; +import com.devopsinfo.api.dto.SystemInfo; +import jakarta.servlet.http.HttpServletRequest; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.time.Duration; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.temporal.ChronoUnit; +import java.util.List; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Service; + +@Service +public class InfoService { + + private static final Logger log = LoggerFactory.getLogger(InfoService.class); + + private static final Instant START_TIME = Instant.now(); + private static final DateTimeFormatter ISO_UTC_MILLIS = + DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX").withZone(ZoneOffset.UTC); + + private static final ServiceInfo SERVICE_INFO = new ServiceInfo( + "devops-info-service", + "1.0.0", + "DevOps course info service", + "Spring Boot" + ); + + private static final List ENDPOINTS = List.of( + new EndpointInfo("/", "GET", "Service information"), + new EndpointInfo("/health", "GET", "Health check") + ); + + public InfoResponse buildInfoResponse(HttpServletRequest request) { + return new InfoResponse( + SERVICE_INFO, + getSystemInfo(), + getRuntimeInfo(), + getRequestInfo(request), + ENDPOINTS + ); + } + + public HealthResponse buildHealthResponse() { + return new HealthResponse( + "healthy", + isoUtcNow(), + getUptimeSeconds() + ); + } + + private SystemInfo getSystemInfo() { + String hostname = resolveHostname(); + String platform = System.getProperty("os.name", 
"unknown"); + String platformVersion = System.getProperty("os.version", "unknown"); + String architecture = System.getProperty("os.arch", "unknown"); + int cpuCount = Runtime.getRuntime().availableProcessors(); + String javaVersion = System.getProperty("java.version", "unknown"); + + // Keep the Python-shaped field name from the lab for schema parity. + String pythonVersion = "java-" + javaVersion; + + return new SystemInfo( + hostname, + platform, + platformVersion, + architecture, + cpuCount, + pythonVersion + ); + } + + private RuntimeInfo getRuntimeInfo() { + long uptimeSeconds = getUptimeSeconds(); + String uptimeHuman = formatUptime(uptimeSeconds); + String timezone = ZoneId.systemDefault().getId(); + + return new RuntimeInfo( + uptimeSeconds, + uptimeHuman, + isoUtcNow(), + timezone + ); + } + + private RequestInfo getRequestInfo(HttpServletRequest request) { + String clientIp = resolveClientIp(request); + String userAgent = headerOrDefault(request, "User-Agent", "unknown"); + String method = request.getMethod(); + String path = request.getRequestURI(); + + return new RequestInfo(clientIp, userAgent, method, path); + } + + private String resolveHostname() { + try { + return InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException ex) { + log.warn("Unable to resolve hostname: {}", ex.getMessage()); + return "unknown"; + } + } + + private long getUptimeSeconds() { + long seconds = Duration.between(START_TIME, Instant.now()).getSeconds(); + return Math.max(seconds, 0); + } + + private String isoUtcNow() { + Instant now = Instant.now().truncatedTo(ChronoUnit.MILLIS); + return ISO_UTC_MILLIS.format(now); + } + + private String resolveClientIp(HttpServletRequest request) { + String forwardedFor = request.getHeader("X-Forwarded-For"); + if (forwardedFor != null && !forwardedFor.isBlank()) { + String[] parts = forwardedFor.split(","); + if (parts.length > 0) { + return parts[0].trim(); + } + } + + String remoteAddr = request.getRemoteAddr(); + 
return (remoteAddr == null || remoteAddr.isBlank()) ? "unknown" : remoteAddr; + } + + private String headerOrDefault(HttpServletRequest request, String name, String fallback) { + String value = request.getHeader(name); + return (value == null || value.isBlank()) ? fallback : value; + } + + private String formatUptime(long seconds) { + long days = seconds / 86_400; + long remainder = seconds % 86_400; + long hours = remainder / 3_600; + remainder = remainder % 3_600; + long minutes = remainder / 60; + long secs = remainder % 60; + + StringBuilder sb = new StringBuilder(); + if (days > 0) { + sb.append(days).append(days == 1 ? " day, " : " days, "); + } + if (hours > 0 || days > 0) { + sb.append(hours).append(hours == 1 ? " hour, " : " hours, "); + } + sb.append(minutes).append(minutes == 1 ? " minute, " : " minutes, "); + sb.append(secs).append(secs == 1 ? " second" : " seconds"); + return sb.toString(); + } +} + diff --git a/app_java/src/main/resources/application.properties b/app_java/src/main/resources/application.properties new file mode 100644 index 0000000000..6f82a5355d --- /dev/null +++ b/app_java/src/main/resources/application.properties @@ -0,0 +1,7 @@ +server.address=${HOST:0.0.0.0} +server.port=${PORT:8080} + +# Make 404s throw NoHandlerFoundException so we can return JSON consistently. +spring.mvc.throw-exception-if-no-handler-found=true +spring.web.resources.add-mappings=false + diff --git a/app_java/target/classes/application.properties b/app_java/target/classes/application.properties new file mode 100644 index 0000000000..6f82a5355d --- /dev/null +++ b/app_java/target/classes/application.properties @@ -0,0 +1,7 @@ +server.address=${HOST:0.0.0.0} +server.port=${PORT:8080} + +# Make 404s throw NoHandlerFoundException so we can return JSON consistently. 
+spring.mvc.throw-exception-if-no-handler-found=true +spring.web.resources.add-mappings=false + diff --git a/app_java/target/classes/com/devopsinfo/DevopsInfoServiceApplication.class b/app_java/target/classes/com/devopsinfo/DevopsInfoServiceApplication.class new file mode 100644 index 0000000000..829a7735a4 Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/DevopsInfoServiceApplication.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/InfoController.class b/app_java/target/classes/com/devopsinfo/api/InfoController.class new file mode 100644 index 0000000000..5d6baf6100 Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/InfoController.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/RequestLoggingFilter.class b/app_java/target/classes/com/devopsinfo/api/RequestLoggingFilter.class new file mode 100644 index 0000000000..48be2b8530 Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/RequestLoggingFilter.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/RestExceptionHandler.class b/app_java/target/classes/com/devopsinfo/api/RestExceptionHandler.class new file mode 100644 index 0000000000..6a9bb4fe5a Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/RestExceptionHandler.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/dto/EndpointInfo.class b/app_java/target/classes/com/devopsinfo/api/dto/EndpointInfo.class new file mode 100644 index 0000000000..0e5a19bef0 Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/dto/EndpointInfo.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/dto/HealthResponse.class b/app_java/target/classes/com/devopsinfo/api/dto/HealthResponse.class new file mode 100644 index 0000000000..00ac80c804 Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/dto/HealthResponse.class differ diff --git 
a/app_java/target/classes/com/devopsinfo/api/dto/InfoResponse.class b/app_java/target/classes/com/devopsinfo/api/dto/InfoResponse.class new file mode 100644 index 0000000000..db85f9dd6b Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/dto/InfoResponse.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/dto/RequestInfo.class b/app_java/target/classes/com/devopsinfo/api/dto/RequestInfo.class new file mode 100644 index 0000000000..9948e70271 Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/dto/RequestInfo.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/dto/RuntimeInfo.class b/app_java/target/classes/com/devopsinfo/api/dto/RuntimeInfo.class new file mode 100644 index 0000000000..9ddc81e970 Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/dto/RuntimeInfo.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/dto/ServiceInfo.class b/app_java/target/classes/com/devopsinfo/api/dto/ServiceInfo.class new file mode 100644 index 0000000000..079e188c00 Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/dto/ServiceInfo.class differ diff --git a/app_java/target/classes/com/devopsinfo/api/dto/SystemInfo.class b/app_java/target/classes/com/devopsinfo/api/dto/SystemInfo.class new file mode 100644 index 0000000000..781a4406fe Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/api/dto/SystemInfo.class differ diff --git a/app_java/target/classes/com/devopsinfo/service/InfoService.class b/app_java/target/classes/com/devopsinfo/service/InfoService.class new file mode 100644 index 0000000000..cc8a82a28f Binary files /dev/null and b/app_java/target/classes/com/devopsinfo/service/InfoService.class differ diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..af9befe4e1 --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,20 @@ +__pycache__/ +*.pyc +*.pyo +*.pyd +*.log +.pytest_cache/ 
+.mypy_cache/ +.coverage +.env +.env.* +.venv/ +venv/ +env/ +.idea/ +.vscode/ +.git/ +.gitignore +tests/ +docs/ +*.md diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..5602565a82 --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,24 @@ +# Python +__pycache__/ +*.py[cod] +*.pyo +*.pyd +.python-version +venv/ +.venv/ +env/ +*.log + +# Testing and coverage +.pytest_cache/ +.coverage +htmlcov/ + +# IDEs and editors +.vscode/ +.idea/ + +# OS artifacts +.DS_Store +Thumbs.db + diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..59e155991b --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,21 @@ +FROM python:3.13-slim + +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 \ + PIP_DISABLE_PIP_VERSION_CHECK=1 \ + PIP_NO_CACHE_DIR=1 + +WORKDIR /app + +RUN addgroup --system app && adduser --system --ingroup app --home /home/app app + +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt + +COPY --chown=app:app app.py ./ + +USER app + +EXPOSE 5000 + +CMD ["python", "app.py"] diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..7db5af6989 --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,138 @@ +# DevOps Info Service (Python / FastAPI) + +[![Python CI](https://github.com/Vlad1mirZhidkov/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg)](https://github.com/Vlad1mirZhidkov/DevOps-Core-Course/actions/workflows/python-ci.yml) +[![Coverage](https://codecov.io/gh/Vlad1mirZhidkov/DevOps-Core-Course/graph/badge.svg)](https://codecov.io/gh/Vlad1mirZhidkov/DevOps-Core-Course) + +## Overview + +DevOps Info Service is a small web API that reports: +- Service metadata +- System information +- Runtime health and uptime +- Basic request details + +It is designed as a foundation for later labs (Docker, CI/CD, monitoring, and Kubernetes). 
+ +## Prerequisites + +- Python 3.11 or newer +- `pip` (usually bundled with Python) + +## Installation + +Create a virtual environment and install dependencies: + +```bash +python -m venv venv +``` + +Activate it: + +```bash +# Linux / macOS +source venv/bin/activate + +# Windows PowerShell +venv\Scripts\Activate.ps1 +``` + +Install requirements: + +```bash +pip install -r requirements.txt +``` + +## Running the Application + +You can run via `python app.py` (it starts Uvicorn internally): + +```bash +python app.py +``` + +Or run Uvicorn directly: + +```bash +# From the app_python directory +uvicorn app:app --host 0.0.0.0 --port 5000 + +# From the repository root +uvicorn app_python.app:app --host 0.0.0.0 --port 5000 +``` + +Run with custom configuration: + +```bash +# Bash-style +PORT=8080 python app.py + +# Windows PowerShell +$env:PORT=8080 +python app.py +``` + +Try the endpoints: + +```bash +curl http://127.0.0.1:5000/ +curl http://127.0.0.1:5000/health +``` + +## Testing + +Install dev dependencies: + +```bash +pip install -r requirements.txt -r requirements-dev.txt +``` + +Run tests: + +```bash +# From the repository root +pytest app_python/tests + +# From the app_python directory +pytest tests +``` + +Optional coverage report: + +```bash +pytest app_python/tests --cov=app_python --cov-report=term-missing --cov-report=xml --cov-fail-under=70 +``` + +## Docker + +Build the image locally (run from `app_python`): + +```bash +docker build -t : . 
+``` + +Run a container with port mapping: + +```bash +docker run --rm -p :5000 -e PORT=5000 : +``` + +Pull from Docker Hub: + +```bash +docker pull /: +``` + +## API Endpoints + +- `GET /` - Service and system information +- `GET /health` - Health check for probes and monitoring + +## Configuration + +The service is configured through environment variables: + +| Variable | Default | Description | +| --- | --- | --- | +| `HOST` | `0.0.0.0` | Host interface to bind | +| `PORT` | `5000` | Port to listen on | +| `DEBUG` | `false` | Enable debug-level logging | diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..6e4793998f --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,212 @@ +""" +DevOps Info Service - Lab 01 + +Production-minded FastAPI application that exposes runtime and system details. +""" + +from __future__ import annotations + +import logging +import os +import platform +import socket +import time +from datetime import datetime, timezone +from typing import Any, Dict, Tuple + +import uvicorn +from fastapi import FastAPI, Request +from fastapi.exception_handlers import http_exception_handler +from fastapi.responses import JSONResponse +from starlette.exceptions import HTTPException as StarletteHTTPException + + +def _configure_logging() -> logging.Logger: + """Configure application-wide logging and return a module logger.""" + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + ) + return logging.getLogger(__name__) + + +logger = _configure_logging() + + +def _get_env_bool(name: str, default: bool) -> bool: + """Read a boolean environment variable with a safe default.""" + raw_value = os.getenv(name) + if raw_value is None: + return default + return raw_value.strip().lower() in {"1", "true", "yes", "on"} + + +def _get_env_port(default: int) -> int: + """Read the PORT environment variable safely.""" + raw_value = os.getenv("PORT") + if not raw_value: + return default 
+ try: + return int(raw_value) + except ValueError: + logger.warning("Invalid PORT value '%s'. Falling back to %s.", raw_value, default) + return default + + +HOST: str = os.getenv("HOST", "0.0.0.0") +PORT: int = _get_env_port(default=5000) +DEBUG: bool = _get_env_bool("DEBUG", default=False) + +SERVICE_INFO: Dict[str, str] = { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "FastAPI", +} + +ENDPOINTS = [ + {"path": "/", "method": "GET", "description": "Service information"}, + {"path": "/health", "method": "GET", "description": "Health check"}, +] + +START_TIME: datetime = datetime.now(timezone.utc) + +app = FastAPI( + title="DevOps Info Service", + version=SERVICE_INFO["version"], + description=SERVICE_INFO["description"], +) + + +def _iso_utc_now() -> str: + """Return the current time in ISO 8601 format with a UTC 'Z' suffix.""" + now_utc = datetime.now(timezone.utc) + return now_utc.isoformat(timespec="milliseconds").replace("+00:00", "Z") + + +def _format_uptime(seconds: int) -> str: + """Return a human-readable uptime string.""" + days, remainder = divmod(seconds, 86_400) + hours, remainder = divmod(remainder, 3_600) + minutes, secs = divmod(remainder, 60) + + parts = [] + if days: + parts.append(f"{days} day{'s' if days != 1 else ''}") + if hours or days: + parts.append(f"{hours} hour{'s' if hours != 1 else ''}") + parts.append(f"{minutes} minute{'s' if minutes != 1 else ''}") + parts.append(f"{secs} second{'s' if secs != 1 else ''}") + return ", ".join(parts) + + +def get_uptime() -> Tuple[int, str]: + """Return uptime in seconds and a human-readable string.""" + delta = datetime.now(timezone.utc) - START_TIME + seconds = max(int(delta.total_seconds()), 0) + return seconds, _format_uptime(seconds) + + +def get_system_info() -> Dict[str, Any]: + """Collect system information about the runtime environment.""" + return { + "hostname": socket.gethostname(), + "platform": platform.system(), + 
"platform_version": platform.version(), + "architecture": platform.machine(), + "cpu_count": os.cpu_count() or 0, + "python_version": platform.python_version(), + } + + +def get_runtime_info() -> Dict[str, Any]: + """Collect runtime information such as uptime and timezone.""" + uptime_seconds, uptime_human = get_uptime() + local_tz = datetime.now().astimezone().tzinfo + return { + "uptime_seconds": uptime_seconds, + "uptime_human": uptime_human, + "current_time": _iso_utc_now(), + "timezone": str(local_tz) if local_tz else time.tzname[0], + } + + +def get_request_info(request: Request) -> Dict[str, Any]: + """Collect request-specific details useful for debugging and observability.""" + forwarded_for = request.headers.get("X-Forwarded-For", "") + client_host = request.client.host if request.client else "" + client_ip = forwarded_for.split(",")[0].strip() if forwarded_for else client_host + return { + "client_ip": client_ip or "unknown", + "user_agent": request.headers.get("User-Agent", "unknown"), + "method": request.method, + "path": request.url.path, + } + + +@app.middleware("http") +async def log_request(request: Request, call_next): + """Log incoming requests with basic metadata.""" + client_ip = request.client.host if request.client else "unknown" + logger.info("Request received: %s %s from %s", request.method, request.url.path, client_ip) + response = await call_next(request) + return response + + +@app.get("/", summary="Service information") +async def index(request: Request): + """Main endpoint returning service, system, runtime, and request info.""" + response = { + "service": SERVICE_INFO, + "system": get_system_info(), + "runtime": get_runtime_info(), + "request": get_request_info(request), + "endpoints": ENDPOINTS, + } + return response + + +@app.get("/health", summary="Health check") +async def health(): + """Health endpoint suitable for probes and monitoring.""" + uptime_seconds, _ = get_uptime() + payload = { + "status": "healthy", + "timestamp": 
_iso_utc_now(), + "uptime_seconds": uptime_seconds, + } + return payload + + +@app.exception_handler(StarletteHTTPException) +async def handle_http_exception(request: Request, exc: StarletteHTTPException): + """Return a JSON 404 response while preserving default handling for others.""" + if exc.status_code == 404: + return JSONResponse( + status_code=404, + content={ + "error": "Not Found", + "message": "Endpoint does not exist", + "path": request.url.path, + }, + ) + return await http_exception_handler(request, exc) + + +@app.exception_handler(Exception) +async def handle_unexpected_error(request: Request, exc: Exception): + """Return a JSON 500 response.""" + logger.exception("Unhandled exception on %s %s: %s", request.method, request.url.path, exc) + return JSONResponse( + status_code=500, + content={ + "error": "Internal Server Error", + "message": "An unexpected error occurred", + }, + ) + + +if __name__ == "__main__": + log_level = "debug" if DEBUG else "info" + logger.info("Starting DevOps Info Service on %s:%s (debug=%s)", HOST, PORT, DEBUG) + uvicorn.run(app, host=HOST, port=PORT, log_level=log_level) diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md new file mode 100644 index 0000000000..34c61f0ea1 --- /dev/null +++ b/app_python/docs/LAB01.md @@ -0,0 +1,148 @@ +# Lab 01 - DevOps Info Service (Python / FastAPI) + +## 1. Framework Selection + +Chosen framework: FastAPI 0.115.0. + +FastAPI was selected because it keeps the service small while adding strong typing, built-in validation, and automatic API documentation at `/docs`. It is also a natural fit for modern DevOps services that may grow over time. 
+ +Comparison summary: + +| Framework | Strengths | Trade-offs | Fit for Lab 01 | +| --- | --- | --- | --- | +| FastAPI | Type hints, validation, auto docs, async-ready | Requires ASGI server (Uvicorn) | Best balance of speed and future growth | +| Flask | Very simple and flexible | Less built-in structure and typing | Strong alternative for minimal APIs | +| Django | Full-featured framework with ORM and admin | Heavy for a two-endpoint service | Overkill for this lab | + +## 2. Best Practices Applied + +Key practices implemented in `app_python/app.py`: + +1. Clear structure and small functions. Examples include `get_system_info()`, `get_runtime_info()`, and `get_request_info(request)`. +2. Configuration via environment variables. Variables: `HOST`, `PORT`, and `DEBUG`, with safer parsing handled by `_get_env_port()` and `_get_env_bool()`. +3. Logging for observability. Logging is configured once in `_configure_logging()`, and requests are logged through `@app.middleware("http")`. +4. JSON error handling. A custom 404 response is implemented via `@app.exception_handler(StarletteHTTPException)`, and a 500 handler is implemented via `@app.exception_handler(Exception)`. +5. Reproducible dependencies. `app_python/requirements.txt` pins `fastapi==0.115.0` and `uvicorn[standard]==0.32.0`. +6. Clean repository hygiene. `app_python/.gitignore` excludes environments, caches, logs, and IDE files. + +Why this matters: +- Small focused functions are easier to test and reuse. +- Environment-based config is standard in DevOps workflows. +- Logging and consistent JSON errors improve debugging and monitoring. + +## 3. 
API Documentation + +### GET / - Service and System Information + +Example request: + +```bash +curl http://127.0.0.1:5000/ +``` + +Example response (values depend on your machine): + +```json +{ + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "FastAPI" + }, + "system": { + "hostname": "MY-PC", + "platform": "Windows", + "platform_version": "10.0.22631", + "architecture": "AMD64", + "cpu_count": 16, + "python_version": "3.11.9" + }, + "runtime": { + "uptime_seconds": 12, + "uptime_human": "0 minutes, 12 seconds", + "current_time": "2026-01-27T10:10:00.000Z", + "timezone": "Pacific Standard Time" + }, + "request": { + "client_ip": "127.0.0.1", + "user_agent": "curl/8.0.0", + "method": "GET", + "path": "/" + }, + "endpoints": [ + { + "path": "/", + "method": "GET", + "description": "Service information" + }, + { + "path": "/health", + "method": "GET", + "description": "Health check" + } + ] +} +``` + +### GET /health - Health Check + +Example request: + +```bash +curl http://127.0.0.1:5000/health +``` + +Example response: + +```json +{ + "status": "healthy", + "timestamp": "2026-01-27T10:10:05.000Z", + "uptime_seconds": 17 +} +``` + +### Pretty-Printed Output + +```bash +curl http://127.0.0.1:5000/ | python -m json.tool +``` + +## 4. Testing Evidence + +Endpoints can be verified locally by running the app and calling both routes: + +```bash +# From app_python/ +uvicorn app:app --host 0.0.0.0 --port 5000 + +curl http://127.0.0.1:5000/ +curl http://127.0.0.1:5000/health +curl http://127.0.0.1:5000/ | python -m json.tool +``` + +Alternative run command: + +```bash +python app.py +``` + +Screenshots directory: +- `app_python/docs/screenshots/lab01/01-main-endpoint.png` +- `app_python/docs/screenshots/lab01/02-health-check.png` +- `app_python/docs/screenshots/lab01/03-formatted-output.png` + +Note: placeholder PNG files were created in this environment. 
Replace them with real screenshots from your machine, browser, or terminal. + +## 5. Challenges and Solutions + +1. Robust environment parsing. Problem: `PORT` can be missing or invalid. Solution: `_get_env_port()` validates and falls back safely with a warning. +2. Consistent timestamps. Problem: mixed local vs UTC time formats. Solution: `_iso_utc_now()` always returns UTC with a `Z` suffix. +3. Friendly 404 responses in FastAPI. Problem: the default 404 is not in the requested lab format. Solution: a custom handler for `StarletteHTTPException` returns the required JSON for 404 while preserving default handling for other HTTP errors. + +## 6. GitHub Community + +Starring repositories helps with discovery, bookmarking, and signaling useful projects to others. Following developers (professor, TAs, and classmates) helps you learn from their activity and makes collaboration easier in team settings. + +Actions like starring and following must be done directly in GitHub. diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md new file mode 100644 index 0000000000..1292f58178 --- /dev/null +++ b/app_python/docs/LAB02.md @@ -0,0 +1,123 @@ +# LAB02 - Docker Containerization (Python) + +## 1. Docker Best Practices Applied + +1) Pinned base image (`python:3.13-slim`) + - Why: predictable runtime and smaller footprint than full images. + +2) Non-root user + - Why: reduces container privilege and attack surface. + +3) Layer caching for dependencies + - `requirements.txt` is copied and installed before application code. + - Why: dependency layers are reused when only code changes. + +4) Minimal copy + .dockerignore + - Only `requirements.txt` and `app.py` are copied. + - `.dockerignore` excludes tests, docs, VCS, and virtualenvs. + - Why: smaller build context, faster builds, smaller images. + +5) Lean pip install + - `--no-cache-dir` and `PIP_DISABLE_PIP_VERSION_CHECK=1`. + - Why: avoids cache bloat and reduces noise. 
+ +### Dockerfile snippets + +```dockerfile +FROM python:3.13-slim +``` + +Pinned slim image for smaller runtime. + +```dockerfile +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt +``` + +Dependency install layer before code for caching. + +```dockerfile +RUN addgroup --system app && adduser --system --ingroup app --home /home/app app +COPY --chown=app:app app.py ./ +USER app +``` + +Creates and switches to a non-root user, ensuring app files are owned correctly. + +## 2. Image Information & Decisions + +- Base image: `python:3.13-slim` + - Chosen for a balance of size and compatibility with Python wheels. +- Final image size: 156MB + - Run `docker images` after build and record the size here. +- Layer structure (top to bottom): + 1. Base Python runtime + 2. Environment variables + 3. OS user creation + 4. Dependency install + 5. Application code copy + 6. Runtime user + CMD +- Optimization choices: + - Minimal base image. + - Single app file copied (no tests/docs). + - Pip cache disabled. + +## 3. Build & Run Process + +### Build + +```bash +docker build -t devops-info-service:lab02 . +``` + +![alt text](screenshots/lab02/build.png) + +### Run + +```bash +docker run --rm -p 5000:5000 devops-info-service:lab02 +``` + +![alt text](screenshots/lab02/run.png) + +### Test endpoints + +```bash +curl http://127.0.0.1:5000/ +curl http://127.0.0.1:5000/health +``` +![alt text](screenshots/lab02/curl.png) +### Docker Hub + +- Repository URL: https://hub.docker.com/r/vladimirzhidkov/devops-info-service +- Tagging strategy: `vladimirzhidkov/devops-info-service:lab02` (tag matches lab number). + +```bash +docker push vladimirzhidkov/devops-info-service:lab02 +``` + +![alt text](screenshots/lab02/push.png) + +## 4. Technical Analysis + +1) Why the Dockerfile works: + - Uses a compatible Python runtime and installs the exact dependencies. + - Runs the same `app.py` entrypoint used locally. 
+ +2) Layer order impact: + - If you copy the app before installing dependencies, any code change + invalidates the cache and forces reinstalling packages, slowing builds. + +3) Security considerations: + - Non-root user prevents accidental privilege escalation. + - Slim base image lowers attack surface. + - No extra build tools installed in runtime image. + +4) .dockerignore impact: + - Reduces context size and speeds up Docker build. + - Prevents accidental inclusion of dev files. + +## 5. Challenges & Solutions + +- No significant issues encountered. +- Verified build, run, and endpoint checks; Docker Hub push succeeded. diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md new file mode 100644 index 0000000000..888cf5bbff --- /dev/null +++ b/app_python/docs/LAB03.md @@ -0,0 +1,87 @@ +# LAB03 - Continuous Integration (Python) + +## 1. Overview + +- Testing framework: **pytest**. + - Reason: concise syntax, strong fixtures (`monkeypatch`), and mature ecosystem for API testing. +- Covered functionality: + - `GET /` success case, response schema and endpoint list. + - `GET /` request metadata behavior (`X-Forwarded-For`, `User-Agent`). + - `GET /health` success case. + - Error handling: custom `404` and generic `500` JSON responses. +- CI triggers: + - `push` and `pull_request` for `main`/`master`. + - Path filters: workflow runs only when `app_python/**` or `.github/workflows/python-ci.yml` changes. + - Manual trigger via `workflow_dispatch`. +- Versioning strategy: **CalVer (`YYYY.MM.DD`)**. + - Chosen because the service is release-frequency oriented and date tags are easy to map to deployment day. + +## 2. 
Workflow Evidence + +- Workflow page: + - https://github.com/Vlad1mirZhidkov/DevOps-Core-Course/actions/workflows/python-ci.yml +- Actions runs: + - https://github.com/Vlad1mirZhidkov/DevOps-Core-Course/actions +- Docker image repository: + - https://hub.docker.com/r/vladimirzhidkov/devops-info-service +- README with status badges: + - https://github.com/Vlad1mirZhidkov/DevOps-Core-Course/tree/master/app_python#readme + +Local test command: +```bash +pytest app_python/tests --cov=app_python --cov-report=term-missing --cov-report=xml --cov-fail-under=70 +``` + +Local verification result: +- `python -m ruff check app_python` -> `All checks passed!` +- `python -m pytest app_python/tests --cov=app_python --cov-report=term-missing --cov-report=xml --cov-fail-under=70` -> `6 passed`, total coverage `92.26%`. + +## 3. Best Practices Implemented + +- **Fail fast with job dependencies** + - `docker` job depends on `test`, so image build/push never happens when lint/tests fail. +- **Conditional deployment** + - Docker publish runs only for `push` events on `main`/`master`, not on pull requests. +- **Concurrency control** + - New commits cancel outdated runs on the same ref to reduce queue time and cloud spend. +- **Dependency caching** + - `actions/setup-python` pip cache is enabled with `requirements.txt` and `requirements-dev.txt` as cache keys. +- **Least-privilege permissions** + - Workflow-level `permissions: contents: read` limits token scope. +- **Docker layer caching** + - Buildx + GHA cache (`cache-from/cache-to`) reduce rebuild time when app dependencies are unchanged. +- **Security scanning** + - Snyk scan is integrated with `--severity-threshold=high` and enabled when `SNYK_TOKEN` is provided. +- **Coverage quality gate** + - `--cov-fail-under=70` prevents silent test-quality regressions. + +Caching observation approach: +- Baseline (cold cache): first run after dependency or Docker layer cache miss. 
+- Optimized (warm cache): subsequent run with unchanged dependency manifests. +- Compare `Install dependencies` and Docker build step durations in Actions logs. + +Snyk handling policy: +- Build fails on high/critical dependency vulnerabilities when Snyk is enabled. +- Vulnerabilities are fixed by dependency upgrades or documented risk acceptance if no fix exists. + +## 4. Key Decisions + +- **Versioning Strategy** + - CalVer was selected over SemVer to keep release automation simple and date-oriented. + - For service deployments, release date traceability is more valuable than API-change semantics. + +- **Docker Tags** + - The pipeline pushes two tags: `YYYY.MM.DD` and `latest`. + - Date tag provides immutable release reference; `latest` is convenient for quick pull/testing. + +- **Workflow Triggers** + - `push` + `pull_request` provide branch safety and pre-merge validation. + - Path filters keep monorepo CI efficient by avoiding Python pipeline runs on unrelated changes. + +- **Test Coverage** + - Coverage focuses on public API behavior and error contracts. + - Internal implementation details like logging format are intentionally not hard-asserted to reduce brittle tests. + +## 5. Challenges + +- No blockers after local environment setup; lint and tests pass with coverage above threshold. 
diff --git a/app_python/docs/screenshots/lab01/01-main-endpoint.png b/app_python/docs/screenshots/lab01/01-main-endpoint.png new file mode 100644 index 0000000000..8e783d9383 Binary files /dev/null and b/app_python/docs/screenshots/lab01/01-main-endpoint.png differ diff --git a/app_python/docs/screenshots/lab01/02-health-check.png b/app_python/docs/screenshots/lab01/02-health-check.png new file mode 100644 index 0000000000..f3c88c9dee Binary files /dev/null and b/app_python/docs/screenshots/lab01/02-health-check.png differ diff --git a/app_python/docs/screenshots/lab01/03-formatted-output.png b/app_python/docs/screenshots/lab01/03-formatted-output.png new file mode 100644 index 0000000000..87cd18068d Binary files /dev/null and b/app_python/docs/screenshots/lab01/03-formatted-output.png differ diff --git a/app_python/docs/screenshots/lab02/build.png b/app_python/docs/screenshots/lab02/build.png new file mode 100644 index 0000000000..2195aef6a9 Binary files /dev/null and b/app_python/docs/screenshots/lab02/build.png differ diff --git a/app_python/docs/screenshots/lab02/curl.png b/app_python/docs/screenshots/lab02/curl.png new file mode 100644 index 0000000000..e40d56b000 Binary files /dev/null and b/app_python/docs/screenshots/lab02/curl.png differ diff --git a/app_python/docs/screenshots/lab02/push.png b/app_python/docs/screenshots/lab02/push.png new file mode 100644 index 0000000000..8ebd35491a Binary files /dev/null and b/app_python/docs/screenshots/lab02/push.png differ diff --git a/app_python/docs/screenshots/lab02/run.png b/app_python/docs/screenshots/lab02/run.png new file mode 100644 index 0000000000..3ce516dbd6 Binary files /dev/null and b/app_python/docs/screenshots/lab02/run.png differ diff --git a/app_python/requirements-dev.txt b/app_python/requirements-dev.txt new file mode 100644 index 0000000000..6ecd1da5b0 --- /dev/null +++ b/app_python/requirements-dev.txt @@ -0,0 +1,5 @@ +# Testing & linting +pytest>=8.0 +pytest-cov>=4.1 +httpx>=0.26 +ruff>=0.1 
diff --git a/app_python/requirements.txt b/app_python/requirements.txt new file mode 100644 index 0000000000..331a774400 --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,3 @@ +# Web framework +fastapi==0.115.0 +uvicorn[standard]==0.32.0 diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/app_python/tests/test_app.py b/app_python/tests/test_app.py new file mode 100644 index 0000000000..8bfae6c9ca --- /dev/null +++ b/app_python/tests/test_app.py @@ -0,0 +1,108 @@ +from pathlib import Path +import sys + +from fastapi.testclient import TestClient + +APP_DIR = Path(__file__).resolve().parents[1] +if str(APP_DIR) not in sys.path: + sys.path.insert(0, str(APP_DIR)) + +import app as app_module # noqa: E402 + + +client = TestClient(app_module.app, raise_server_exceptions=False) + + +def test_root_returns_expected_structure(): + response = client.get("/") + assert response.status_code == 200 + payload = response.json() + + required_top = {"service", "system", "runtime", "request", "endpoints"} + assert required_top.issubset(payload.keys()) + + service = payload["service"] + for key in ("name", "version", "description", "framework"): + assert key in service + + system = payload["system"] + for key in ("hostname", "platform", "platform_version", "architecture", "cpu_count", "python_version"): + assert key in system + assert isinstance(system["cpu_count"], int) + + runtime = payload["runtime"] + for key in ("uptime_seconds", "uptime_human", "current_time", "timezone"): + assert key in runtime + assert isinstance(runtime["uptime_seconds"], int) + + request_info = payload["request"] + for key in ("client_ip", "user_agent", "method", "path"): + assert key in request_info + + endpoints = payload["endpoints"] + assert isinstance(endpoints, list) + endpoint_paths = {item["path"] for item in endpoints} + assert {"/", "/health"}.issubset(endpoint_paths) + + +def 
test_root_request_metadata_uses_forwarded_for_header(): + response = client.get( + "/", + headers={ + "X-Forwarded-For": "203.0.113.7, 10.0.0.2", + "User-Agent": "pytest-client", + }, + ) + assert response.status_code == 200 + payload = response.json() + request_info = payload["request"] + + assert request_info["client_ip"] == "203.0.113.7" + assert request_info["user_agent"] == "pytest-client" + assert request_info["method"] == "GET" + assert request_info["path"] == "/" + + +def test_health_endpoint_returns_status(): + response = client.get("/health") + assert response.status_code == 200 + payload = response.json() + + assert payload["status"] == "healthy" + assert "timestamp" in payload + assert isinstance(payload["uptime_seconds"], int) + + +def test_health_returns_500_when_runtime_fails(monkeypatch): + def _boom(): + raise RuntimeError("uptime failed") + + monkeypatch.setattr(app_module, "get_uptime", _boom) + response = client.get("/health") + assert response.status_code == 500 + payload = response.json() + + assert payload["error"] == "Internal Server Error" + assert payload["message"] == "An unexpected error occurred" + + +def test_404_returns_expected_payload(): + response = client.get("/missing-endpoint") + assert response.status_code == 404 + payload = response.json() + + assert payload["error"] == "Not Found" + assert payload["path"] == "/missing-endpoint" + + +def test_500_handler_returns_json(monkeypatch): + def _boom(): + raise RuntimeError("boom") + + monkeypatch.setattr(app_module, "get_system_info", _boom) + response = client.get("/") + assert response.status_code == 500 + payload = response.json() + + assert payload["error"] == "Internal Server Error" + assert payload["message"] == "An unexpected error occurred" diff --git a/docs/LAB04.md b/docs/LAB04.md new file mode 100644 index 0000000000..68a044b440 --- /dev/null +++ b/docs/LAB04.md @@ -0,0 +1,221 @@ +# Lab 04 — Infrastructure as Code (Terraform & Pulumi) + +## 1. 
Cloud Provider & Infrastructure + +**Provider:** Local VirtualBox (7.2.6) — no cloud provider used. +Reason: avoid cloud costs, meet deadline quickly, keep everything reproducible on a local machine. + +**Instance details:** + +| Parameter | Value | +|-----------|-------| +| OS | Ubuntu 22.04 LTS (bento/ubuntu-22.04 Vagrant box) | +| CPUs | 2 | +| RAM | 1 024 MB | +| Disk | ~10 GB (Vagrant box default) | +| Network | NIC 1 — NAT (SSH port-forwarded), NIC 2 — Host-only | +| Cost | $0 | + +**Resources created (per tool):** + +- Virtual machine (VBoxManage import + modifyvm + startvm) +- NAT adapter with SSH port forwarding +- Host-only adapter for direct host⟷guest access + +--- + +## 2. Terraform Implementation + +**Terraform version:** 1.14.5 +**Provider:** `hashicorp/null ~> 3.0` (null_resource + local-exec) + +> The official `terra-farm/virtualbox` provider is incompatible with +> VirtualBox 7.x (Guest Additions mismatch), so VMs are managed through +> `VBoxManage` CLI invocations wrapped in `null_resource` provisioners. + +### Project structure + +``` +terraform/ +├── main.tf # null_resource with local-exec (create + destroy) +├── variables.tf # vm_name, vm_image_url, vm_cpus, vm_memory, host_only_adapter +├── outputs.tf # vm_name, ssh_command, host_only_ip +└── .gitignore # *.tfstate, .terraform/, *.box, *.ova, terraform.tfvars +``` + +### Key configuration decisions + +1. **null_resource** — downloads the Vagrant `.box`, extracts OVF, and imports via `VBoxManage`. +2. **NAT + Host-Only dual NIC** — NAT on NIC 1 gives internet + port-forwarded SSH (`localhost:2222` → VM `22`), host-only on NIC 2 allows direct IP access. +3. **VBoxManage in `local-exec`** — all commands run locally in PowerShell. +4. **Destroy provisioner** — cleanly powers off and unregisters the VM. + +### Terminal output (key commands) + +``` +> terraform init +Initializing the backend... +Initializing provider plugins... +- Finding hashicorp/null versions matching "~> 3.0"... 
+- Installing hashicorp/null v3.2.4... +- Installed hashicorp/null v3.2.4 +Terraform has been successfully initialized! + +> terraform plan +Plan: 1 to add, 0 to change, 0 to destroy. + +> terraform apply -auto-approve +null_resource.ubuntu_vm: Creating... +null_resource.ubuntu_vm: Still creating... [10s elapsed] +... +null_resource.ubuntu_vm: Creation complete +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. + +Outputs: + ssh_command = "ssh -p 2222 vagrant@127.0.0.1" + vm_name = "ubuntu-devops" +``` + +### SSH verification + +``` +> ssh -p 2222 vagrant@127.0.0.1 +vagrant@vagrant:~$ uname -a +Linux vagrant 5.15.0-116-generic ... x86_64 GNU/Linux +``` + +### Challenges + +- `terra-farm/virtualbox` provider: "can't convert vbox network" error → switched to `null_resource`. +- Host-only DHCP was disabled → had to enable it via `VBoxManage dhcpserver modify --enable`. +- Cyrillic characters in workspace path (`Рабочий стол`) caused encoding issues with PowerShell's `-File` parameter. + +--- + +## 3. 
Pulumi Implementation + +**Pulumi version:** 3.221.0 +**Language:** Python 3.12 +**Provider package:** `pulumi-command` (`local.Command`) + +### Project structure + +``` +pulumi/ +├── __main__.py # Pulumi program (local.Command) +├── scripts/ +│ ├── create_vm.ps1 # VBoxManage create/start script +│ └── destroy_vm.ps1 # VBoxManage destroy script +├── Pulumi.yaml # Project metadata +├── Pulumi.dev.yaml # Stack config (vm_name, cpus, memory, …) +├── requirements.txt # pulumi, pulumi-command +├── venv/ # Python virtual environment +└── .gitignore # .pulumi/, venv/, __pycache__/ +``` + +### How code differs from Terraform + +| Aspect | Terraform | Pulumi | +|--------|-----------|--------| +| Resource definition | HCL `null_resource` block with `provisioner "local-exec"` | Python `local.Command(create=…, delete=…, environment=…)` | +| Variables | `variables.tf` + `var.name` | `pulumi.Config().get("key")` | +| Outputs | `output "name" { value = … }` | `pulumi.export("name", value)` | +| State | `terraform.tfstate` (local file) | Pulumi local backend (`file://~`) with encrypted secrets | +| Execution | `terraform apply` | `pulumi up` | + +### Challenges + +- Pulumi passphrase & `encryptionsalt` mismatch when recreating the stack → had to remove old `encryptionsalt` from `Pulumi.dev.yaml`. +- Inline multiline PowerShell strings caused `TerminatorExpectedAtEndOfString` errors → extracted scripts into external `.ps1` files. +- Cyrillic in workspace path broke `powershell -File "path"` → scripts are copied to `%TEMP%` at runtime and parameters passed via environment variables. 
+ +### Terminal output + +``` +> pulumi preview + Type Name Plan + pulumi:pulumi:Stack devops-virtualbox-dev + + └─ command:local:Command ubuntu-vm create +Resources: + + 1 to create + 1 unchanged + +> pulumi up --yes +Updating (dev): + Type Name Status + pulumi:pulumi:Stack devops-virtualbox-dev + + └─ command:local:Command ubuntu-vm created +Outputs: + ssh_command : "ssh -p 2223 vagrant@127.0.0.1" + vm_name : "ubuntu-pulumi" +Resources: + + 1 created + 1 unchanged +``` + +### SSH verification + +``` +> ssh -p 2223 vagrant@127.0.0.1 +vagrant@vagrant:~$ uname -a +Linux vagrant 5.15.0-116-generic ... x86_64 GNU/Linux +``` + +--- + +## 4. Terraform vs Pulumi Comparison + +### Ease of Learning + +Terraform was easier to start with — HCL is simple and the documentation ecosystem is larger. Pulumi required understanding both the SDK and the underlying programming language (Python), and setting up a virtual environment with dependencies. For a beginner, Terraform's `init → plan → apply` cycle is more straightforward. + +### Code Readability + +Terraform's declarative HCL reads more like a config file, which is ideal for simple infrastructure. Pulumi's Python code is more verbose but allows real programming logic (loops, conditionals, imports). For this small project, Terraform is more readable; for larger projects with dynamic infrastructure, Pulumi would win. + +### Debugging + +Terraform's error messages are generally clearer and more actionable. Pulumi's errors (especially from `pulumi_command`) were opaque — exit codes like `0xfffd0000` and garbled Cyrillic error text made debugging harder. Terraform's `plan` output is also easier to parse than Pulumi's `preview`. + +### Documentation + +Terraform has a much larger community and more documented examples. Pulumi has excellent official docs, but the community is smaller. When using uncommon providers like `pulumi_command`, finding solutions to edge cases was harder. 
+ +### Use Case + +Use **Terraform** when you want a stable, well-documented, declarative tool for standard cloud architectures. Use **Pulumi** when you need complex logic, type safety, or want to leverage existing programming skills — it shines in dynamic, multi-environment setups. + +--- + +## 5. Lab 5 Preparation & Cleanup + +**VM for Lab 5:** Yes — keeping the Pulumi-created VM (`ubuntu-pulumi`) running. +**Connection:** `ssh -p 2223 vagrant@127.0.0.1` (password: `vagrant`). + +**Cleanup status:** + +| Item | Status | +|------|--------| +| Terraform VM (`ubuntu-devops`) | Destroyed (`terraform destroy`) | +| Pulumi VM (`ubuntu-pulumi`) | **Running** — kept for Lab 5 | +| Terraform state file | Removed (no `terraform.tfstate` in repo) | +| Cloud resources | N/A (local VirtualBox only) | +| Secrets in code | None committed | + +--- + +## Bonus: IaC CI/CD (terraform-ci.yml) + +A GitHub Actions workflow [.github/workflows/terraform-ci.yml](../.github/workflows/terraform-ci.yml) was created that: + +- Triggers on PRs and pushes touching `terraform/**` +- Runs `terraform fmt -check` for formatting +- Runs `terraform init -backend=false` + `terraform validate` for syntax +- Runs `tflint` for best-practice linting + +## Bonus: GitHub Repository Import + +A Terraform configuration for importing the course repository into Terraform management was created in [`terraform/github-import/`](../terraform/github-import/). + +**Why importing matters:** +Managing existing resources (repos, infra) with IaC brings version control, auditability, consistency, and automated validation to previously manual configurations. It eliminates "tribal knowledge" and enables safe, reviewable changes. 
diff --git a/pulumi/.gitignore b/pulumi/.gitignore new file mode 100644 index 0000000000..3e2772bb90 --- /dev/null +++ b/pulumi/.gitignore @@ -0,0 +1,9 @@ +# Pulumi state & config (use Pulumi Cloud or local backend) +.pulumi/ + +# Python virtualenv +venv/ +__pycache__/ + +# Downloaded VM image cache +.pulumi-cache/ diff --git a/pulumi/Pulumi.dev.yaml b/pulumi/Pulumi.dev.yaml new file mode 100644 index 0000000000..bd311869a3 --- /dev/null +++ b/pulumi/Pulumi.dev.yaml @@ -0,0 +1,8 @@ +config: + devops-virtualbox:vm_name: ubuntu-pulumi + devops-virtualbox:vm_cpus: "2" + devops-virtualbox:vm_memory: "1024" + devops-virtualbox:host_only_adapter: VirtualBox Host-Only Ethernet Adapter + devops-virtualbox:vboxmanage: C:\Program Files\Oracle\VirtualBox\VBoxManage.exe + devops-virtualbox:box_url: "https://app.vagrantup.com/bento/boxes/ubuntu-22.04/versions/202407.23.0/providers/virtualbox/amd64/vagrant.box" +encryptionsalt: v1:55bH6i5lUZY=:v1:Up8FaR0q5sSjMy1W:pEOJWisxPYNqVb3w5KSXx95itqeX1Q== diff --git a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml new file mode 100644 index 0000000000..6ed9d5deb0 --- /dev/null +++ b/pulumi/Pulumi.yaml @@ -0,0 +1,6 @@ +name: devops-virtualbox +runtime: + name: python + options: + virtualenv: venv +description: Provision a local VirtualBox Ubuntu VM using Pulumi diff --git a/pulumi/__main__.py b/pulumi/__main__.py new file mode 100644 index 0000000000..d325e77933 --- /dev/null +++ b/pulumi/__main__.py @@ -0,0 +1,62 @@ +""" +Pulumi program to provision a local VirtualBox Ubuntu VM. +Equivalent to the Terraform null_resource approach. +Uses pulumi_command.local.Command with environment variables to pass +parameters to PowerShell scripts (avoids quoting / Unicode path issues). 
+""" + +import os +import shutil +import tempfile +import pulumi +from pulumi_command import local + +# ── Config ────────────────────────────────────────────────────────────────── +config = pulumi.Config() +vm_name = config.get("vm_name") or "ubuntu-pulumi" +vm_cpus = config.get("vm_cpus") or "2" +vm_memory = config.get("vm_memory") or "1024" +host_only = config.get("host_only_adapter") or "VirtualBox Host-Only Ethernet Adapter" +vboxmanage = config.get("vboxmanage") or r"C:\Program Files\Oracle\VirtualBox\VBoxManage.exe" +box_url = config.get("box_url") or ( + "https://app.vagrantup.com/bento/boxes/ubuntu-22.04" + "/versions/202407.23.0/providers/virtualbox/amd64/vagrant.box" +) + +# Copy scripts to an ASCII-safe temp directory (avoids Cyrillic in path) +script_dir = os.path.dirname(os.path.abspath(__file__)) +tmp_scripts = os.path.join(tempfile.gettempdir(), "pulumi_vbox_scripts") +os.makedirs(tmp_scripts, exist_ok=True) +for name in ("create_vm.ps1", "destroy_vm.ps1"): + src = os.path.join(script_dir, "scripts", name) + dst = os.path.join(tmp_scripts, name) + shutil.copy2(src, dst) + +create_ps1 = os.path.join(tmp_scripts, "create_vm.ps1") +destroy_ps1 = os.path.join(tmp_scripts, "destroy_vm.ps1") +cache_dir = os.path.join(tempfile.gettempdir(), "pulumi_vbox_cache") + +# ── Shared env vars for scripts ────────────────────────────────────────────── +env = { + "VBOX_MANAGE": vboxmanage, + "VM_NAME": vm_name, + "BOX_URL": box_url, + "VM_MEMORY": str(vm_memory), + "VM_CPUS": str(vm_cpus), + "HOST_ONLY": host_only, + "CACHE_DIR": cache_dir, +} + +# ── Create VM ──────────────────────────────────────────────────────────────── +vm = local.Command( + "ubuntu-vm", + create=f"powershell -ExecutionPolicy Bypass -File {create_ps1}", + delete=f"powershell -ExecutionPolicy Bypass -File {destroy_ps1}", + environment=env, +) + +# ── Outputs ────────────────────────────────────────────────────────────────── +pulumi.export("vm_name", vm_name) +pulumi.export("ssh_command", f"ssh 
-p 2223 vagrant@127.0.0.1 # password: vagrant") +pulumi.export("host_only_ip_cmd", + f'& "{vboxmanage}" guestproperty get {vm_name} /VirtualBox/GuestInfo/Net/1/V4/IP') diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt new file mode 100644 index 0000000000..bb836d781e --- /dev/null +++ b/pulumi/requirements.txt @@ -0,0 +1,2 @@ +pulumi>=3.0.0,<4.0.0 +pulumi-command>=0.11.0 diff --git a/pulumi/scripts/create_vm.ps1 b/pulumi/scripts/create_vm.ps1 new file mode 100644 index 0000000000..52835e199e --- /dev/null +++ b/pulumi/scripts/create_vm.ps1 @@ -0,0 +1,53 @@ +# Read config from environment variables (avoids quoting issues) +$VBoxManage = $env:VBOX_MANAGE +$VmName = $env:VM_NAME +$BoxUrl = $env:BOX_URL +$VmMemory = $env:VM_MEMORY +$VmCpus = $env:VM_CPUS +$HostOnlyAdapter = $env:HOST_ONLY +$CacheDir = $env:CACHE_DIR + +$ErrorActionPreference = 'Stop' +$boxFile = Join-Path $CacheDir "ubuntu.box" +$ovfDir = Join-Path $CacheDir "ovf" + +# Skip if VM already running +$existing = & $VBoxManage list runningvms 2>$null | Select-String $VmName +if ($existing) { Write-Host "VM already running."; exit 0 } + +# Download box if not cached +New-Item -ItemType Directory -Force -Path $CacheDir | Out-Null +if (!(Test-Path $boxFile)) { + Write-Host "Downloading Ubuntu 22.04 box (~500 MB)..." + Invoke-WebRequest -Uri $BoxUrl -OutFile $boxFile -UseBasicParsing +} else { + Write-Host "Box already cached." +} + +# Extract OVF +New-Item -ItemType Directory -Force -Path $ovfDir | Out-Null +if (!(Get-ChildItem $ovfDir -Filter '*.ovf' -Recurse -ErrorAction SilentlyContinue)) { + Write-Host "Extracting box..." + tar -xf $boxFile -C $ovfDir +} +$ovf = (Get-ChildItem $ovfDir -Filter '*.ovf' -Recurse | Select-Object -First 1).FullName + +# Import VM +Write-Host "Importing VM into VirtualBox..." 
+& $VBoxManage import $ovf ` + --vsys 0 --vmname $VmName ` + --memory $VmMemory --cpus $VmCpus + +# NIC1: NAT + SSH port forward localhost:2223 -> VM:22 +& $VBoxManage modifyvm $VmName --nic1 nat +& $VBoxManage modifyvm $VmName --natpf1 "ssh,tcp,,2223,,22" + +# NIC2: Host-only +& $VBoxManage modifyvm $VmName --nic2 hostonly ` + --hostonlyadapter2 $HostOnlyAdapter + +# Start headless +Write-Host "Starting VM..." +& $VBoxManage startvm $VmName --type headless +Write-Host "Done. VM '$VmName' is running." +Write-Host "SSH: ssh -p 2223 vagrant@127.0.0.1 (password: vagrant)" diff --git a/pulumi/scripts/destroy_vm.ps1 b/pulumi/scripts/destroy_vm.ps1 new file mode 100644 index 0000000000..175dc054ec --- /dev/null +++ b/pulumi/scripts/destroy_vm.ps1 @@ -0,0 +1,8 @@ +# Read config from environment variables +$VBoxManage = $env:VBOX_MANAGE +$VmName = $env:VM_NAME + +& $VBoxManage controlvm $VmName poweroff 2>$null +Start-Sleep 3 +& $VBoxManage unregistervm $VmName --delete +Write-Host "VM '$VmName' deleted." 
diff --git a/screenshots/lab06/task1.png b/screenshots/lab06/task1.png new file mode 100644 index 0000000000..dcafe36b62 Binary files /dev/null and b/screenshots/lab06/task1.png differ diff --git a/screenshots/lab06/task2.png b/screenshots/lab06/task2.png new file mode 100644 index 0000000000..e9bb2a5a33 Binary files /dev/null and b/screenshots/lab06/task2.png differ diff --git a/screenshots/lab06/task3.png b/screenshots/lab06/task3.png new file mode 100644 index 0000000000..871099d4e2 Binary files /dev/null and b/screenshots/lab06/task3.png differ diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000000..07b60bd03b --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,23 @@ +# Terraform state files +*.tfstate +*.tfstate.* +*.tfstate.backup + +# Terraform working directory +.terraform/ +.terraform.lock.hcl + +# Variable values (may contain secrets) +terraform.tfvars +*.tfvars + +# Crash log +crash.log + +# SSH private keys +*.pem +*.key + +# Downloaded VM images cache +*.box +*.ova diff --git a/terraform/github-import/.gitignore b/terraform/github-import/.gitignore new file mode 100644 index 0000000000..614b2eb4c1 --- /dev/null +++ b/terraform/github-import/.gitignore @@ -0,0 +1,12 @@ +# Terraform +*.tfstate +*.tfstate.* +.terraform/ +.terraform.lock.hcl +terraform.tfvars +*.tfvars +crash.log +override.tf +override.tf.json +*_override.tf +*_override.tf.json diff --git a/terraform/github-import/main.tf b/terraform/github-import/main.tf new file mode 100644 index 0000000000..d54bcb28c2 --- /dev/null +++ b/terraform/github-import/main.tf @@ -0,0 +1,47 @@ +terraform { + required_providers { + github = { + source = "integrations/github" + version = "~> 5.0" + } + } +} + +# Authenticate via GITHUB_TOKEN environment variable (never hardcode!) 
+provider "github" {} + +variable "repo_name" { + description = "Name of the GitHub repository to import" + type = string + default = "DevOps-Core-Course" +} + +# ── Repository resource ───────────────────────────────────────────────────── +# After writing this block, run: +# export GITHUB_TOKEN="ghp_..." # or $env:GITHUB_TOKEN = "ghp_..." +# terraform init +# terraform import github_repository.course_repo DevOps-Core-Course +# terraform plan # should show no changes if config matches reality +resource "github_repository" "course_repo" { + name = var.repo_name + description = "DevOps course lab assignments" + visibility = "public" + + has_issues = true + has_wiki = false + has_projects = false + + # Prevent Terraform from destroying the repo on `terraform destroy` + lifecycle { + prevent_destroy = true + } +} + +# ── Outputs ────────────────────────────────────────────────────────────────── +output "repo_full_name" { + value = github_repository.course_repo.full_name +} + +output "repo_html_url" { + value = github_repository.course_repo.html_url +} diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000000..4fa11ff751 --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,87 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + null = { + source = "hashicorp/null" + version = "~> 3.0" + } + } +} + +locals { + vboxmanage = "C:\\Program Files\\Oracle\\VirtualBox\\VBoxManage.exe" +} + +resource "null_resource" "ubuntu_vm" { + triggers = { + vm_name = var.vm_name + vm_cpus = var.vm_cpus + vm_mem = var.vm_memory + } + + provisioner "local-exec" { + interpreter = ["PowerShell", "-Command"] + command = <<-EOT + $ErrorActionPreference = 'Stop' + $vbox = '${local.vboxmanage}' + $vmName = '${var.vm_name}' + $boxUrl = '${var.vm_image_url}' + $boxFile = "${path.module}\.terraform\ubuntu.box" + $ovfDir = "${path.module}\.terraform\ovf" + + # Skip if VM already running + $existing = & $vbox list runningvms 2>$null | Select-String $vmName 
+ if ($existing) { Write-Host "VM already running."; exit 0 } + + # Download box if not cached + New-Item -ItemType Directory -Force -Path (Split-Path $boxFile) | Out-Null + if (!(Test-Path $boxFile)) { + Write-Host "Downloading Ubuntu 22.04 box (~500 MB)..." + Invoke-WebRequest -Uri $boxUrl -OutFile $boxFile -UseBasicParsing + } else { Write-Host "Box already cached." } + + # Extract OVF + New-Item -ItemType Directory -Force -Path $ovfDir | Out-Null + if (!(Get-ChildItem $ovfDir -Filter '*.ovf' -Recurse)) { + Write-Host "Extracting box..." + tar -xf $boxFile -C $ovfDir + } + $ovf = (Get-ChildItem $ovfDir -Filter '*.ovf' -Recurse | Select-Object -First 1).FullName + + # Import VM + Write-Host "Importing VM into VirtualBox..." + & $vbox import $ovf ` + --vsys 0 --vmname $vmName ` + --memory ${var.vm_memory} --cpus ${var.vm_cpus} + + # NIC1: NAT (for internet + port-forwarded SSH) + & $vbox modifyvm $vmName --nic1 nat + # SSH port forward: localhost:2222 -> VM:22 + & $vbox modifyvm $vmName ` + --natpf1 "ssh,tcp,,2222,,22" + # NIC2: Host-only (for direct access) + & $vbox modifyvm $vmName --nic2 hostonly ` + --hostonlyadapter2 '${var.host_only_adapter}' + + # Start headless + Write-Host "Starting VM..." + & $vbox startvm $vmName --type headless + Write-Host "Done. VM '$vmName' is running." + Write-Host "SSH: ssh -p 2222 vagrant@127.0.0.1 (password: vagrant)" + EOT + } + + provisioner "local-exec" { + when = destroy + interpreter = ["PowerShell", "-Command"] + command = <<-EOT + $vbox = 'C:\Program Files\Oracle\VirtualBox\VBoxManage.exe' + $vmName = '${self.triggers.vm_name}' + & $vbox controlvm $vmName poweroff 2>$null + Start-Sleep 3 + & $vbox unregistervm $vmName --delete + Write-Host "VM '$vmName' deleted." 
+ EOT + } +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 0000000000..f37c446b64 --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,14 @@ +output "vm_name" { + description = "Name of the created VM" + value = var.vm_name +} + +output "ssh_command" { + description = "SSH connection command (NAT port forwarding)" + value = "ssh -p 2222 vagrant@127.0.0.1 # password: vagrant" +} + +output "host_only_ip" { + description = "Get host-only IP after VM boots" + value = "& 'C:\\Program Files\\Oracle\\VirtualBox\\VBoxManage.exe' guestproperty get ${var.vm_name} /VirtualBox/GuestInfo/Net/1/V4/IP" +} diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 0000000000..4b2fc5b1f0 --- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,30 @@ +variable "vm_name" { + description = "Name of the VirtualBox VM" + type = string + default = "ubuntu-devops" +} + +variable "vm_image_url" { + description = "URL to the VM image (Vagrant box)" + type = string + # bento/ubuntu-22.04 — actively maintained box with up-to-date VirtualBox Guest Additions + default = "https://app.vagrantup.com/bento/boxes/ubuntu-22.04/versions/202407.23.0/providers/virtualbox/amd64/vagrant.box" +} + +variable "vm_cpus" { + description = "Number of CPUs for the VM" + type = number + default = 2 +} + +variable "vm_memory" { + description = "Amount of RAM for the VM in MB" + type = number + default = 1024 +} + +variable "host_only_adapter" { + description = "Name of the VirtualBox host-only network adapter" + type = string + default = "VirtualBox Host-Only Ethernet Adapter" +}