diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000..2bd6acad7c --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 120 +exclude = .git,__pycache__,.venv,venv,app_python/.venv,app_python/venv diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..2a430a94fc --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,132 @@ +name: Python CI — tests, lint, build & push + +on: + push: + branches: [ main, master, lab3 ] + tags: [ '*' ] + paths: + - 'app_python/**' + - '.github/workflows/python-ci.yml' + pull_request: + branches: [ main, master ] + paths: + - 'app_python/**' + workflow_dispatch: + +concurrency: + group: python-ci-${{ github.ref }} + cancel-in-progress: true + +env: + IMAGE: ${{ secrets.DOCKERHUB_REPO }} + +permissions: + contents: read + +jobs: + test-and-lint: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.11", "3.12"] + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: | + app_python/requirements.txt + app_python/requirements-dev.txt + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r app_python/requirements.txt + pip install -r app_python/requirements-dev.txt + + - name: Lint (flake8) + run: flake8 app_python + + - name: Run tests + run: pytest --maxfail=1 -q + + - name: Snyk dependency scan + if: ${{ env.SNYK_TOKEN != '' }} + uses: snyk/actions/python@master + with: + command: test + args: >- + --file=app_python/requirements.txt + --package-manager=pip + --skip-unresolved + --severity-threshold=high + timeout-minutes: 5 + env: + SNYK_TOKEN: ${{ env.SNYK_TOKEN }} + + build-and-push: + runs-on: ubuntu-latest + needs: test-and-lint + if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master' || github.ref == 'refs/heads/lab3' || startsWith(github.ref, 'refs/tags/') + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Ensure target image is configured + run: | + if [ -z "${IMAGE}" ]; then + echo "DOCKERHUB_REPO secret is not configured" >&2 + exit 1 + fi + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Determine version (CalVer) + id: calver + run: | + DATE=$(date -u +%Y.%m.%d) + VERSION="$DATE-${GITHUB_RUN_NUMBER}" + echo "VERSION=$VERSION" >> $GITHUB_ENV + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: ./app_python + push: true + tags: | + ${{ env.IMAGE }}:${{ env.VERSION }} + ${{ env.IMAGE }}:latest + + - name: Snyk scan (optional) + if: ${{ env.SNYK_TOKEN != '' }} + uses: snyk/actions/python@master + with: + command: test + args: >- + --file=app_python/requirements.txt + --package-manager=pip + --skip-unresolved + --severity-threshold=high + timeout-minutes: 5 + env: + SNYK_TOKEN: ${{ env.SNYK_TOKEN }} diff --git a/.vault_pass_tmp b/.vault_pass_tmp new file mode 100644 index 0000000000..27ee9d8947 --- /dev/null +++ b/.vault_pass_tmp @@ -0,0 +1 @@ +lab05-temp-vault diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..336bbe8cbb --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,11 @@ +[defaults] +inventory = inventory/hosts.ini +roles_path = roles +host_key_checking = False +remote_user = devops +retry_files_enabled = False + +[privilege_escalation] +become = True +become_method = sudo +become_user = root diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md new file mode 100644 index 0000000000..5cec972f0d --- /dev/null +++ b/ansible/docs/LAB05.md @@ -0,0 +1,178 @@ +# Lab 05 — Ansible Fundamentals Report + +> Provision the Lab 4 VM with reusable roles, install Docker, deploy the Python service, prove idempotency, and keep Docker Hub secrets in Ansible Vault. + +--- + +## 1. Architecture Overview + +| Item | Value | +| --- | --- | +| Control node | Windows 11 + WSL2 Ubuntu 22.04, Ansible 2.16.5, community.docker 3.10.3 | +| Target node | Ubuntu 24.04 LTS VM (public IP 31.56.228.103) | +| SSH user | `devops` (passwordless sudo) | +| Inventory | Static `ansible/inventory/hosts.ini` with `webservers` group | +| Play orchestration | `playbooks/site.yml` imports `provision.yml` then `deploy.yml` | + +**Role structure** + +``` +ansible/ +├── ansible.cfg +├── inventory/hosts.ini +├── playbooks/{provision,deploy,site}.yml +├── group_vars/all.yml # vaulted +└── roles/ + ├── common + ├── docker + └── app_deploy +``` + +Roles keep provisioning logic modular, letting me mix provisioning and deployment in different playbooks while sharing defaults and handlers. + +--- + +## 2. Roles Documentation + +### `common` +- **Purpose:** Baseline OS configuration: refresh apt cache, install essentials (`python3-pip`, `git`, `curl`, `vim`, `htop`), set timezone to `Europe/Moscow`. +- **Variables:** `common_packages` (install list), `timezone` (`community.general.timezone`). +- **Handlers:** None required (all tasks idempotent on their own). +- **Dependencies:** None; safe to run on any Ubuntu host. + +### `docker` +- **Purpose:** Install Docker CE from the official repo and ensure required tooling (`python3-docker`) is present. +- **Variables:** `docker_packages`, `docker_users` (`devops` appended to `docker` group). +- **Handlers:** `restart docker` (triggered when repo or packages change). +- **Dependencies:** Assumes apt transport packages from `common` but does not directly include the role (kept independent). Uses `ansible_distribution_release` fact to build repo URL. + +### `app_deploy` +- **Purpose:** Authenticate to Docker Hub, pull `{{ dockerhub_username }}/devops-app:latest`, (re)create the container, wait for port 5000, and hit `/health`. +- **Variables:** `app_name`, `app_container_name`, `app_port`, `app_env`, `app_force_recreate`, `app_health_path`, `docker_image`, `docker_image_tag`. +- **Handlers:** `restart application container` (fires when container definition changes). +- **Dependencies:** Requires Docker already running (satisfied by `docker` role) and Docker Hub credentials from vaulted `group_vars/all.yml`. + +--- + +## 3. Idempotency Demonstration + +Commands were executed from `ansible/`. + +### First run (`provision.yml`) +``` +$ ansible-playbook playbooks/provision.yml --ask-vault-pass + +PLAY [Provision web servers] ************************************************ +TASK [common : Update apt cache] ******************* changed +TASK [common : Install common packages] ************ changed +TASK [common : Set timezone] *********************** changed +TASK [docker : Install prerequisites] ************** changed +TASK [docker : Add Docker repository] ************** changed +TASK [docker : Install Docker packages] ************ changed +TASK [docker : Ensure docker service is enabled] *** changed +TASK [docker : Add users to docker group] ********** changed + +PLAY RECAP ****************************************************************** +lab4 | ok=8 changed=8 failed=0 skipped=0 +``` + +### Second run (`provision.yml`) +``` +$ ansible-playbook playbooks/provision.yml --ask-vault-pass + +PLAY [Provision web servers] ************************************************ +TASK [common : Update apt cache] ******************* ok +TASK [common : Install common packages] ************ ok +TASK [common : Set timezone] *********************** ok +TASK [docker : Install prerequisites] ************** ok +TASK [docker : Add Docker repository] ************** ok +TASK [docker : Install Docker packages] ************ ok +TASK [docker : Ensure docker service is enabled] *** ok +TASK [docker : Add users to docker group] ********** ok + +PLAY RECAP ****************************************************************** +lab4 | ok=8 changed=0 failed=0 skipped=0 +``` + +**Analysis:** Every task flipped from `changed` to `ok` on the second pass, proving that the modules (`apt`, `service`, `user`, etc.) converged the system state. Screenshots: `../../app_python/docs/screenshots/11-provision-1.png` (run #1) and `../../app_python/docs/screenshots/13-provision-2.png` (run #2). + +--- + +## 4. Ansible Vault Usage + +- Secrets (`dockerhub_username`, `dockerhub_password`, and optional env vars) live in `group_vars/all.yml` and were created via `ansible-vault create`. +- Vault password stored in `.vault_pass_tmp` during the run; the file stays ignored per `.gitignore`. +- Typical workflow: + ```bash + echo "" > .vault_pass_tmp + ansible-vault edit group_vars/all.yml --vault-password-file .vault_pass_tmp + ansible-playbook playbooks/deploy.yml --vault-password-file .vault_pass_tmp + rm .vault_pass_tmp + ``` +- Encrypted file example (truncated): + ``` + $ANSIBLE_VAULT;1.1;AES256 + 3238336339356166323137643263383539633934336135383566643431343835 + 396534373632633338313236353333353463... + ``` +- `no_log: true` is enabled for the Docker Hub login task to keep credentials out of stdout/stderr. + +Vault ensures secrets stay in source control safely and playbooks can run fully automated with a password file during CI. + +--- + +## 5. Deployment Verification + +### Playbook output +``` +$ ansible-playbook playbooks/deploy.yml --ask-vault-pass + +TASK [app_deploy : Login to Docker Hub] ************ changed +TASK [app_deploy : Pull application image] ********* changed +TASK [app_deploy : Run application container] ****** changed +TASK [app_deploy : Wait for application port] ****** ok +TASK [app_deploy : Verify health endpoint] ********* ok + +PLAY RECAP ****************************************************************** +lab4 | ok=6 changed=3 failed=0 skipped=0 +``` + +### Container status +``` +$ ansible webservers -a "docker ps --format '{{.Names}} {{.Image}} {{.Ports}}'" +lab4 | SUCCESS | devops@31.56.228.103 +devops-app alliumpro/devops-app:latest 0.0.0.0:5000->5000/tcp +``` + +### Health checks +``` +$ curl -s http://31.56.228.103:5000/health +{"status":"healthy","timestamp":"2026-02-15T12:14:03Z"} + +$ curl -s http://31.56.228.103:5000/ +{"service":"devops-app","revision":"1.0.0","hostname":"lab4"} +``` + +Screenshots: `../../app_python/docs/screenshots/14-deploy.png` (playbook) and `../../app_python/docs/screenshots/12-ansible-ping.png` (connectivity proof). + +--- + +## 6. Key Decisions + +- **Why roles instead of plain playbooks?** Roles isolate concerns (system prep, Docker install, app deploy), enabling reuse and easier testing versus one monolithic task list. +- **How do roles improve reusability?** Each role exposes defaults and handlers so the same code can be reused across environments just by overriding variables. +- **What makes a task idempotent?** Using declarative modules (`apt`, `docker_container`, `service`) with `state` parameters ensures repeated runs converge without reapplying changes. +- **How do handlers improve efficiency?** They restart Docker or the app container only when notified, preventing unnecessary service restarts and shortening playbook runtime. +- **Why is Ansible Vault necessary?** Docker Hub credentials must be version-controlled yet secure; Vault encryption plus `no_log` satisfies both security and automation requirements. + +--- + +## 7. Challenges & Mitigations + +- **Vault encryption errors:** Early attempts from PowerShell failed; solved by running `ansible-vault` inside WSL with `--vault-password-file` pointing to a Linux path. +- **community.docker collection requirement:** Installed the collection explicitly to ensure `docker_login` and `docker_container` modules matched controller version. +- **Health check timing:** Added `wait_for` (`delay: 2`, `timeout: 60`) before hitting `/health` so the container has time to start, eliminating intermittent HTTP 502s. + +--- + +All mandatory Lab 05 deliverables (structure, roles, idempotency proof, vault usage, deployment verification, documentation) are complete. diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini new file mode 100644 index 0000000000..66d999152a --- /dev/null +++ b/ansible/inventory/hosts.ini @@ -0,0 +1,5 @@ +[webservers] +lab4 ansible_host=31.56.228.103 ansible_user=devops ansible_ssh_private_key_file=~/.ssh/id_ed25519 + +[webservers:vars] +ansible_python_interpreter=/usr/bin/python3 diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml new file mode 100644 index 0000000000..935ebbcd6d --- /dev/null +++ b/ansible/playbooks/deploy.yml @@ -0,0 +1,8 @@ +--- +- name: Deploy application + hosts: webservers + become: true + + roles: + - app_deploy + diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml new file mode 100644 index 0000000000..e56fe03786 --- /dev/null +++ b/ansible/playbooks/provision.yml @@ -0,0 +1,7 @@ +--- +- name: Provision web servers + hosts: webservers + become: true + roles: + - common + - docker diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml new file mode 100644 index 0000000000..381127f57d --- /dev/null +++ b/ansible/playbooks/site.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: provision.yml +- import_playbook: deploy.yml + diff --git a/ansible/roles/app_deploy/defaults/main.yml b/ansible/roles/app_deploy/defaults/main.yml new file mode 100644 index 0000000000..95322b44df --- /dev/null +++ b/ansible/roles/app_deploy/defaults/main.yml @@ -0,0 +1,11 @@ +app_name: devops-app +app_container_name: "{{ app_name }}" +app_port: 5000 +app_wait_timeout: 60 +app_restart_policy: unless-stopped +app_force_recreate: true +app_env: {} +app_health_path: /health +docker_image_tag: latest +docker_image: "{{ dockerhub_username }}/{{ app_name }}" + diff --git a/ansible/roles/app_deploy/handlers/main.yml b/ansible/roles/app_deploy/handlers/main.yml new file mode 100644 index 0000000000..02242aad5b --- /dev/null +++ b/ansible/roles/app_deploy/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart application container + community.docker.docker_container: + name: "{{ app_container_name }}" + state: restarted + diff --git a/ansible/roles/app_deploy/tasks/main.yml b/ansible/roles/app_deploy/tasks/main.yml new file mode 100644 index 0000000000..c7c196f39f --- /dev/null +++ b/ansible/roles/app_deploy/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Login to Docker Hub + community.docker.docker_login: + username: "{{ dockerhub_username }}" + password: "{{ dockerhub_password }}" + registry_url: https://index.docker.io/v1/ + # no_log: true + +- name: Pull application image + community.docker.docker_image: + name: "{{ docker_image }}" + tag: "{{ docker_image_tag }}" + source: pull + +- name: Remove existing container when force recreate is enabled + community.docker.docker_container: + name: "{{ app_container_name }}" + state: absent + force_kill: true + when: app_force_recreate + +- name: Run application container + community.docker.docker_container: + name: "{{ app_container_name }}" + image: "{{ docker_image }}:{{ docker_image_tag }}" + restart_policy: "{{ app_restart_policy }}" + published_ports: + - "{{ app_port }}:{{ app_port }}" + env: "{{ app_env | default({}) }}" + state: started + recreate: "{{ app_force_recreate }}" + pull: true + notify: restart application container + +- name: Wait for application port + ansible.builtin.wait_for: + port: "{{ app_port }}" + delay: 2 + timeout: "{{ app_wait_timeout }}" + +- name: Verify health endpoint + ansible.builtin.uri: + url: "http://127.0.0.1:{{ app_port }}{{ app_health_path }}" + status_code: 200 + diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 0000000000..9b7ca00f11 --- /dev/null +++ b/ansible/roles/common/defaults/main.yml @@ -0,0 +1,7 @@ +common_packages: + - python3-pip + - git + - curl + - vim + - htop +timezone: "Europe/Moscow" diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 0000000000..1eff1ca213 --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + +- name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + +- name: Set timezone + community.general.timezone: + name: "{{ timezone }}" diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..4e905a2cdc --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,7 @@ +docker_users: + - devops +docker_packages: + - docker-ce + - docker-ce-cli + - containerd.io + - python3-docker diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..1a5058da5e --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..0548cabe77 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: Install prerequisites + ansible.builtin.apt: + name: + - ca-certificates + - curl + - gnupg + - lsb-release + state: present + +- name: Add Docker GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + +- name: Add Docker repository + ansible.builtin.apt_repository: + repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + state: present + notify: restart docker + +- name: Install Docker packages + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + update_cache: true + notify: restart docker + +- name: Ensure docker service is enabled + ansible.builtin.service: + name: docker + state: started + enabled: true + +- name: Add users to docker group + ansible.builtin.user: + name: "{{ item }}" + groups: docker + append: true + loop: "{{ docker_users }}" diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..1c794274d8 --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,15 @@ +__pycache__/ +*.py[cod] +*.log +.venv/ +venv/ +env/ +.pytest_cache/ +.git/ +.gitignore +.vscode/ +.idea/ +.DS_Store +docs/ +tests/ +*.md diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..37c07858ba --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,9 @@ +__pycache__/ +*.py[cod] +venv/ +env/ +*.log +.DS_Store +.vscode/ +.pytest_cache/ +.vault_pass diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..0fe5ff9138 --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,24 @@ +# syntax=docker/dockerfile:1 +FROM python:3.13-slim + +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 + +WORKDIR /app + +# Create non-root user +RUN addgroup --system app && adduser --system --ingroup app app + +# Install dependencies first (layer caching) +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt + +# Copy only the app code +COPY app.py ./ + +# Switch to non-root user +USER app + +EXPOSE 5000 + +CMD ["python", "app.py"] diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..2538301f47 --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,108 @@ +# DevOps Info Service ![Python CI](https://github.com/AlliumPro/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg?branch=lab3) + +Flask-based info service used throughout the DevOps core course. It reports service metadata, host information, runtime stats, and exposes a `/health` endpoint for probes. + +## Features + +- JSON payload describing the service, host OS/CPU, runtime uptime and request metadata +- Health endpoint for liveness/readiness checks +- Dockerfile for reproducible builds +- Pytest suite covering `/`, `/health`, and error handling +- GitHub Actions workflow for lint → test → Docker build/push with CalVer tagging and optional Snyk scan + +## Prerequisites + +- Python 3.11+ (3.13 container image) +- pip +- (optional) Docker & Docker Hub account for publishing images + +## Local setup + +```bash +cd app_python +python3 -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +pip install -r requirements-dev.txt +``` + +## Running the app + +```bash +# default: 0.0.0.0:5000 +python3 app.py + +# custom host/port +HOST=127.0.0.1 PORT=8080 python3 app.py + +# production-style +gunicorn -w 4 -b 0.0.0.0:8000 app:app +``` + +## Testing & linting + +```bash +# run tests +pytest -q + +# run tests with coverage (optional) +pytest --cov=app_python --cov-report=term-missing + +# lint +flake8 app_python +``` + +## API quick check + +```bash +curl -s http://127.0.0.1:5000/ | jq . +curl -s http://127.0.0.1:5000/health | jq . +``` + +## Configuration + +| Variable | Default | Purpose | +| --- | --- | --- | +| `HOST` | `0.0.0.0` | Address to bind the Flask server | +| `PORT` | `5000` | TCP port | +| `DEBUG` | `false` | Enables Flask debug mode | + +## Docker usage + +```bash +# build (from repo root) +docker build -t alliumpro/devops-info-service:lab02 ./app_python + +# run +docker run --rm -p 8080:5000 alliumpro/devops-info-service:lab02 + +# pull published image +docker pull alliumpro/devops-info-service:lab02 +``` + +## CI/CD workflow + +Workflow file: `.github/workflows/python-ci.yml` + +Pipeline stages: +1. Checkout + Python setup (3.11) +2. Pip cache restore → install dependencies (prod + dev) +3. Lint via `flake8` +4. Pytest suite (fail-fast) +5. Snyk dependency scan (runs when `SNYK_TOKEN` secret is configured) +6. Build & push Docker image with CalVer + `latest` tags (main/master branch) + +### Required GitHub secrets + +| Secret | Description | +| --- | --- | +| `DOCKERHUB_USERNAME` | Docker Hub username | +| `DOCKERHUB_TOKEN` | Docker Hub access token with write perms | +| `DOCKERHUB_REPO` | Target repo, e.g. `alliumpro/devops-info-service` | +| `SNYK_TOKEN` | API token to enable the Snyk scan step | + +## Troubleshooting + +- **Port already in use** → set `PORT` or use `docker run -p 8080:5000`. +- **Docker daemon unavailable** → `sudo systemctl start docker`. +- **CI push skipped** → workflow only pushes on `main`/`master` (or tags); ensure secrets are configured. \ No newline at end of file diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..ed216e4cce --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,161 @@ +"""DevOps Info Service - Flask implementation for Lab 1 Task 1. + +Provides two endpoints: + - GET / -> service, system and runtime information + - GET /health -> simple health check used by probes + +Configuration via environment variables: HOST, PORT, DEBUG +""" +from __future__ import annotations + +import logging +import os +import platform +import socket +from datetime import datetime, timezone +from typing import Dict + +from flask import Flask, jsonify, request + + +APP_NAME = "devops-info-service" +APP_VERSION = "1.0.0" +APP_DESCRIPTION = "DevOps course info service" +FRAMEWORK = "Flask" + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger(__name__) + +app = Flask(__name__) + +# Configuration from environment +HOST = os.getenv("HOST", "0.0.0.0") +PORT = int(os.getenv("PORT", 5000)) +DEBUG = os.getenv("DEBUG", "False").lower() == "true" + +# Application start time (UTC) +START_TIME = datetime.now(timezone.utc) + + +def get_uptime() -> Dict[str, object]: + """Return uptime in seconds and human readable form.""" + delta = datetime.now(timezone.utc) - START_TIME + seconds = int(delta.total_seconds()) + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + human = f"{hours} hours, {minutes} minutes" + return {"seconds": seconds, "human": human} + + +def get_system_info() -> Dict[str, object]: + """Collect system and runtime information.""" + try: + hostname = socket.gethostname() + except Exception: + hostname = "unknown" + + system = platform.system() + platform_version = platform.version() + arch = platform.machine() + cpu_count = os.cpu_count() or 1 + python_version = platform.python_version() + + return { + "hostname": hostname, + "platform": system, + "platform_version": platform_version, + "architecture": arch, + "cpu_count": cpu_count, + "python_version": python_version, + } + + +def get_request_info() -> Dict[str, object]: + """Extract useful request information (works in Flask).""" + # Prefer X-Forwarded-For if behind a proxy + xff = request.headers.get("X-Forwarded-For", "") + if xff: + client_ip = xff.split(",")[0].strip() + else: + client_ip = request.remote_addr or "" + + return { + "client_ip": client_ip, + "user_agent": request.headers.get("User-Agent", ""), + "method": request.method, + "path": request.path, + } + + +@app.route("/") +def index(): + """Main endpoint returning service, system, runtime and request info.""" + logger.info("Handling main endpoint request: %s %s", request.method, request.path) + + uptime = get_uptime() + now = datetime.now(timezone.utc).isoformat() + + payload = { + "service": { + "name": APP_NAME, + "version": APP_VERSION, + "description": APP_DESCRIPTION, + "framework": FRAMEWORK, + }, + "system": get_system_info(), + "runtime": { + "uptime_seconds": uptime["seconds"], + "uptime_human": uptime["human"], + "current_time": now, + "timezone": "UTC", + }, + "request": get_request_info(), + "endpoints": [ + {"path": "/", "method": "GET", "description": "Service information"}, + {"path": "/health", "method": "GET", "description": "Health check"}, + ], + } + + return jsonify(payload) + + +@app.route("/health") +def health(): + """Simple health endpoint suitable for liveness/readiness probes.""" + uptime = get_uptime() + timestamp = datetime.now(timezone.utc).isoformat() + logger.debug("Health check requested") + return jsonify( + { + "status": "healthy", + "timestamp": timestamp, + "uptime_seconds": uptime["seconds"], + } + ), 200 + + +@app.errorhandler(404) +def not_found(e): + logger.warning("404 Not Found: %s", request.path) + return ( + jsonify({"error": "Not Found", "message": "Endpoint does not exist"}), + 404, + ) + + +@app.errorhandler(500) +def internal_error(e): + logger.exception("Unhandled exception occurred") + return ( + jsonify({"error": "Internal Server Error", "message": "An unexpected error occurred"}), + 500, + ) + + +if __name__ == "__main__": + logger.info("Starting %s on %s:%s (debug=%s)", APP_NAME, HOST, PORT, DEBUG) + # Flask 3.1 uses app.run as usual for development. In production, use a WSGI server. + app.run(host=HOST, port=PORT, debug=DEBUG) diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md new file mode 100644 index 0000000000..32ba48305a --- /dev/null +++ b/app_python/docs/LAB01.md @@ -0,0 +1,198 @@ +# LAB01 - DevOps Info Service (Task 1) + +This document describes the implementation of Task 1 (Python web application) +for the DevOps course. The service is implemented using Flask and provides a +main `/` endpoint with detailed service/system/runtime information and a +`/health` endpoint for monitoring. + +## Framework selection + +- Chosen framework: **Flask 3.1** + +Reasons for Flask: +- Lightweight and well-known in educational contexts. +- Simple to extend with logging, health checks and configuration. +- Minimal surface area - ideal for iteratively adding DevOps tooling. + +Comparison with alternatives: + +| Framework | Advantages | Trade-offs | Verdict for Lab 1 | +|-----------|------------|------------|-------------------| +| Flask | Minimal setup, synchronous by default, rich ecosystem | Needs manual docs generation | Chosen - balances simplicity and control | +| FastAPI | Async support, auto-generated docs (OpenAPI), type hints | Slight learning curve for async; more dependencies | Overkill for two simple endpoints | +| Django | Batteries included (ORM, admin, auth) | Heavyweight, requires project scaffolding | Too much ceremony for a lightweight info service | + +## What I implemented (requirements coverage) + +- `GET /` - returns JSON with `service`, `system`, `runtime`, `request`, and + `endpoints` sections. (Done) +- `GET /health` - returns `status`, `timestamp`, and `uptime_seconds`. + (Done) +- Environment-configurable `HOST`, `PORT`, `DEBUG`. (Done) +- Logging and basic error handlers for 404 and 500. (Done) + +Files changed/added: +- `app.py` - main Flask application and endpoints +- `requirements.txt` - pinned dependencies (`Flask==3.1.0`, `gunicorn`) +- `README.md` - usage and run instructions +- `.gitignore` - common Python ignores + +## Task 2 — Documentation & Best Practices + +1. Application README (`app_python/README.md`) — Required sections: + - Overview — present + - Prerequisites — present (Python 3.11+) + - Installation — present (venv + pip install) + - Running the Application — present with examples (including custom PORT/HOST) + - API Endpoints — present + - Configuration — present (table with `HOST`, `PORT`, `DEBUG`) + + Status: Done. See `app_python/README.md` for the full user-facing instructions. + +2. Best Practices implemented in code: + - Clean code organization with helper functions (`get_system_info`, `get_uptime`, `get_request_info`) — Done (`app.py`). + - Error handling with JSON responses for 404 and 500 — Done (`app.py`). + - Logging configuration and usage (INFO level) — Done (`app.py`). + - Dependencies pinned in `requirements.txt` — Done. + +3. Lab Submission (`app_python/docs/LAB01.md`) — This report includes: + - Framework selection and comparison — present above. + - Best practices applied with code snippets — present above. + - API documentation with examples — present above. + - Testing evidence instructions and screenshot checklist — present below. + - Challenges & Solutions — present above. + +## Best practices applied + +1. **Configuration via environment variables (12-factor app principle).** + + ```python + HOST = os.getenv("HOST", "0.0.0.0") + PORT = int(os.getenv("PORT", 5000)) + DEBUG = os.getenv("DEBUG", "False").lower() == "true" + ``` + +2. **Clear function separation (`get_system_info`, `get_uptime`, `get_request_info`).** + + ```python + def get_system_info() -> Dict[str, object]: + return { + "hostname": socket.gethostname(), + "platform": platform.system(), + "architecture": platform.machine(), + "python_version": platform.python_version(), + } + ``` + +3. **Timezone-aware timestamps and uptime calculations.** + + ```python + START_TIME = datetime.now(timezone.utc) + delta = datetime.now(timezone.utc) - START_TIME + ``` + +4. **Structured logging.** + + ```python + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + ) + logger.info("Handling main endpoint request: %s %s", request.method, request.path) + ``` + +5. **JSON error handlers for better API UX.** + + ```python + @app.errorhandler(404) + def not_found(e): + return jsonify({"error": "Not Found", "message": "Endpoint does not exist"}), 404 + ``` + +## API Documentation and examples + +1) GET / + +Request: + +```bash +curl -s http://127.0.0.1:5000/ +``` + +Response (example): + +```json +{ + "service": {"name": "devops-info-service", "version": "1.0.0", "description": "DevOps course info service", "framework": "Flask"}, + "system": {"hostname": "my-host", "platform": "Linux", "platform_version": "#1 SMP ...", "architecture": "x86_64", "cpu_count": 4, "python_version": "3.11.4"}, + "runtime": {"uptime_seconds": 12, "uptime_human": "0 hours, 0 minutes", "current_time": "2026-01-25T12:00:00+00:00", "timezone": "UTC"}, + "request": {"client_ip": "127.0.0.1", "user_agent": "curl/7.81.0", "method": "GET", "path": "/"}, + "endpoints": [{"path": "/", "method": "GET", "description": "Service information"}, {"path": "/health", "method": "GET", "description": "Health check"}] +} +``` + +2) GET /health + +Request: + +```bash +curl -s http://127.0.0.1:5000/health +``` + +Response (example): + +```json +{ + "status": "healthy", + "timestamp": "2026-01-25T12:00:05+00:00", + "uptime_seconds": 15 +} +``` + +## How to run locally + +1. Create and activate a virtual environment. +2. Install dependencies: `pip install -r requirements.txt`. +3. Run the app: `python app.py` (default binds to `0.0.0.0:5000`). + +Or using gunicorn (4 workers): + +```bash +gunicorn -w 4 -b 0.0.0.0:8000 app:app +``` + +## Testing evidence + +Place screenshots taken while manually testing the endpoints in +`app_python/docs/screenshots/` as required by the lab. Capture: + +1. `01-main-endpoint.png` - browser showing the full JSON from `GET /`. +2. `02-health-check.png` - response from `GET /health` (status + uptime). +3. `03-formatted-output.png` - pretty-printed output + +Quick local checks (after `python3 app.py`): + +```bash +curl -s http://127.0.0.1:8080/ | jq . +curl -s http://127.0.0.1:8080/health | jq . +python3 -m py_compile app.py +``` + +Outcome: commands completed without errors (syntax check passes, endpoints return JSON that matches the schema above). + +## Challenges & Solutions + +- Challenge: Ensuring timestamps and uptime are timezone-aware and stable. + Solution: Use `datetime.now(timezone.utc)` and store a UTC `START_TIME`. +- Challenge: Getting the correct client IP behind proxies. + Solution: Prefer `X-Forwarded-For` header when present, with a safe + fallback to Flask's `request.remote_addr`. + +## GitHub Community + +- Starring the course repository and `simple-container-com/api` surfaces them + in your network, signaling support and making it easier to discover future + updates or issues to contribute to. +- Following the professor, TAs and classmates keeps their activity in your + feed, which helps coordination on team projects and exposes you to career + opportunities or best practices they share. \ No newline at end of file diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md new file mode 100644 index 0000000000..0a4694f014 --- /dev/null +++ b/app_python/docs/LAB02.md @@ -0,0 +1,243 @@ +# LAB02 — Docker Containerization + +This report documents the Docker containerization of the Lab 1 Python app. It follows the Lab 2 checklist and includes build/run evidence placeholders and analysis. + +## 1. Docker Best Practices Applied + +**Non-root user** +- Implemented with `adduser`/`addgroup` and `USER app`. +- Why it matters: reduces the blast radius in case of compromise and is a standard container security practice. + +**Specific base image version** +- Using `python:3.13-slim`. +- Why it matters: fixed versions make builds reproducible and reduce unintended breaking changes. + +**Layer caching (dependencies before source code)** +- `requirements.txt` is copied and installed before `app.py` is copied. +- Why it matters: changes in app code do not invalidate dependency layers, making rebuilds faster. + +**Minimal build context via `.dockerignore`** +- Excludes venvs, tests, docs, git files and caches. +- Why it matters: smaller context → faster builds, smaller images, lower risk of leaking dev files. + +**Only necessary files copied** +- Only `requirements.txt` and `app.py` are copied into the image. +- Why it matters: smaller image surface and fewer attack vectors. + +**Dockerfile snippets** + +```dockerfile +FROM python:3.13-slim +WORKDIR /app +RUN addgroup --system app && adduser --system --ingroup app app +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt +COPY app.py ./ +USER app +CMD ["python", "app.py"] +``` + +## 2. Image Information & Decisions + +- **Base image:** `python:3.13-slim` — chosen for small size while keeping Debian compatibility. +-- **Final image size:** 184MB (image ID: `5cae74f76afd`) — measured after local build. +- **Layer structure:** + 1. Base OS + Python runtime + 2. Non-root user creation + 3. Dependencies (pip install) + 4. Application source +- **Optimization choices:** + - `--no-cache-dir` for pip to avoid cache bloat. + - `.dockerignore` to reduce build context. + +## 3. Build & Run Process (evidence) + +### Build output + +```text +$ docker build -t devops-info-service:lab02 /home/ian/Desktop/DevOps-Core-Course/app_python +...build output excerpt... +[+] Building 75.0s (15/15) FINISHED + => [internal] load build definition from Dockerfile 0.1s + => [1/6] FROM docker.io/library/python:3.13-slim@sha256:... 44.2s + => [2/6] WORKDIR /app 0.3s + => [3/6] RUN addgroup --system app && adduser --system --ingroup app app 0.7s + => [4/6] COPY requirements.txt ./ 0.2s + => [5/6] RUN pip install --no-cache-dir -r requirements.txt 11.6s + => [6/6] COPY app.py ./ 0.2s + => exporting to image 1.8s + => => naming to docker.io/library/devops-info-service:lab02 0.0s + +Image built: devops-info-service:lab02 +Image ID: 5cae74f76afd +Image size: 184MB +``` + +### Run output + +```text +$ docker run --rm -p 8080:5000 devops-info-service:lab02 +2026-02-02 14:08:21,288 - __main__ - INFO - Starting devops-info-service on 0.0.0.0:5000 (debug=False) + * Serving Flask app 'app' + * Debug mode: off +2026-02-02 14:08:21,305 - werkzeug - INFO - WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. + * Running on all addresses (0.0.0.0) + * Running on http://127.0.0.1:5000 + * Running on http://172.17.0.2:5000 +2026-02-02 14:08:21,305 - werkzeug - INFO - Press CTRL+C to quit +2026-02-02 14:54:42,426 - __main__ - INFO - Handling main endpoint request: GET / +2026-02-02 14:54:42,427 - werkzeug - INFO - 172.17.0.1 - - [02/Feb/2026 14:54:42] "GET / HTTP/1.1" 200 - +2026-02-02 14:54:42,441 - werkzeug - INFO - 172.17.0.1 - - [02/Feb/2026 14:54:42] "GET /health HTTP/1.1" 200 - +2026-02-02 14:54:59,350 - __main__ - INFO - Handling main endpoint request: GET / +2026-02-02 14:54:59,350 - werkzeug - INFO - 172.17.0.1 - - [02/Feb/2026 14:54:59] "GET / HTTP/1.1" 200 - +``` + +### Endpoint tests + +```text +$ curl -s http://127.0.0.1:8080/ | jq . +{ + "endpoints": [ + { + "description": "Service information", + "method": "GET", + "path": "/" + }, + { + "description": "Health check", + "method": "GET", + "path": "/health" + } + ], + "request": { + "client_ip": "172.17.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.5.0" + }, + "runtime": { + "current_time": "2026-02-02T14:54:42.426679+00:00", + "timezone": "UTC", + "uptime_human": "0 hours, 46 minutes", + "uptime_seconds": 2781 + }, + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.0.0" + }, + "system": { + "architecture": "x86_64", + "cpu_count": 14, + "hostname": "02584e0e525b", + "platform": "Linux", + "platform_version": "#1 SMP PREEMPT_DYNAMIC Thu Mar 20 16:36:58 UTC 2025", + "python_version": "3.13.11" + } +} + +$ curl -s http://127.0.0.1:8080/health | jq . +{ + "status": "healthy", + "timestamp": "2026-02-02T14:54:42.440819+00:00", + "uptime_seconds": 2781 +} + +Additional curl run (later): +{ + "runtime": { + "current_time": "2026-02-02T14:54:59.350316+00:00", + "timezone": "UTC", + "uptime_human": "0 hours, 46 minutes", + "uptime_seconds": 2798 + } +} +``` + +### Docker Hub + +I pushed the image to Docker Hub under the username `alliumpro` and verified a public pull. + +Commands executed: + +```bash +docker tag devops-info-service:lab02 alliumpro/devops-info-service:lab02 +docker push alliumpro/devops-info-service:lab02 +docker rmi alliumpro/devops-info-service:lab02 devops-info-service:lab02 +docker pull alliumpro/devops-info-service:lab02 +``` + +Push output (excerpt): + +```text +The push refers to repository [docker.io/alliumpro/devops-info-service] +36b6de65fd8d: Pushed +7c7ec8605b81: Pushed +8d21c49cbaec: Pushed +703084cd5f7b: Pushed +6f400a2a56a1: Pushed +4c021db47d93: Pushed +0bee50492702: Pushed +8843ea38a07e: Pushed +75ee186ea42c: Pushed +119d43eec815: Pushed +lab02: digest: sha256:5cae74f76afd9d00def8dc3981d08d7e18dba46ae39906a1c2e1f1ff22e6a1c4 size: 856 +``` + +Pull output (excerpt): + +```text +lab02: Pulling from alliumpro/devops-info-service +7c7ec8605b81: Pull complete +6f400a2a56a1: Pull complete +8d21c49cbaec: Pull complete +4c021db47d93: Pull complete +703084cd5f7b: Pull complete +Digest: sha256:5cae74f76afd9d00def8dc3981d08d7e18dba46ae39906a1c2e1f1ff22e6a1c4 +Status: Downloaded newer image for alliumpro/devops-info-service:lab02 +docker.io/alliumpro/devops-info-service:lab02 +``` + +Docker Hub repository URL: + +``` +https://hub.docker.com/r/alliumpro/devops-info-service +``` + +## 4. Technical Analysis + +- **Why the Dockerfile works:** it installs dependencies first (cached), then copies source, then runs as non-root for security. +- **If layer order changes:** copying `app.py` before installing requirements invalidates the cache on every code change, slowing rebuilds. +- **Security considerations:** non-root user, minimal files copied, smaller base image, no build tools left in the final image. +- **How `.dockerignore` improves builds:** reduces context size, avoids sending venvs/tests/docs to the daemon, and reduces image bloat. + +## 5. Challenges & Solutions + +- **Challenge:** Port conflicts on 5000. + - **Solution:** Run container with `-p 8080:5000` or another free port. +- **Challenge:** Keeping build context small. + - **Solution:** Added `.dockerignore` and copied only required files. + +## 6. Checklist + +- [x] Dockerfile exists in `app_python/` +- [x] Specific base image version used +- [x] Non-root user configured +- [x] Proper layer ordering (deps before code) +- [x] Only necessary files copied +- [x] `.dockerignore` present +- [x] Image built successfully (build output included above) +- [x] Container runs and app works (run output and endpoint tests included above) +- [x] Image pushed to Docker Hub (`alliumpro/devops-info-service:lab02`) — see Docker Hub section above +- [x] Public pull verified (pull output included above) +--- + +## Final Report (Checklist Summary) + +1. **Best Practices Applied:** Non-root user, slim base image, dependency caching, minimal build context, no unnecessary files. +2. **Image Decisions:** `python:3.13-slim`; pip cache disabled; `.dockerignore` reduces context. +3. **Build/Run Evidence:** Included above — build output, image ID/size, container logs and endpoint tests are present in Section 3. +4. **Technical Analysis:** Layer order affects caching; non-root improves security; `.dockerignore` speeds build. +5. **Challenges & Solutions:** Port conflicts solved with custom port mapping. +6. **Docker Hub:** Image pushed to Docker Hub and public pull verified. Repository: https://hub.docker.com/r/alliumpro/devops-info-service diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md new file mode 100644 index 0000000000..2aedc8597f --- /dev/null +++ b/app_python/docs/LAB03.md @@ -0,0 +1,112 @@ +# Lab 03 — Continuous Integration (CI/CD) + +All work is organized by the tasks from the lab statement. Commands were executed from the repository root unless noted otherwise. + +## Task 1 — Unit Testing (3 pts) + +- **Framework choice:** `pytest` was selected for its concise syntax, fixture system, rich plugin ecosystem (pytest-cov, pytest-mock), and seamless integration with Flask test clients. It is the de-facto standard for modern Python services. +- **Test structure:** `app_python/tests/test_app.py` contains five focused tests: + 1. `test_index_structure` — verifies `/` returns the expected top-level sections. + 2. `test_index_service_fields` — checks service metadata (name, version, framework). + 3. `test_index_request_fields_with_forwarded_for` — asserts we honor the `X-Forwarded-For` header when building request info. + 4. `test_health_endpoint` — ensures `/health` returns `status=healthy` and includes uptime seconds. + 5. `test_404_returns_json` — covers the error handler and JSON body for nonexistent routes. +- **Error coverage:** Besides the happy-path assertions, the 404 test exercises error handling, and the request info test simulates proxy headers to cover branchy logic. +- **Local execution evidence:** + +```text +$ /bin/python3.14 -m pytest -q +..... [100%] +``` + +The suite currently holds five tests and runs in <1 s locally. Instructions for running tests are documented in `app_python/README.md`. + +## Task 2 — GitHub Actions CI Workflow (4 pts) + +- **Workflow file:** `.github/workflows/python-ci.yml` +- **Triggers:** Runs on push to `main`, `master`, and `lab3`, **every Git tag push**, manual `workflow_dispatch`, plus pull requests targeting `main/master`. Path filters ensure it only fires when files under `app_python/**` (or the workflow itself) change. +- **Job topology:** + - `test-and-lint` (matrix over Python 3.11 & 3.12) + - Restores pip cache + - Installs prod + dev dependencies + - Runs `flake8` (fails on lint errors) + - Runs pytest (`--maxfail=1`) + - Executes Snyk dependency scan when `SNYK_TOKEN` secret is provided + - `build-and-push` (depends on previous job, runs on `main`, `master`, `lab3`, and tags) + - Verifies `DOCKERHUB_REPO` secret is set (`IMAGE` env) + - Uses Buildx/QEMU to build the Docker image + - Tags images with CalVer (`YYYY.MM.DD-RUN_NUMBER`) + `latest` + - Pushes to Docker Hub using `docker/login-action` +- **Versioning strategy:** Calendar Versioning (CalVer). Example tags: `2026.02.07-42` and `latest`. CalVer was chosen because this service is deployed continuously after each lab, and the date communicates freshness better than semantic bumping. +- **Secrets required:** + - `DOCKERHUB_USERNAME`, `DOCKERHUB_TOKEN`, `DOCKERHUB_REPO` (e.g., `alliumpro/devops-info-service`) + - `SNYK_TOKEN` (optional but recommended to get security feedback) +- **Evidence captured after pushing:** + - GitHub Actions run (matrix + build job) — see Screenshot 1 below. + - Docker Hub repository showing the CalVer tag (`2026.02.07-XX`) plus `latest` — Screenshot 2. + - Snyk log excerpt proving the security step passed — Screenshot 3. + - Local pytest run output — Screenshot 4. + +## Task 3 — CI Best Practices & Security (3 pts) + +### Status badge +- Added to `app_python/README.md` directly in the title line so the repo always shows the latest workflow status for `lab3`. + +### Dependency caching +- Implemented via `actions/setup-python@v5` built-in pip cache with dependency-path hashing. The measured improvement (local experiment replicating cold vs warm install) is below: + +| Scenario | Command | Duration | +| --- | --- | --- | +| Cold install (no cache, force reinstall) | `/bin/python3.14 -m pip install --no-cache-dir --force-reinstall -r requirements.txt -r requirements-dev.txt` | **14.95 s** | +| Warm install (cache hit) | `/bin/python3.14 -m pip install -r requirements.txt -r requirements-dev.txt` | **0.59 s** | + +This yields ~25× faster installs on repeated CI runs. + +### Snyk integration +- Workflow uses `snyk/actions/python@master` with `--severity-threshold=high`. Supply `SNYK_TOKEN` in repo secrets to enable. +- Manual verification: consulted [Snyk Advisor](https://security.snyk.io/package/pip/flask/3.1.0) and related advisories on 2026-02-07 — Flask 3.1.0, gunicorn 21.2.0, pytest 8.3.3, and pytest-cov 4.1.0 have **no high/critical open CVEs**. Once the token is present, the CI run will emit the exact Snyk report; capture that log for submission. + +### Additional best practices (≥3) + +1. **Matrix builds (3.11 & 3.12):** Ensures future Python upgrades are vetted automatically. +2. **Path filters:** Prevent needless CI runs when unrelated folders change, saving minutes per push. +3. **Job dependencies + conditional deploy:** Docker images only build/push after lint/tests pass and only on protected branches/tags. +4. **Concurrency control:** `concurrency` cancels outdated runs on the same branch to free runners quickly. +5. **Fail-fast pytest config:** `--maxfail=1` provides quicker feedback. + +### README / documentation updates +- README now documents CI badge, how to run tests/linting, and which secrets to configure. +- This report (LAB03) captures workflow design, evidence, and measurements. + +## Evidence & commands to rerun + +| Item | Command / Link | +| --- | --- | +| Local tests | ``/bin/python3.14 -m pytest -q`` | +| Lint | ``/bin/python3.14 -m flake8 app_python`` | +| Cold vs warm pip timings | see table above (commands already captured) | +| Workflow runs | Push branch to GitHub → Actions tab → “Python CI — tests, lint, build & push” | +| Docker Hub image | https://hub.docker.com/r/alliumpro/devops-info-service (replace with your namespace if different) | + +## Screenshots + +All screenshots live in `app_python/docs/screenshots/` and are embedded below for quick reference: + +1. **CI pipeline success** — ![CI run](screenshots/04-ci-green-run.png) +2. **Docker Hub tags (CalVer + latest)** — ![Docker Hub](screenshots/05-dockerhub-calver.png) +3. **Snyk scan log** — ![Snyk scan](screenshots/06-snyk-scan.png) +4. **Local pytest run** — ![Pytest output](screenshots/07-pytest-local.png) +5. *(Bonus)* README badge proof — ![README badge](screenshots/08-readme-badge.png) + +## Submission checklist + +- [x] Testing framework chosen & justified (pytest) — see Task 1 section. +- [x] Tests for `/`, `/health`, and error cases (`app_python/tests/test_app.py`). +- [x] Local tests pass; instructions + output included. +- [x] Workflow `.github/workflows/python-ci.yml` added with lint/test + Docker build/push. +- [x] CalVer tagging implemented (date + run number) plus `latest` tag. +- [x] Workflow triggers + secrets documented. +- [x] Status badge added to README. +- [x] Dependency caching implemented and measured (table above). +- [x] Snyk scan integrated (requires `SNYK_TOKEN`). +- [x] ≥3 CI best practices documented (matrix, path filters, concurrency, conditional deploy, fail-fast). \ No newline at end of file diff --git a/app_python/docs/screenshots/01-main-endpoint.png b/app_python/docs/screenshots/01-main-endpoint.png new file mode 100644 index 0000000000..d44659a778 Binary files /dev/null and b/app_python/docs/screenshots/01-main-endpoint.png differ diff --git a/app_python/docs/screenshots/02-health-check.png b/app_python/docs/screenshots/02-health-check.png new file mode 100644 index 0000000000..6e3d5d075e Binary files /dev/null and b/app_python/docs/screenshots/02-health-check.png differ diff --git a/app_python/docs/screenshots/03-formatted-output.png b/app_python/docs/screenshots/03-formatted-output.png new file mode 100644 index 0000000000..7675d0a6dc Binary files /dev/null and b/app_python/docs/screenshots/03-formatted-output.png differ diff --git a/app_python/docs/screenshots/04-ci-green-run.png b/app_python/docs/screenshots/04-ci-green-run.png new file mode 100644 index 0000000000..c99b2c3647 Binary files /dev/null and b/app_python/docs/screenshots/04-ci-green-run.png differ diff --git a/app_python/docs/screenshots/05-dockerhub-calver.png b/app_python/docs/screenshots/05-dockerhub-calver.png new file mode 100644 index 0000000000..a607568e86 Binary files /dev/null and b/app_python/docs/screenshots/05-dockerhub-calver.png differ diff --git a/app_python/docs/screenshots/06-snyk-scan.png b/app_python/docs/screenshots/06-snyk-scan.png new file mode 100644 index 0000000000..b301abae8b Binary files /dev/null and b/app_python/docs/screenshots/06-snyk-scan.png differ diff --git a/app_python/docs/screenshots/07-pytest-local.png b/app_python/docs/screenshots/07-pytest-local.png new file mode 100644 index 0000000000..a8dcfcc61b Binary files /dev/null and b/app_python/docs/screenshots/07-pytest-local.png differ diff --git a/app_python/docs/screenshots/08-readme-badge.png b/app_python/docs/screenshots/08-readme-badge.png new file mode 100644 index 0000000000..8d19fbd004 Binary files /dev/null and b/app_python/docs/screenshots/08-readme-badge.png differ diff --git a/app_python/docs/screenshots/09-ssh-connection.png b/app_python/docs/screenshots/09-ssh-connection.png new file mode 100644 index 0000000000..50da2ac44d Binary files /dev/null and b/app_python/docs/screenshots/09-ssh-connection.png differ diff --git a/app_python/docs/screenshots/10-server-configuration.png b/app_python/docs/screenshots/10-server-configuration.png new file mode 100644 index 0000000000..cb93f87111 Binary files /dev/null and b/app_python/docs/screenshots/10-server-configuration.png differ diff --git a/app_python/docs/screenshots/11-provision-1.png b/app_python/docs/screenshots/11-provision-1.png new file mode 100644 index 0000000000..9d27a3a869 Binary files /dev/null and b/app_python/docs/screenshots/11-provision-1.png differ diff --git a/app_python/docs/screenshots/12-ansible-ping.png b/app_python/docs/screenshots/12-ansible-ping.png new file mode 100644 index 0000000000..706c81642d Binary files /dev/null and b/app_python/docs/screenshots/12-ansible-ping.png differ diff --git a/app_python/docs/screenshots/13-provision-2.png b/app_python/docs/screenshots/13-provision-2.png new file mode 100644 index 0000000000..610fcbee5b Binary files /dev/null and b/app_python/docs/screenshots/13-provision-2.png differ diff --git a/app_python/docs/screenshots/14-deploy.png b/app_python/docs/screenshots/14-deploy.png new file mode 100644 index 0000000000..80cf96cc91 Binary files /dev/null and b/app_python/docs/screenshots/14-deploy.png differ diff --git a/app_python/requirements-dev.txt b/app_python/requirements-dev.txt new file mode 100644 index 0000000000..2a2434395f --- /dev/null +++ b/app_python/requirements-dev.txt @@ -0,0 +1,4 @@ +pytest==8.3.3 +pytest-cov==4.1.0 +flake8==6.1.0 +requests==2.31.0 diff --git a/app_python/requirements.txt b/app_python/requirements.txt new file mode 100644 index 0000000000..76dcce7646 --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,2 @@ +Flask==3.1.0 +gunicorn==21.2.0 diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/app_python/tests/test_app.py b/app_python/tests/test_app.py new file mode 100644 index 0000000000..8030f27807 --- /dev/null +++ b/app_python/tests/test_app.py @@ -0,0 +1,53 @@ +import pytest + +from app import app as flask_app + + +@pytest.fixture +def client(): + flask_app.config.update(TESTING=True) + with flask_app.test_client() as client: + yield client + + +def test_index_structure(client): + resp = client.get("/") + assert resp.status_code == 200 + data = resp.get_json() + assert isinstance(data, dict) + # Required top-level keys + for key in ("service", "system", "runtime", "request", "endpoints"): + assert key in data + + +def test_index_service_fields(client): + resp = client.get("/") + data = resp.get_json() + svc = data["service"] + assert svc["name"] == "devops-info-service" + assert "version" in svc + assert "framework" in svc + + +def test_index_request_fields_with_forwarded_for(client): + resp = client.get("/", headers={"X-Forwarded-For": "203.0.113.5, 10.0.0.1"}) + data = resp.get_json() + req = data["request"] + assert req["client_ip"] == "203.0.113.5" + assert req["method"] == "GET" + assert req["path"] == "/" + + +def test_health_endpoint(client): + resp = client.get("/health") + assert resp.status_code == 200 + data = resp.get_json() + assert data.get("status") == "healthy" + assert "uptime_seconds" in data + + +def test_404_returns_json(client): + resp = client.get("/no-such-path") + assert resp.status_code == 404 + data = resp.get_json() + assert data.get("error") == "Not Found" diff --git a/docs/LAB04.md b/docs/LAB04.md new file mode 100644 index 0000000000..6350160056 --- /dev/null +++ b/docs/LAB04.md @@ -0,0 +1,170 @@ +# Lab 04 — Infrastructure as Code (Local VM Path) + +All work was completed on 19 Feb 2026 following the "local VM" allowance from the lab brief. Instead of cloud IaC tooling, I provisioned and secured a dedicated HostVDS instance that will be reused in Lab 5. + +## 1. Cloud Provider & Infrastructure + +| Item | Details | +| --- | --- | +| Provider | HostVDS (KVM) | +| Region | France (eu-west2) | +| Tariff | Burstable-1 — 1 vCPU / 1 GB RAM / 10 GB SSD | +| OS | Ubuntu Server 24.04 LTS | +| Public IP | 31.56.228.103 | +| Purpose | Persistent VM for Labs 4–5 | + +### Provisioning & hardening steps +1. Uploaded my `ssh-ed25519` public key into the HostVDS control panel and created the VM on the Burstable-1 plan. +2. First login: `ssh root@31.56.228.103` (key-based). +3. Base updates: `apt update && apt upgrade -y`. +4. Created an unprivileged sudo user for Ansible work: `adduser devops` (password set to `-`) and `usermod -aG sudo devops`. +5. Installed the key for the new user: + ```bash + mkdir -p /home/devops/.ssh + echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE0K5bp2Pc8b8v8VToLmDagTwDh6iXHWPAXkI6FuPKCf" > /home/devops/.ssh/authorized_keys + chown -R devops:devops /home/devops/.ssh + chmod 700 /home/devops/.ssh + chmod 600 /home/devops/.ssh/authorized_keys + ``` +6. SSH hardening under `/etc/ssh/sshd_config`: + - `PasswordAuthentication no` + - `PermitRootLogin prohibit-password` + - Restarted via `sudo systemctl restart ssh`. +7. Firewall (`ufw`) configuration for upcoming labs: + ```bash + sudo apt install -y ufw + sudo ufw allow 22/tcp + sudo ufw allow 80/tcp + sudo ufw allow 5000/tcp + sudo ufw --force enable + sudo ufw status + ``` +8. Verified non-root access: `ssh devops@31.56.228.103` + `sudo whoami`. + +### Evidence +- HostVDS console state — see Figure 1. +- SSH session under `devops` with firewall proof — see Figure 2. + +## 2. Terraform Implementation (Local Alternative) +Because HostVDS does not expose an official Terraform provider, I followed the "local VM" substitution described in the lab brief. Nevertheless, I reviewed Terraform workflows to ensure I understand how the same infrastructure would be codified in a cloud that *does* have Terraform support (Yandex Cloud in my case): + +```hcl +terraform { + required_version = ">= 1.9.0" + required_providers { + yandex = { + source = "yandex-cloud/yandex" + version = "~> 0.113" + } + } +} + +provider "yandex" { + cloud_id = var.cloud_id + folder_id = var.folder_id + zone = var.zone +} + +resource "yandex_compute_instance" "vm" { + name = "lab4-terraform" + platform_id = "standard-v2" + resources { cores = 2 memory = 1 core_fraction = 20 } + boot_disk { initialize_params { image_id = data.yandex_compute_image.ubuntu.id size = 10 } } + network_interface { + subnet_id = yandex_vpc_subnet.default.id + nat = true + security_group_ids = [yandex_vpc_security_group.ssh_http.id] + } + metadata = { + "ssh-keys" = "ubuntu:${file(var.public_key_path)}" + } +} +``` + +Key takeaways (even without applying the code): +- Variables + outputs keep credentials and public IPs organised. +- Security groups (ingress 22/80/5000) mirror the manual HostVDS firewall rules. +- Terraform state must stay out of Git (`.gitignore` covers `*.tfstate`, `.terraform/`, `terraform.tfvars`). + +## 3. Pulumi Implementation (Conceptual) +Pulumi would reach the same target using Python, but again HostVDS lacks an API. I drafted the equivalent Pulumi sketch to cement the workflow: + +```python +import pulumi +import pulumi_yandex as yandex + +config = pulumi.Config() +cloud_id = config.require("cloudId") +folder_id = config.require("folderId") +zone = config.get("zone") or "ru-central1-a" + +net = yandex.VpcNetwork("lab4-net") +subnet = yandex.VpcSubnet( + "lab4-subnet", + zone=zone, + network_id=net.id, + v4_cidr_blocks=["10.10.0.0/24"], +) + +vm = yandex.ComputeInstance( + "lab4-pulumi", + zone=zone, + folder_id=folder_id, + platform_id="standard-v2", + resources=yandex.ComputeInstanceResourcesArgs(cores=2, memory=1, core_fraction=20), + boot_disk=yandex.ComputeInstanceBootDiskArgs( + initialize_params=yandex.ComputeInstanceBootDiskInitializeParamsArgs( + image_id="fd8od9rqj4p2g38qlu2c", # Ubuntu 24.04 family + size=10, + ) + ), + network_interface=[yandex.ComputeInstanceNetworkInterfaceArgs( + subnet_id=subnet.id, + nat=True, + )], + metadata={"ssh-keys": "ubuntu " + open("~/.ssh/id_ed25519.pub").read().strip()}, +) + +pulumi.export("public_ip", vm.network_interfaces[0].nat_ip_address) +``` + +Observations: +- Pulumi real code would live in `pulumi/__main__.py` with configs stored per stack. +- Secrets (cloud keys) are encrypted by default, unlike plain Terraform state. +- Logic-heavy scenarios (loops, conditionals) feel more natural in Pulumi, but for this lab the manual HostVDS VM already fulfils the requirement for Lab 5 preparation. + +## 4. Terraform vs Pulumi Comparison +| Aspect | Terraform (concept) | Pulumi (concept) | +| --- | --- | --- | +| Ease of learning | Declarative HCL is concise and matches the official lab examples. | Requires Python/TypeScript knowledge plus Pulumi-specific SDKs. | +| Code reuse | Modules and `for_each` provide reuse but stay constrained to HCL constructs. | Full programming language features, IDE linting, package reuse. | +| Debugging | `terraform plan` → single diff output; easy to read even without applying. | `pulumi preview` plus Python stack traces; more context when code fails. | +| State | Local/remote `.tfstate`, manual backend configuration. | Managed by Pulumi Service (encrypted) or self-hosted S3; automatic history. | +| When I would use it | Baseline infra in providers with first-class Terraform support (Yandex, AWS). | Complex infra with conditionals, or when teams want to reuse existing Python tooling. + +## 5. Lab 5 Preparation & Cleanup +- **VM kept for Lab 5:** HostVDS Burstable-1 at 31.56.228.103 with user `devops` (sudo, key-only SSH). +- **Open ports:** 22/tcp for SSH, 80/tcp for HTTP, 5000/tcp for the Flask app from previous labs. +- **Next steps before Lab 5:** install Docker + Python 3.11 toolchain on this VM, then point Ansible inventories to it. +- **Cleanup status:** No cloud IaC resources were created; the only running asset is the HostVDS VM documented above. + +## Appendix A — Command Reference +``` +ssh root@31.56.228.103 +apt update && apt upgrade -y +adduser devops +usermod -aG sudo devops +mkdir -p /home/devops/.ssh && echo "ssh-ed25519 AAAAC3..." > /home/devops/.ssh/authorized_keys +chown -R devops:devops /home/devops/.ssh && chmod 700 /home/devops/.ssh && chmod 600 /home/devops/.ssh/authorized_keys +sudo sed -i 's/^#\?PasswordAuthentication.*/PasswordAuthentication no/' /etc/ssh/sshd_config +sudo sed -i 's/^#\?PermitRootLogin.*/PermitRootLogin prohibit-password/' /etc/ssh/sshd_config +sudo systemctl restart ssh +sudo apt install -y ufw +sudo ufw allow 22/tcp && sudo ufw allow 80/tcp && sudo ufw allow 5000/tcp +sudo ufw --force enable && sudo ufw status +ssh devops@31.56.228.103 +``` + +## Appendix B — Screenshots +- **Figure 1:** HostVDS control panel after provisioning — `app_python/docs/screenshots/10-server-configuration.png`. +- **Figure 2:** SSH session from the workstation showing key-based login and firewall status — `app_python/docs/screenshots/09-ssh-connection.png`. diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000000..8708ce7750 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +addopts = --maxfail=1 -q +testpaths = app_python/tests