diff --git a/.github/workflows/ansible-deploy-java.yml b/.github/workflows/ansible-deploy-java.yml
new file mode 100644
index 0000000000..ee37a3a57e
--- /dev/null
+++ b/.github/workflows/ansible-deploy-java.yml
@@ -0,0 +1,86 @@
+name: Ansible Deploy Java
+
+on:
+ push:
+ branches: [master]
+ paths:
+ - 'ansible/vars/app_java.yml'
+ - 'ansible/playbooks/deploy_java.yml'
+ - 'ansible/roles/web_app/**'
+ - '.github/workflows/ansible-deploy-java.yml'
+ pull_request:
+ branches: [master]
+ paths:
+ - 'ansible/**'
+ - '!ansible/docs/**'
+ - '.github/workflows/ansible-deploy-java.yml'
+
+jobs:
+ lint:
+ name: Ansible Lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies
+ run: pip install ansible ansible-lint
+
+ - name: Install Ansible collections
+ run: ansible-galaxy collection install -r requirements.yml
+ working-directory: ansible
+
+ - name: Create dummy vault password for lint
+ run: echo "dummy" > ansible/.vault_pass
+
+ - name: Run ansible-lint
+ run: ansible-lint playbooks/*.yml
+ working-directory: ansible
+
+ - name: Cleanup dummy vault pass
+ if: always()
+ run: rm -f ansible/.vault_pass
+
+ deploy:
+ name: Deploy Java App
+ needs: lint
+ if: github.event_name == 'push'
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install Ansible
+ run: pip install ansible
+
+ - name: Setup SSH
+ run: |
+ mkdir -p ~/.ssh
+ echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_ed25519
+ chmod 600 ~/.ssh/id_ed25519
+ ssh-keyscan -H ${{ secrets.VM_HOST }} >> ~/.ssh/known_hosts 2>/dev/null
+
+ - name: Create vault password file
+ run: echo "${{ secrets.ANSIBLE_VAULT_PASSWORD }}" > /tmp/vault_pass
+
+ - name: Run Ansible playbook
+ run: |
+ ansible-playbook playbooks/deploy_java.yml \
+ -i inventory/hosts.ini \
+ --vault-password-file /tmp/vault_pass
+ working-directory: ansible
+
+ - name: Verify deployment
+ run: |
+ sleep 10
+ curl -f http://${{ secrets.VM_HOST }}:5001/health
+
+ - name: Cleanup
+ if: always()
+ run: rm -f /tmp/vault_pass
diff --git a/.github/workflows/ansible-deploy.yml b/.github/workflows/ansible-deploy.yml
new file mode 100644
index 0000000000..e2407abb16
--- /dev/null
+++ b/.github/workflows/ansible-deploy.yml
@@ -0,0 +1,87 @@
+name: Ansible Deploy Python
+
+on:
+ push:
+ branches: [master]
+ paths:
+ - 'ansible/vars/app_python.yml'
+ - 'ansible/playbooks/deploy_python.yml'
+ - 'ansible/playbooks/deploy.yml'
+ - 'ansible/roles/web_app/**'
+ - '.github/workflows/ansible-deploy.yml'
+ pull_request:
+ branches: [master]
+ paths:
+ - 'ansible/**'
+ - '!ansible/docs/**'
+ - '.github/workflows/ansible-deploy.yml'
+
+jobs:
+ lint:
+ name: Ansible Lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies
+ run: pip install ansible ansible-lint
+
+ - name: Install Ansible collections
+ run: ansible-galaxy collection install -r requirements.yml
+ working-directory: ansible
+
+ - name: Create dummy vault password for lint
+ run: echo "dummy" > ansible/.vault_pass
+
+ - name: Run ansible-lint
+ run: ansible-lint playbooks/*.yml
+ working-directory: ansible
+
+ - name: Cleanup dummy vault pass
+ if: always()
+ run: rm -f ansible/.vault_pass
+
+ deploy:
+ name: Deploy Python App
+ needs: lint
+ if: github.event_name == 'push'
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install Ansible
+ run: pip install ansible
+
+ - name: Setup SSH
+ run: |
+ mkdir -p ~/.ssh
+ echo "${{ secrets.SSH_PRIVATE_KEY }}" > ~/.ssh/id_ed25519
+ chmod 600 ~/.ssh/id_ed25519
+ ssh-keyscan -H ${{ secrets.VM_HOST }} >> ~/.ssh/known_hosts 2>/dev/null
+
+ - name: Create vault password file
+ run: echo "${{ secrets.ANSIBLE_VAULT_PASSWORD }}" > /tmp/vault_pass
+
+ - name: Run Ansible playbook
+ run: |
+ ansible-playbook playbooks/deploy_python.yml \
+ -i inventory/hosts.ini \
+ --vault-password-file /tmp/vault_pass
+ working-directory: ansible
+
+ - name: Verify deployment
+ run: |
+ sleep 10
+ curl -f http://${{ secrets.VM_HOST }}:5000/health
+
+ - name: Cleanup
+ if: always()
+ run: rm -f /tmp/vault_pass
diff --git a/.github/workflows/java-ci.yml b/.github/workflows/java-ci.yml
new file mode 100644
index 0000000000..53184561f0
--- /dev/null
+++ b/.github/workflows/java-ci.yml
@@ -0,0 +1,77 @@
+name: Java CI
+
+on:
+ push:
+ branches: [master]
+ tags: ["v*.*.*"]
+ paths:
+ - "appJava/**"
+ - ".github/workflows/java-ci.yml"
+ pull_request:
+ branches: [master]
+ paths:
+ - "appJava/**"
+ - ".github/workflows/java-ci.yml"
+
+permissions:
+ contents: read
+
+concurrency:
+ group: java-ci-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build-test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Java
+ uses: actions/setup-java@v4
+ with:
+ distribution: temurin
+ java-version: "25"
+ cache: gradle
+
+ - name: Build and test (Gradle)
+ working-directory: appJava
+ run: ./gradlew --no-daemon clean check bootJar
+
+ docker:
+ needs: build-test
+ runs-on: ubuntu-latest
+ if: ${{ github.event_name != 'pull_request' }}
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Log in to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: luminitetime
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Docker metadata
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: luminitetime/devops-info-service-java
+ tags: |
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=raw,value=latest,enable={{is_default_branch}}
+ type=sha,format=short,prefix=sha-
+
+ - name: Build and push
+ uses: docker/build-push-action@v6
+ with:
+ context: ./appJava
+ file: ./appJava/Dockerfile
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml
new file mode 100644
index 0000000000..993fe72c9b
--- /dev/null
+++ b/.github/workflows/python-ci.yml
@@ -0,0 +1,107 @@
+name: Python CI
+
+on:
+ push:
+ branches: [master]
+ tags: ["v*.*.*"]
+ paths:
+ - "app_python/**"
+ - ".github/workflows/python-ci.yml"
+ pull_request:
+ branches: [master]
+ paths:
+ - "app_python/**"
+ - ".github/workflows/python-ci.yml"
+
+permissions:
+ contents: read
+
+concurrency:
+ group: python-ci-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ env:
+ SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
+ strategy:
+ fail-fast: true
+ matrix:
+ python-version: ["3.12", "3.13"]
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: pip
+ cache-dependency-path: |
+ app_python/requirements.txt
+ app_python/requirements-dev.txt
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r app_python/requirements.txt -r app_python/requirements-dev.txt
+
+ - name: Lint (ruff)
+ run: ruff check app_python
+
+ - name: Run tests (pytest + coverage)
+ run: pytest -q app_python/tests --cov=app_python --cov-report=term --cov-report=xml --cov-fail-under=70
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v5
+ with:
+ files: coverage.xml
+ fail_ci_if_error: false
+
+ - name: Set up Snyk
+ if: ${{ env.SNYK_TOKEN != '' }}
+ uses: snyk/actions/setup@master
+
+ - name: Snyk dependency scan
+ if: ${{ env.SNYK_TOKEN != '' }}
+ continue-on-error: true
+ run: snyk test --file=app_python/requirements.txt --severity-threshold=high
+
+ docker:
+ needs: test
+ runs-on: ubuntu-latest
+ if: ${{ github.event_name != 'pull_request' }}
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Log in to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: luminitetime
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Docker metadata
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: luminitetime/devops-info-service-python
+ tags: |
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=raw,value=latest,enable={{is_default_branch}}
+ type=sha,format=short,prefix=sha-
+
+ - name: Build and push
+ uses: docker/build-push-action@v6
+ with:
+ context: ./app_python
+ file: ./app_python/Dockerfile
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
diff --git a/.gitignore b/.gitignore
index 30d74d2584..b89f6e5e6f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,22 @@
-test
\ No newline at end of file
+.idea
+.vscode
+
+# Terraform
+*.tfstate
+*.tfstate.*
+.terraform/
+# .terraform.lock.hcl is intentionally committed (Terraform recommends versioning the dependency lock file)
+terraform.tfvars
+*.tfvars
+crash.log
+
+# Pulumi
+pulumi/venv/
+pulumi/.pulumi-state/
+pulumi/__pycache__/
+Pulumi.*.yaml
+
+# Ansible
+*.retry
+.vault_pass
+__pycache__/
\ No newline at end of file
diff --git a/ansible/.gitignore b/ansible/.gitignore
new file mode 100644
index 0000000000..2abf99a120
--- /dev/null
+++ b/ansible/.gitignore
@@ -0,0 +1,5 @@
+*.retry
+.vault_pass
+__pycache__/
+*.pyc
+.venv/
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 0000000000..b6f41a7bba
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,12 @@
+[defaults]
+inventory = inventory/hosts.ini
+roles_path = roles
+host_key_checking = False
+remote_user = yc-user
+retry_files_enabled = False
+vault_password_file = .vault_pass
+
+[privilege_escalation]
+become = True
+become_method = sudo
+become_user = root
diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md
new file mode 100644
index 0000000000..0482a4aee3
--- /dev/null
+++ b/ansible/docs/LAB05.md
@@ -0,0 +1,171 @@
+# LAB05 - Ansible Fundamentals
+
+## 1. Architecture Overview
+
+Ansible version: 2.20.2 (ansible-core), installed via Homebrew.
+
+Target VM: Ubuntu 22.04 LTS on Yandex Cloud (recreated from Lab 4 Terraform code), 2 cores at 20%, 1 GB RAM.
+
+Role structure:
+
+```text
+ansible/
+ ansible.cfg
+ .vault_pass (gitignored)
+ inventory/
+ hosts.ini
+ group_vars/
+ all.yml (encrypted with ansible-vault)
+ roles/
+ common/
+ tasks/main.yml
+ defaults/main.yml
+ docker/
+ tasks/main.yml
+ handlers/main.yml
+ defaults/main.yml
+ app_deploy/
+ tasks/main.yml
+ handlers/main.yml
+ defaults/main.yml
+ playbooks/
+ site.yml
+ provision.yml
+ deploy.yml
+```
+
+Why roles instead of monolithic playbooks: roles isolate concerns (system setup, Docker, app deployment) into reusable units with standardized directory layouts. Each role can be tested, shared, and maintained independently. Playbooks become thin orchestration layers that compose roles.
+
+## 2. Roles Documentation
+
+### common
+
+- Purpose: updates apt cache, installs essential system packages, sets timezone.
+- Variables: `common_packages` (list of apt packages), `common_timezone` (default `UTC`).
+- Handlers: none.
+- Dependencies: none.
+
+### docker
+
+- Purpose: installs Docker CE from the official Docker repository, enables the service, adds the SSH user to the `docker` group, installs `python3-docker` for Ansible Docker modules.
+- Variables: `docker_user` (defaults to `ansible_user`).
+- Handlers: `Restart docker` -- triggered after Docker packages are installed.
+- Dependencies: none (but typically runs after `common`).
+
+### app_deploy
+
+- Purpose: authenticates to Docker Hub, pulls the application image, runs a container with port mapping and restart policy, waits for the port to open, and verifies the health endpoint.
+- Variables (stored in encrypted vault): `dockerhub_username`, `dockerhub_password`, `app_name`, `docker_image`, `docker_image_tag`, `app_port`, `app_container_name`. Role defaults: `app_port: 5000`, `app_restart_policy: unless-stopped`.
+- Handlers: `Restart app container` -- restarts the running container.
+- Dependencies: `docker` role must have been applied first.
+
+## 3. Idempotency Demonstration
+
+### First run
+
+```text
+TASK [common : Update apt cache] changed
+TASK [common : Install common packages] changed
+TASK [common : Set timezone] changed
+TASK [docker : Install prerequisite packages] ok
+TASK [docker : Create keyrings directory] ok
+TASK [docker : Add Docker GPG key] changed
+TASK [docker : Add Docker repository] changed
+TASK [docker : Install Docker packages] changed
+TASK [docker : Ensure Docker service running] ok
+TASK [docker : Add user to docker group] changed
+TASK [docker : Install python3-docker] changed
+RUNNING HANDLER [docker : Restart docker] changed
+
+PLAY RECAP
+lab04-vm: ok=13 changed=9 unreachable=0 failed=0
+```
+
+### Second run
+
+```text
+TASK [common : Update apt cache] ok
+TASK [common : Install common packages] ok
+TASK [common : Set timezone] ok
+TASK [docker : Install prerequisite packages] ok
+TASK [docker : Create keyrings directory] ok
+TASK [docker : Add Docker GPG key] ok
+TASK [docker : Add Docker repository] ok
+TASK [docker : Install Docker packages] ok
+TASK [docker : Ensure Docker service running] ok
+TASK [docker : Add user to docker group] ok
+TASK [docker : Install python3-docker] ok
+
+PLAY RECAP
+lab04-vm: ok=12 changed=0 unreachable=0 failed=0
+```
+
+Analysis: On the first run, 9 tasks changed - packages were installed, the Docker GPG key and repository were added, the Docker service was restarted by the handler, and the user was added to the `docker` group. On the second run, every task reported `ok` with zero changes because each module checks the current state before acting (`apt` verifies packages are present, `service` checks running status, `user` checks group membership). The handler did not fire because nothing notified it. This is idempotency: the playbook converges to the desired state and makes no further modifications.
+
+## 4. Ansible Vault Usage
+
+Credentials and app configuration are stored in `inventory/group_vars/all.yml`, encrypted with `ansible-vault`.
+
+Vault password management: a `.vault_pass` file (mode 0600, gitignored) stores the password locally. The path is set in `ansible.cfg` via `vault_password_file`, so no `--ask-vault-pass` flag is needed.
+
+Encrypted file content (first five lines):
+
+```text
+$ANSIBLE_VAULT;1.1;AES256
+39306161353663386163333465633132653565656336333236643634653862333864656638393436
+3663343237663039323566313065323761303565636661650a663662633733393561313732636330
+34623830353238656635316463643332613634613531343163386463646662313936666464626163
+3938383532363437370a613962383231323263663162393431646234613239346337313766343762
+```
+
+Why Ansible Vault is important: without it, Docker Hub tokens or other secrets would appear in plaintext in version-controlled files. Vault encrypts them with AES-256 so the YAML can be committed safely. The `no_log: true` flag on the `docker_login` task prevents credentials from leaking into Ansible output.
+
+## 5. Deployment Verification
+
+### deploy.yml output
+
+```text
+TASK [app_deploy : Log in to Docker Hub] ignored (placeholder token)
+TASK [app_deploy : Pull Docker image] changed
+TASK [app_deploy : Run application container] changed
+TASK [app_deploy : Wait for application port] ok
+TASK [app_deploy : Verify health endpoint] ok
+TASK [app_deploy : Show health check result] ok
+ health_check.json: {"status": "healthy", "timestamp": "2026-02-24T08:57:55", "uptime_seconds": 3}
+RUNNING HANDLER [app_deploy : Restart app container] changed
+
+PLAY RECAP
+lab04-vm: ok=8 changed=3 unreachable=0 failed=0 ignored=1
+```
+
+### docker ps
+
+```text
+CONTAINER ID IMAGE COMMAND STATUS PORTS NAMES
+abd8cebdcc2b luminitetime/devops-info-service-python:latest "python app.py" Up 7 seconds 0.0.0.0:5000->8080/tcp devops-info-service-python
+```
+
+### Health check (curl from local machine)
+
+```text
+$ curl http://93.77.176.203:5000/health
+{"status":"healthy","timestamp":"2026-02-24T08:58:04.818449+00:00","uptime_seconds":6}
+
+$ curl http://93.77.176.203:5000/
+{"service":{"name":"devops-info-service","version":"1.0.0", ...}, "system":{"hostname":"abd8cebdcc2b","platform":"Linux", ...}}
+```
+
+## 6. Key Decisions
+
+- Why use roles instead of plain playbooks? Roles enforce a standard directory layout that separates tasks, handlers, defaults, and templates. This makes each concern independently testable and reusable across projects without copy-pasting.
+- How do roles improve reusability? The `docker` role can provision Docker on any Ubuntu host without modification. Only the variables change. Teams can share roles via Ansible Galaxy or internal repositories.
+- What makes a task idempotent? Using declarative modules (`apt: state=present`, `service: state=started`) that check current state before acting. If the desired state already matches, no change is made. Avoid raw `command`/`shell` tasks that always report changed.
+- How do handlers improve efficiency? Handlers run only once at the end of the play, even if notified multiple times. This avoids redundant service restarts; for example, Docker is restarted once after all packages are installed rather than after each package individually.
+- Why is Ansible Vault necessary? Infrastructure code belongs in version control, but secrets (API tokens, passwords) must not appear in plaintext. Vault encrypts sensitive files with AES-256 so they can be committed alongside the rest of the code without exposing credentials.
+
+## 7. Challenges
+
+- `group_vars/all.yml` was not loaded because Ansible searches for `group_vars` adjacent to the inventory or playbook directory, not the project root. Fixed by moving it to `inventory/group_vars/all.yml`.
+- The `ansible_distribution_release` fact triggered a deprecation warning in Ansible 2.20. Switched to `ansible_facts['distribution_release']`.
+- Docker login fails with a placeholder token. Added `ignore_errors: true` since the image is public and can be pulled without authentication. In production the vault would contain a real access token.
+
diff --git a/ansible/docs/LAB06.md b/ansible/docs/LAB06.md
new file mode 100644
index 0000000000..24e639d2d7
--- /dev/null
+++ b/ansible/docs/LAB06.md
@@ -0,0 +1,238 @@
+[![Ansible Deploy Python](https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/ansible-deploy.yml/badge.svg)](https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/ansible-deploy.yml)
+[![Ansible Deploy Java](https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/ansible-deploy-java.yml/badge.svg)](https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/ansible-deploy-java.yml)
+
+# LAB06 - Advanced Ansible and CI/CD
+
+## 1. Overview
+
+Technologies: Ansible 2.20, Docker Compose v2, GitHub Actions, Jinja2 templating.
+
+## 2. Blocks and Tags
+
+### Block usage in each role
+
+**common role** - package installation tasks are wrapped in a `block` with a `rescue` that runs `apt-get update --fix-missing` on failure, and an `always` that logs completion to `/tmp/ansible_common_done`. The block carries tag `packages`.
+
+**docker role** - installation tasks (GPG key, repository, packages) are grouped in a block tagged `docker_install` with a rescue that pauses 10 seconds and retries apt update, and an always that ensures the Docker service is enabled. Configuration tasks (user group, python3-docker) are in a separate block tagged `docker_config`.
+
+### Tag strategy
+
+```text
+provision.yml tags: common, docker, docker_install, docker_config, packages
+deploy.yml tags: app_deploy, compose, docker_install, docker_config, web_app_wipe
+```
+
+### Selective execution
+
+```text
+$ ansible-playbook playbooks/provision.yml --tags "docker"
+PLAY RECAP
+lab04-vm: ok=9 changed=0 (only docker tasks ran, common skipped)
+
+$ ansible-playbook playbooks/provision.yml --list-tags
+ TASK TAGS: [common, docker, docker_config, docker_install, packages]
+```
+
+### Rescue block evidence
+
+The Java app deployment triggered the rescue block when the health check failed (Spring Boot startup latency):
+
+```text
+TASK [web_app : Verify health endpoint] fatal (connection reset by peer)
+TASK [web_app : Log deployment failure] ok => "Deployment of devops-java failed"
+PLAY RECAP: rescued=1
+```
+
+## 3. Docker Compose Migration
+
+### Template
+
+File: `roles/web_app/templates/docker-compose.yml.j2`
+
+```yaml
+services:
+ {{ app_name }}:
+ image: {{ docker_image }}:{{ docker_tag }}
+ container_name: {{ app_name }}
+ ports:
+ - "{{ app_port }}:{{ app_internal_port }}"
+ restart: {{ app_restart_policy }}
+```
+
+### Before/after comparison
+
+Before (Lab 5): individual `docker_container` module calls, manual stop/remove/run sequence.
+After (Lab 6): a single `docker compose up -d --pull always` manages the full lifecycle from a templated YAML file. Compose handles container recreation, image pulling, and restart policy declaratively.
+
+### Role dependencies
+
+`roles/web_app/meta/main.yml` declares `docker` as a dependency. Running `deploy.yml` automatically provisions Docker first without requiring `provision.yml` to be run separately.
+
+### Idempotency
+
+Second provision run: `ok=13 changed=1` (only the timestamped log file changes). All Docker and package tasks report `ok`.
+
+## 4. Wipe Logic
+
+### Implementation
+
+Controlled by two gates:
+1. Variable `web_app_wipe` (default: `false`) in `roles/web_app/defaults/main.yml`
+2. Tag `web_app_wipe` on the `include_tasks` and the wipe block
+
+Both must be active for wipe to execute. The wipe tasks are included at the top of `main.yml` so a clean-reinstall flow (wipe then deploy) works in a single playbook run.
+
+### Test results
+
+**Scenario 1 - normal deployment (wipe should NOT run):**
+
+```text
+$ ansible-playbook playbooks/deploy_python.yml
+TASK [web_app : Include wipe tasks] included
+TASK [web_app : Stop and remove ...] skipping (web_app_wipe=false)
+TASK [web_app : Create application dir] changed
+TASK [web_app : Verify health endpoint] ok
+```
+
+**Scenario 2 - wipe only:**
+
+```text
+$ ansible-playbook playbooks/deploy_python.yml -e "web_app_wipe=true" --tags web_app_wipe
+TASK [web_app : Stop and remove containers] changed
+TASK [web_app : Remove docker-compose file] changed
+TASK [web_app : Remove application directory] changed
+TASK [web_app : Log wipe completion] "Application devops-python wiped successfully"
+PLAY RECAP: ok=6 changed=3
+```
+
+Verified: `docker ps` shows no Python container; `/opt/devops-python` removed; Java app still running.
+
+**Scenario 3 - clean reinstall (wipe then deploy):**
+
+```text
+$ ansible-playbook playbooks/deploy_python.yml -e "web_app_wipe=true"
+TASK [web_app : Stop and remove ...] ignoring (dir already absent)
+TASK [web_app : Create application dir] changed
+TASK [web_app : Pull and start] changed
+TASK [web_app : Verify health endpoint] ok => {"status": "healthy"}
+PLAY RECAP: ok=20 changed=3 ignored=1
+```
+
+**Scenario 4a - tag specified, variable false (blocked by when):**
+
+```text
+$ ansible-playbook playbooks/deploy_python.yml --tags web_app_wipe
+TASK [web_app : Stop and remove ...] skipping
+TASK [web_app : Remove ...] skipping
+TASK [web_app : Remove ...] skipping
+TASK [web_app : Log wipe ...] skipping
+PLAY RECAP: ok=2 changed=0 skipped=4
+```
+
+### Research answers
+
+1. Why use both variable AND tag? Neither alone is sufficient. The variable alone still allows accidental wipe during `ansible-playbook deploy.yml -e "web_app_wipe=true"` without intent to wipe. The tag alone requires the user to explicitly request wipe via `--tags`, but without the variable check a stray tag inclusion could still trigger it. Together they form a double safety mechanism.
+
+2. Difference from `never` tag: the `never` tag makes tasks unconditionally skipped unless explicitly included. The variable+tag approach is more flexible - it allows the clean-reinstall pattern (run without `--tags` filter but with `web_app_wipe=true`) which the `never` tag cannot support.
+
+3. Why must wipe come BEFORE deployment? This enables the clean-reinstall use case: old state is removed first, then fresh deployment follows in the same playbook run.
+
+4. Clean reinstall vs rolling update: clean reinstall is safer when the application state is corrupt or the Docker image/tag changed significantly. Rolling update (just `compose up`) is faster for routine updates where no cleanup is needed.
+
+5. Extending to wipe images/volumes: add `docker image prune -f` and `docker volume rm {{ app_name }}_*` tasks after removing containers.
+
+## 5. CI/CD Integration
+
+### Workflow architecture
+
+Two separate GitHub Actions workflows:
+- `ansible-deploy.yml` - deploys the Python app on push to `master` when `ansible/` files change
+- `ansible-deploy-java.yml` - deploys the Java app independently
+
+Each workflow has two jobs:
+1. `lint` - runs `ansible-lint` on all playbooks
+2. `deploy` (needs: lint, push-only) - sets up SSH, writes vault password from secrets, runs `ansible-playbook`, verifies health endpoint, cleans up
+
+### Required GitHub Secrets
+
+- `ANSIBLE_VAULT_PASSWORD` - decrypts `group_vars/all.yml`
+- `SSH_PRIVATE_KEY` - ed25519 private key for `yc-user`
+- `VM_HOST` - target VM IP
+
+### Path filters
+
+Python workflow triggers on: `ansible/vars/app_python.yml`, `ansible/playbooks/deploy_python.yml`, `ansible/playbooks/deploy.yml`, `ansible/roles/web_app/**`.
+Java workflow triggers on: `ansible/vars/app_java.yml`, `ansible/playbooks/deploy_java.yml`, `ansible/roles/web_app/**`.
+Both pull-request triggers exclude `ansible/docs/**`; the push triggers use explicit allow-lists, so docs-only changes never deploy. A role change triggers both workflows.
+
+### Research answers
+
+1. Security of SSH keys in GitHub Secrets: secrets are encrypted at rest and masked in logs, but any workflow in the repository can access them. Limit write access to the repository, use environment-scoped secrets for production, and consider short-lived credentials.
+
+2. Staging-to-production pipeline: add separate inventory files (`staging.ini`, `production.ini`), use GitHub environments with approval gates, deploy to staging first, run smoke tests, then promote to production.
+
+3. Rollbacks: pin Docker image tags (e.g., `sha-abc123` instead of `latest`), store the previous tag as a variable, and create a rollback playbook that sets the tag to the previous value and re-runs deployment.
+
+4. Self-hosted runner security: the runner has direct network access to the VM (no SSH key in CI), secrets never leave the infrastructure, and the attack surface is smaller than sharing keys with GitHub-hosted runners.
+
+## 6. Testing Results
+
+### Both apps running
+
+```text
+$ ssh yc-user@VM "docker ps"
+NAMES IMAGE STATUS PORTS
+devops-java luminitetime/devops-info-service-java:latest Up About a minute 0.0.0.0:5001->8081/tcp
+devops-python luminitetime/devops-info-service-python:latest Up 4 minutes 0.0.0.0:5000->8080/tcp
+```
+
+### Health checks (from inside the VM)
+
+```text
+$ curl http://localhost:5000/health
+{"status":"healthy","timestamp":"2026-02-26T18:15:40.365237+00:00","uptime_seconds":259}
+
+$ curl http://localhost:5001/health
+{"status":"healthy","timestamp":"2026-02-26T18:15:40.350962273Z","uptime_seconds":68}
+```
+
+### Independent wipe
+
+Wiping only the Python app removed its container and `/opt/devops-python` directory while the Java app continued running unaffected (verified via `docker ps`).
+
+## 7. Challenges and Solutions
+
+- Java app internal port: the Spring Boot image listens on 8081, not 8080 as initially assumed. Discovered via `docker logs`; fixed `app_internal_port` in `vars/app_java.yml`.
+- Health check timeout: Java/Spring Boot takes 5-8 seconds to start. Increased `wait_for` timeout to 60 seconds and `delay` to 5. The first deployment still hit the rescue block, which logged the failure gracefully.
+- Ansible deprecation warning: `ansible_distribution_release` triggers INJECT_FACTS_AS_VARS deprecation in Ansible 2.20. Switched to `ansible_facts['distribution_release']`.
+- group_vars placement: Ansible searches for `group_vars` adjacent to the inventory file, not the project root. Moved to `inventory/group_vars/all.yml`.
+
+## Bonus Part 1 - Multi-App Deployment
+
+### Architecture
+
+The `web_app` role is parameterized and reused for both applications. Each app has its own variable file (`vars/app_python.yml`, `vars/app_java.yml`) that overrides `app_name`, `docker_image`, `app_port`, and `app_internal_port`. Separate playbooks (`deploy_python.yml`, `deploy_java.yml`) include the same role with different vars. A combined `deploy_all.yml` deploys both using `include_role`.
+
+### Port allocation
+
+Python app: host 5000 -> container 8080.
+Java app: host 5001 -> container 8081.
+
+### Role reusability
+
+The `web_app` role has no hardcoded values. The Jinja2 template generates a unique `docker-compose.yml` per app in `/opt/{{ app_name }}/`. Wipe logic is also app-specific because `compose_project_dir` differs per app.
+
+## Bonus Part 2 - Multi-App CI/CD
+
+### Workflow strategy
+
+Separate workflows (Approach A) - one per app. Each workflow has its own path filter, deployment playbook reference, and verification port.
+
+### Path filter strategy
+
+Changes to `ansible/roles/web_app/**` trigger both workflows (shared role). Changes to `ansible/vars/app_python.yml` trigger only the Python workflow. Changes to `ansible/vars/app_java.yml` trigger only the Java workflow.
+
+### Files
+
+- `.github/workflows/ansible-deploy.yml` - Python app
+- `.github/workflows/ansible-deploy-java.yml` - Java app
diff --git a/ansible/inventory/group_vars/all.yml b/ansible/inventory/group_vars/all.yml
new file mode 100644
index 0000000000..24f501a011
--- /dev/null
+++ b/ansible/inventory/group_vars/all.yml
@@ -0,0 +1,18 @@
+$ANSIBLE_VAULT;1.1;AES256
+39306161353663386163333465633132653565656336333236643634653862333864656638393436
+3663343237663039323566313065323761303565636661650a663662633733393561313732636330
+34623830353238656635316463643332613634613531343163386463646662313936666464626163
+3938383532363437370a613962383231323263663162393431646234613239346337313766343762
+32396134633663326239356534613239306437333065636530393432313235336536383338633234
+65303362346638303864613337623165373963363531653138386161383938306633356163646234
+39663231633432303834376461383564353335376333366130323130346536336463393861366138
+30633432396635303139663761333563623063396464363538323663666232326264393133346261
+32383133376337613162303061653631613538326566323630656166336132356334636238663936
+65396363336231383737306233303037323264323534623235626631393038303461343538623765
+65646661663139366535376632353861303066636566313431373264663864313539336633366264
+65333965396531353563306162306637623361396530373537383835303064366662366538313761
+63343135666666373336363335386136326530336265356337343637316131666631363762356332
+61396236663634643262373037353839386634383861653963333062323137663761663834333432
+64336261386361643834653735313631613833353933613262303664373765363332613361646139
+62313939393464323463373139336466316662363633663431343633393764313338313261636365
+3463
diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini
new file mode 100644
index 0000000000..aecaecbcb5
--- /dev/null
+++ b/ansible/inventory/hosts.ini
@@ -0,0 +1,2 @@
+[webservers]
+lab04-vm ansible_host=93.77.186.113 ansible_user=yc-user ansible_ssh_private_key_file=~/.ssh/id_ed25519
diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml
new file mode 100644
index 0000000000..95174b9e0e
--- /dev/null
+++ b/ansible/playbooks/deploy.yml
@@ -0,0 +1,7 @@
+---
+- name: Deploy application
+ hosts: webservers
+ become: true
+
+ roles:
+ - web_app
diff --git a/ansible/playbooks/deploy_all.yml b/ansible/playbooks/deploy_all.yml
new file mode 100644
index 0000000000..645ae987f3
--- /dev/null
+++ b/ansible/playbooks/deploy_all.yml
@@ -0,0 +1,23 @@
+---
+- name: Deploy all applications
+ hosts: webservers
+ become: true
+
+ tasks:
+ - name: Deploy Python app
+ ansible.builtin.include_role:
+ name: web_app
+ vars:
+ web_app_name: devops-python
+ web_app_image: luminitetime/devops-info-service-python
+ web_app_port: 5000
+ web_app_internal_port: 8080
+
+ - name: Deploy Java app
+ ansible.builtin.include_role:
+ name: web_app
+ vars:
+ web_app_name: devops-java
+ web_app_image: luminitetime/devops-info-service-java
+ web_app_port: 5001
+ web_app_internal_port: 8081
diff --git a/ansible/playbooks/deploy_java.yml b/ansible/playbooks/deploy_java.yml
new file mode 100644
index 0000000000..c2566b792a
--- /dev/null
+++ b/ansible/playbooks/deploy_java.yml
@@ -0,0 +1,9 @@
+---
+- name: Deploy Java application
+ hosts: webservers
+ become: true
+ vars_files:
+ - ../vars/app_java.yml
+
+ roles:
+ - web_app
diff --git a/ansible/playbooks/deploy_python.yml b/ansible/playbooks/deploy_python.yml
new file mode 100644
index 0000000000..5342a33f5d
--- /dev/null
+++ b/ansible/playbooks/deploy_python.yml
@@ -0,0 +1,9 @@
+---
+- name: Deploy Python application
+ hosts: webservers
+ become: true
+ vars_files:
+ - ../vars/app_python.yml
+
+ roles:
+ - web_app
diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml
new file mode 100644
index 0000000000..362e19a8b2
--- /dev/null
+++ b/ansible/playbooks/provision.yml
@@ -0,0 +1,10 @@
+---
+- name: Provision web servers
+ hosts: webservers
+ become: true
+
+ roles:
+ - role: common
+ tags: [common]
+ - role: docker
+ tags: [docker]
diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml
new file mode 100644
index 0000000000..fad1d7038b
--- /dev/null
+++ b/ansible/playbooks/site.yml
@@ -0,0 +1,12 @@
+---
+- name: Full site setup
+ hosts: webservers
+ become: true
+
+ roles:
+ - role: common
+ tags: [common]
+ - role: docker
+ tags: [docker]
+ - role: web_app
+ tags: [app_deploy]
diff --git a/ansible/requirements.yml b/ansible/requirements.yml
new file mode 100644
index 0000000000..db9b0aa20b
--- /dev/null
+++ b/ansible/requirements.yml
@@ -0,0 +1,6 @@
+---
+collections:
+ - name: community.general
+ version: ">=8.0.0"
+ - name: community.docker
+ version: ">=3.0.0"
diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml
new file mode 100644
index 0000000000..c300a07bb6
--- /dev/null
+++ b/ansible/roles/common/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+common_packages:
+ - python3-pip
+ - curl
+ - git
+ - vim
+ - htop
+ - ca-certificates
+ - gnupg
+ - lsb-release
+
+common_timezone: "UTC"
diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml
new file mode 100644
index 0000000000..37e3c767d9
--- /dev/null
+++ b/ansible/roles/common/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+- name: Install system packages
+ become: true
+ tags:
+ - packages
+ block:
+ - name: Update apt cache
+ ansible.builtin.apt:
+ update_cache: true
+ cache_valid_time: 3600
+
+ - name: Install common packages
+ ansible.builtin.apt:
+ name: "{{ common_packages }}"
+ state: present
+
+ rescue:
+ - name: Fix apt and retry
+ ansible.builtin.apt:
+ update_cache: true
+ force: true
+
+ - name: Retry package installation
+ ansible.builtin.apt:
+ name: "{{ common_packages }}"
+ state: present
+
+ always:
+ - name: Log package setup completion
+ ansible.builtin.copy:
+ content: "common role packages completed {{ ansible_facts['date_time']['iso8601'] }}\n"
+ dest: /tmp/ansible_common_done
+ mode: "0644"
+
+- name: Set timezone
+ community.general.timezone:
+ name: "{{ common_timezone }}"
+ become: true
+ tags:
+ - common
diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml
new file mode 100644
index 0000000000..795fd1fd78
--- /dev/null
+++ b/ansible/roles/docker/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+docker_user: "{{ ansible_user }}"
diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml
new file mode 100644
index 0000000000..07aa0eb290
--- /dev/null
+++ b/ansible/roles/docker/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart docker
+ ansible.builtin.service:
+ name: docker
+ state: restarted
diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml
new file mode 100644
index 0000000000..c79a77d5ba
--- /dev/null
+++ b/ansible/roles/docker/tasks/main.yml
@@ -0,0 +1,79 @@
+---
+- name: Install Docker engine
+ become: true
+ tags:
+ - docker_install
+ block:
+ - name: Install prerequisite packages
+ ansible.builtin.apt:
+ name:
+ - ca-certificates
+ - curl
+ - gnupg
+ state: present
+
+ - name: Create keyrings directory
+ ansible.builtin.file:
+ path: /etc/apt/keyrings
+ state: directory
+ mode: "0755"
+
+ - name: Add Docker GPG key
+ ansible.builtin.get_url:
+ url: https://download.docker.com/linux/ubuntu/gpg
+ dest: /etc/apt/keyrings/docker.asc
+ mode: "0644"
+
+    - name: Add Docker repository
+      ansible.builtin.apt_repository:
+        repo: >-
+          deb [arch={{ ansible_facts['architecture'] | regex_replace('^x86_64$', 'amd64') | regex_replace('^aarch64$', 'arm64') }}
+          signed-by=/etc/apt/keyrings/docker.asc]
+          https://download.docker.com/linux/ubuntu
+          {{ ansible_facts['distribution_release'] }} stable
+        state: present
+        filename: docker  # written to /etc/apt/sources.list.d/docker.list
+
+ - name: Install Docker packages
+ ansible.builtin.apt:
+ name:
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ - docker-buildx-plugin
+ - docker-compose-plugin
+ state: present
+ update_cache: true
+ notify: Restart docker
+
+ rescue:
+ - name: Wait before retrying
+ ansible.builtin.pause:
+ seconds: 10
+
+ - name: Retry apt update
+ ansible.builtin.apt:
+ update_cache: true
+
+ always:
+ - name: Ensure Docker service is enabled and started
+ ansible.builtin.service:
+ name: docker
+ state: started
+ enabled: true
+
+- name: Configure Docker access
+ become: true
+ tags:
+ - docker_config
+ block:
+ - name: Add user to docker group
+ ansible.builtin.user:
+ name: "{{ docker_user }}"
+ groups: docker
+ append: true
+
+ - name: Install python3-docker for Ansible modules
+ ansible.builtin.apt:
+ name: python3-docker
+ state: present
diff --git a/ansible/roles/web_app/defaults/main.yml b/ansible/roles/web_app/defaults/main.yml
new file mode 100644
index 0000000000..876a10ee9d
--- /dev/null
+++ b/ansible/roles/web_app/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+web_app_name: devops-app  # container name and directory suffix; overridden per app
+web_app_image: "luminitetime/devops-info-service-python"  # Docker Hub image (tag kept separate)
+web_app_tag: latest  # image tag pulled on deploy
+web_app_port: 5000  # host port published by docker compose
+web_app_internal_port: 8080  # port the app listens on inside the container
+web_app_project_dir: "/opt/{{ web_app_name }}"  # where docker-compose.yml is rendered on the VM
+web_app_restart_policy: unless-stopped  # compose restart policy for the container
+
+web_app_wipe: false  # when true, tasks/wipe.yml removes the container and project dir
diff --git a/ansible/roles/web_app/handlers/main.yml b/ansible/roles/web_app/handlers/main.yml
new file mode 100644
index 0000000000..885e640c25
--- /dev/null
+++ b/ansible/roles/web_app/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Restart app container
+ ansible.builtin.command:
+ cmd: docker compose restart
+ chdir: "{{ web_app_project_dir }}"
+ changed_when: true
diff --git a/ansible/roles/web_app/meta/main.yml b/ansible/roles/web_app/meta/main.yml
new file mode 100644
index 0000000000..cb7d8e0460
--- /dev/null
+++ b/ansible/roles/web_app/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: docker
diff --git a/ansible/roles/web_app/tasks/main.yml b/ansible/roles/web_app/tasks/main.yml
new file mode 100644
index 0000000000..e72beec705
--- /dev/null
+++ b/ansible/roles/web_app/tasks/main.yml
@@ -0,0 +1,50 @@
+---
+- name: Include wipe tasks
+ ansible.builtin.include_tasks: wipe.yml
+ tags:
+ - web_app_wipe
+
+- name: Deploy application with Docker Compose
+ tags:
+ - app_deploy
+ - compose
+ block:
+ - name: Create application directory
+ ansible.builtin.file:
+ path: "{{ web_app_project_dir }}"
+ state: directory
+ mode: "0755"
+
+ - name: Template docker-compose file
+ ansible.builtin.template:
+ src: docker-compose.yml.j2
+ dest: "{{ web_app_project_dir }}/docker-compose.yml"
+ mode: "0644"
+
+ - name: Pull and start application
+ ansible.builtin.command:
+ cmd: docker compose up -d --pull always
+ chdir: "{{ web_app_project_dir }}"
+ register: web_app_compose_result
+ changed_when: "'Started' in web_app_compose_result.stderr or 'Created' in web_app_compose_result.stderr or 'Pulling' in web_app_compose_result.stderr"
+
+ - name: Wait for application port
+ ansible.builtin.wait_for:
+ port: "{{ web_app_port }}"
+ delay: 5
+ timeout: 60
+
+ - name: Verify health endpoint
+ ansible.builtin.uri:
+ url: "http://localhost:{{ web_app_port }}/health"
+ return_content: true
+ register: web_app_health_check
+
+ - name: Show health check result
+ ansible.builtin.debug:
+ var: web_app_health_check.json
+
+ rescue:
+ - name: Log deployment failure
+ ansible.builtin.debug:
+ msg: "Deployment of {{ web_app_name }} failed"
diff --git a/ansible/roles/web_app/tasks/wipe.yml b/ansible/roles/web_app/tasks/wipe.yml
new file mode 100644
index 0000000000..8a2dd7eb4b
--- /dev/null
+++ b/ansible/roles/web_app/tasks/wipe.yml
@@ -0,0 +1,27 @@
+---
+- name: Wipe web application
+ when: web_app_wipe | bool
+ tags:
+ - web_app_wipe
+ block:
+ - name: Stop and remove containers via compose
+ ansible.builtin.command:
+ cmd: docker compose down --remove-orphans
+ chdir: "{{ web_app_project_dir }}"
+ register: web_app_wipe_result
+ changed_when: web_app_wipe_result.rc == 0
+ failed_when: false
+
+ - name: Remove docker-compose file
+ ansible.builtin.file:
+ path: "{{ web_app_project_dir }}/docker-compose.yml"
+ state: absent
+
+ - name: Remove application directory
+ ansible.builtin.file:
+ path: "{{ web_app_project_dir }}"
+ state: absent
+
+ - name: Log wipe completion
+ ansible.builtin.debug:
+ msg: "Application {{ web_app_name }} wiped successfully"
diff --git a/ansible/roles/web_app/templates/docker-compose.yml.j2 b/ansible/roles/web_app/templates/docker-compose.yml.j2
new file mode 100644
index 0000000000..12e746422a
--- /dev/null
+++ b/ansible/roles/web_app/templates/docker-compose.yml.j2
@@ -0,0 +1,7 @@
+services:
+ {{ web_app_name }}:
+ image: {{ web_app_image }}:{{ web_app_tag }}
+ container_name: {{ web_app_name }}
+ ports:
+ - "{{ web_app_port }}:{{ web_app_internal_port }}"
+ restart: {{ web_app_restart_policy }}
diff --git a/ansible/vars/app_java.yml b/ansible/vars/app_java.yml
new file mode 100644
index 0000000000..0cc0b9f044
--- /dev/null
+++ b/ansible/vars/app_java.yml
@@ -0,0 +1,7 @@
+---
+web_app_name: devops-java  # container/project name for the Java service
+web_app_image: luminitetime/devops-info-service-java  # image built from appJava/
+web_app_tag: latest  # tag pulled on deploy
+web_app_port: 5001  # host port (the Python app occupies 5000)
+web_app_internal_port: 8081  # Spring Boot listen port (PORT default, see appJava README)
+web_app_project_dir: "/opt/{{ web_app_name }}"  # compose project directory on the VM
diff --git a/ansible/vars/app_python.yml b/ansible/vars/app_python.yml
new file mode 100644
index 0000000000..10332b6ab3
--- /dev/null
+++ b/ansible/vars/app_python.yml
@@ -0,0 +1,7 @@
+---
+web_app_name: devops-python
+web_app_image: luminitetime/devops-info-service-python
+web_app_tag: latest
+web_app_port: 5000
+web_app_internal_port: 8080
+web_app_project_dir: "/opt/{{ web_app_name }}"
diff --git a/appJava/.dockerignore b/appJava/.dockerignore
new file mode 100644
index 0000000000..6b675fb320
--- /dev/null
+++ b/appJava/.dockerignore
@@ -0,0 +1,16 @@
+.git/
+.gitignore
+
+.gradle/
+build/
+out/
+bin/
+
+.idea/
+.vscode/
+*.iml
+.DS_Store
+
+docs/
+screenshots/
+HELP.md
diff --git a/appJava/.gitattributes b/appJava/.gitattributes
new file mode 100644
index 0000000000..8af972cded
--- /dev/null
+++ b/appJava/.gitattributes
@@ -0,0 +1,3 @@
+/gradlew text eol=lf
+*.bat text eol=crlf
+*.jar binary
diff --git a/appJava/.gitignore b/appJava/.gitignore
new file mode 100644
index 0000000000..c2065bc262
--- /dev/null
+++ b/appJava/.gitignore
@@ -0,0 +1,37 @@
+HELP.md
+.gradle
+build/
+!gradle/wrapper/gradle-wrapper.jar
+!**/src/main/**/build/
+!**/src/test/**/build/
+
+### STS ###
+.apt_generated
+.classpath
+.factorypath
+.project
+.settings
+.springBeans
+.sts4-cache
+bin/
+!**/src/main/**/bin/
+!**/src/test/**/bin/
+
+### IntelliJ IDEA ###
+.idea
+*.iws
+*.iml
+*.ipr
+out/
+!**/src/main/**/out/
+!**/src/test/**/out/
+
+### NetBeans ###
+/nbproject/private/
+/nbbuild/
+/dist/
+/nbdist/
+/.nb-gradle/
+
+### VS Code ###
+.vscode/
diff --git a/appJava/Dockerfile b/appJava/Dockerfile
new file mode 100644
index 0000000000..9fac32a5fa
--- /dev/null
+++ b/appJava/Dockerfile
@@ -0,0 +1,27 @@
+FROM eclipse-temurin:25-jdk AS builder
+
+WORKDIR /workspace
+
+COPY gradlew build.gradle settings.gradle ./
+COPY gradle/ gradle/
+RUN chmod +x gradlew
+
+RUN ./gradlew --no-daemon --version
+
+COPY src/ src/
+RUN ./gradlew --no-daemon clean bootJar
+
+FROM eclipse-temurin:25-jre
+
+WORKDIR /app
+
+RUN addgroup --system app && adduser --system --ingroup app app
+
+COPY --from=builder /workspace/build/libs/appJava-0.0.1-SNAPSHOT.jar /app/app.jar
+RUN chown -R app:app /app
+
+USER app
+
+EXPOSE 8081
+
+CMD ["java", "-jar", "/app/app.jar"]
diff --git a/appJava/README.md b/appJava/README.md
new file mode 100644
index 0000000000..4cb558568a
--- /dev/null
+++ b/appJava/README.md
@@ -0,0 +1,89 @@
+# DevOps Info Service (Java / Spring Boot)
+
+## Overview
+
+This service exposes a small HTTP API that reports information about the running host, the Java runtime, and the incoming request. It mirrors the Python implementation and is used as a base for later DevOps labs.
+
+The API provides:
+- `GET /` – service, system, runtime, and request metadata
+- `GET /health` – simple health check with uptime info
+
+## Prerequisites
+
+- Java 25 (JDK)
+
+All Java dependencies are managed via Gradle in [build.gradle](build.gradle).
+
+## Installation
+
+You can build the project using the included Gradle wrapper:
+
+```bash
+cd appJava
+./gradlew clean build
+```
+
+This will download the necessary Gradle components (if needed), resolve dependencies, generate code from `openapi.yaml`, compile the application, and run tests.
+
+## Running the Application
+
+Default configuration (HOST=0.0.0.0, PORT=8081, DEBUG=false):
+
+```bash
+cd appJava
+./gradlew bootRun
+```
+
+Running from the executable JAR after a build:
+
+```bash
+cd appJava
+./gradlew bootJar
+java -jar build/libs/appJava-0.0.1-SNAPSHOT.jar
+```
+
+Custom configuration with environment variables:
+
+```bash
+# Run on localhost:8081
+cd appJava
+HOST=127.0.0.1 PORT=8081 java -jar build/libs/appJava-0.0.1-SNAPSHOT.jar
+
+# Run on 127.0.0.1:3000 with debug logging
+HOST=127.0.0.1 PORT=3000 DEBUG=true java -jar build/libs/appJava-0.0.1-SNAPSHOT.jar
+```
+
+After start, you can test the endpoints with curl (default app config used in commands):
+
+```bash
+curl http://localhost:8081/
+curl http://localhost:8081/health
+```
+
+## API Endpoints
+
+- `GET /`
+ - Returns JSON with the following top-level sections:
+ - `service` – name, version, description, framework (Spring Boot)
+ - `system` – hostname, platform, platform_version, architecture, cpu_count, python_version (mapped from Java runtime)
+ - `runtime` – uptime_seconds, uptime_human, current_time, timezone
+ - `request` – client_ip, user_agent, method, path
+ - `endpoints` – list of available paths and their purpose
+
+- `GET /health`
+ - Returns a compact health document:
+ - `status` – string status ("healthy")
+ - `timestamp` – current UTC timestamp in ISO 8601 format
+ - `uptime_seconds` – number of seconds the process has been running
+
+## Configuration
+
+The application is configured via environment variables and Spring Boot configuration in [src/main/resources/application.yml](src/main/resources/application.yml):
+
+| Variable | Default | Description |
+|----------|------------|---------------------------------------------|
+| `HOST` | `0.0.0.0` | Address the app binds to |
+| `PORT` | `8081` | TCP port the app listens on |
+| `DEBUG` | `false` | When `true`, enables DEBUG logging level |
+
+All variables are optional. If they are not set, the defaults above are used.
diff --git a/appJava/build.gradle b/appJava/build.gradle
new file mode 100644
index 0000000000..9f81800585
--- /dev/null
+++ b/appJava/build.gradle
@@ -0,0 +1,85 @@
+plugins {
+ id 'java'
+ id 'org.springframework.boot' version '4.0.2'
+ id 'io.spring.dependency-management' version '1.1.7'
+ id 'org.openapi.generator' version '7.8.0'
+ id 'checkstyle'
+}
+
+group = 'luminais.tech'
+version = '0.0.1-SNAPSHOT'
+description = 'appJava'
+
+java {
+ toolchain {
+ languageVersion = JavaLanguageVersion.of(25)
+ }
+}
+
+repositories {
+ mavenCentral()
+}
+
+dependencies {
+ implementation 'org.springframework.boot:spring-boot-starter-webmvc'
+ implementation 'org.openapitools:jackson-databind-nullable:0.2.6'
+ implementation 'org.springdoc:springdoc-openapi-starter-webmvc-ui:2.7.0'
+ implementation 'org.springframework.boot:spring-boot-starter-validation'
+ testImplementation 'org.springframework.boot:spring-boot-starter-webmvc-test'
+ testImplementation 'org.springframework.boot:spring-boot-starter-test'
+ testRuntimeOnly 'org.junit.platform:junit-platform-launcher'
+}
+
+springBoot {
+ mainClass.set('luminais.tech.appjava.AppJavaApplication')
+}
+
+openApiGenerate {
+    generatorName = 'spring'
+    inputSpec = "$rootDir/src/main/resources/openapi.yaml"
+    outputDir = layout.buildDirectory.dir('generated').get().asFile.path  // Project.buildDir is removed in Gradle 9 (wrapper pins 9.2.1)
+    apiPackage = 'luminais.tech.appjava.api'
+    modelPackage = 'luminais.tech.appjava.model'
+    configOptions = [
+            useSpringBoot3 : 'true',
+            delegatePattern: 'true',
+            interfaceOnly  : 'false',
+            useTags        : 'true',
+            dateLibrary    : 'java8'
+    ]
+}
+
+sourceSets {
+    main {
+        java {
+            srcDir layout.buildDirectory.dir('generated/src/main/java')  // generated sources; avoids deprecated $buildDir
+        }
+    }
+}
+
+tasks.named('compileJava') {
+ dependsOn tasks.named('openApiGenerate')
+}
+
+tasks.named('openApiGenerate') {
+    doFirst {
+        delete(layout.buildDirectory.dir('generated'))  // wipe stale generated code before regenerating
+    }
+}
+
+tasks.named('test') {
+ useJUnitPlatform()
+}
+
+checkstyle {
+ toolVersion = '13.2.0'
+ configFile = file("$rootDir/config/checkstyle/checkstyle.xml")
+}
+
+tasks.named('checkstyleMain') {
+ source = fileTree('src/main/java') { include '**/*.java' }
+}
+
+tasks.named('checkstyleTest') {
+ source = fileTree('src/test/java') { include '**/*.java' }
+}
diff --git a/appJava/config/checkstyle/checkstyle.xml b/appJava/config/checkstyle/checkstyle.xml
new file mode 100644
index 0000000000..8dcdc7da22
--- /dev/null
+++ b/appJava/config/checkstyle/checkstyle.xml
@@ -0,0 +1,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/appJava/docs/JAVA.md b/appJava/docs/JAVA.md
new file mode 100644
index 0000000000..59accba554
--- /dev/null
+++ b/appJava/docs/JAVA.md
@@ -0,0 +1,25 @@
+# JAVA – Language Justification
+
+## 1. Why Java / Spring Boot
+
+- Strong ecosystem: Spring Boot is a de-facto standard for microservices in many companies.
+- Tooling: great support for OpenAPI generation, testing, and observability libraries.
+- Consistency: easy to align with enterprise stacks that already use Java.
+
+## 2. Fit for the DevOps Info Service
+
+- HTTP API: Spring Boot + WebMVC make it trivial to expose JSON endpoints.
+- Spec-first: the OpenAPI generator integrates cleanly into Gradle, keeping the contract as the single source of truth.
+- Configuration: Spring’s configuration properties system maps to the env variable and config requirements.
+
+## 3. Comparison with Alternatives
+
+- Go: produces smaller static binaries and very fast startup, but the tooling and ecosystem for spec-first APIs and enterprise integrations are less standardized.
+- Rust: excellent safety and performance, but slower to develop and overkill for a simple JSON info service.
+- Java/Spring Boot: slightly heavier runtime, but very productive, well-known, and easy to integrate with existing DevOps tooling.
+
+## 4. Build & Runtime Notes
+
+- Build command: `./gradlew clean build`.
+- Typical jar size: tens of MBs vs a few MBs for Go, but fine for this lab and realistic for enterprise Java services.
+- The same JSON contract as in the Python service is documented in the OpenAPI spec.
diff --git a/appJava/docs/LAB01.md b/appJava/docs/LAB01.md
new file mode 100644
index 0000000000..8610f270a9
--- /dev/null
+++ b/appJava/docs/LAB01.md
@@ -0,0 +1,68 @@
+# LAB01 – DevOps Info Service (Java / Spring Boot)
+
+## 1. Overview
+
+This Java service implements the same DevOps info API as the Python app, using a Spring Boot application generated from an OpenAPI spec. It exposes `GET /` and `GET /health` and uses the same JSON structure.
+
+## 2. Architecture
+
+- OpenAPI-first: `src/main/resources/openapi.yaml` defines both endpoints and all models (`RootResponse`, `ServiceInfo`, etc.).
+- Generated layer: Gradle + `org.openapi.generator` generates controllers, delegates, and models into `build/generated`.
+- Operation layer: `luminais.tech.appjava.operation.DevopsSystemInfoApiDelegateImpl` implements the generated delegate and just forwards to the service.
+- Service layer: `luminais.tech.appjava.service.DevopsInfoService` builds all response objects and encapsulates uptime, system, and request logic.
+
+## 3. Configuration & Environment
+
+- Application YAML: `src/main/resources/application.yml`
+ - `server.port` = `${PORT:8081}`
+ - `server.address` = `${HOST:0.0.0.0}`
+ - `devops.service.*` and `devops.endpoints[]` configure `ServiceInfo` and the endpoints list.
+- ENV variables:
+ - `PORT` – override HTTP port (e.g. `PORT=9090`)
+ - `HOST` – bind address (e.g. `HOST=127.0.0.1`)
+ - `DEBUG` – when `true`, sets `logging.level.root=DEBUG` before Spring starts.
+
+## 4. Build & Run
+
+```bash
+cd appJava
+./gradlew clean build
+./gradlew bootRun
+# or
+java -jar build/libs/appJava-0.0.1-SNAPSHOT.jar
+```
+
+Test endpoints (default port 8081):
+
+```bash
+curl http://localhost:8081/ | jq
+curl http://localhost:8081/health | jq
+```
+
+## 5. Mapping to Lab Requirements
+
+- Endpoints: `/` and `/health` return the same JSON structure as the Python version.
+- Structure:
+ - Root response: `service`, `system`, `runtime`, `request`, `endpoints`.
+ - Health response: `status`, `timestamp`, `uptime_seconds`.
+- Best practices:
+ - OpenAPI-driven models (no manual Java DTOs).
+ - Clear service/operation separation with constructor injection.
+ - Config-driven constants via `DevopsProperties`.
+ - Env-based host, port, and logging level.
+
+## 6. Testing Evidence
+
+Commands used during manual verification:
+
+```bash
+cd appJava
+./gradlew bootRun
+
+curl http://localhost:8081/ | jq '.service, .system, .runtime, .request, .endpoints'
+curl http://localhost:8081/health | jq
+```
+
+Screenshots (to be stored under `appJava/docs/screenshots/`):
+- Java service running and responding to `/`.
+- Java service responding to `/health`.
diff --git a/appJava/docs/LAB02.md b/appJava/docs/LAB02.md
new file mode 100644
index 0000000000..b7abf6d13c
--- /dev/null
+++ b/appJava/docs/LAB02.md
@@ -0,0 +1,189 @@
+# LAB02 — Docker Containerization (Java / Spring Boot, Multi-Stage)
+
+## 1. Multi-Stage Build Strategy
+
+This image uses two stages:
+
+- Builder stage: compiles the Spring Boot application into an executable JAR using Gradle.
+- Runtime stage: runs only the JRE and the compiled JAR.
+
+Relevant Dockerfile excerpt:
+
+```dockerfile
+FROM eclipse-temurin:25-jdk AS builder
+WORKDIR /workspace
+COPY gradlew build.gradle settings.gradle ./
+COPY gradle/ gradle/
+COPY src/ src/
+RUN ./gradlew --no-daemon clean bootJar
+
+FROM eclipse-temurin:25-jre
+WORKDIR /app
+COPY --from=builder /workspace/build/libs/appJava-0.0.1-SNAPSHOT.jar /app/app.jar
+CMD ["java", "-jar", "/app/app.jar"]
+```
+
+## 2. Size Comparison & Analysis
+
+Builder image size vs final image size:
+
+```text
+devops-info-service-java-builder:lab02 = 751MB
+devops-info-service-java:lab02 = 390MB
+```
+
+Why this matters:
+
+- Smaller runtime images reduce the attack surface (fewer tools/libraries available).
+- Smaller images pull faster and start faster, improving deploy times.
+
+## 3. Build & Run Evidence
+
+### 3.1 Build output
+
+```text
+#0 building with "orbstack" instance using docker driver
+
+#1 [internal] load build definition from Dockerfile
+#1 transferring dockerfile: 557B done
+#1 DONE 0.0s
+
+#2 [internal] load metadata for docker.io/library/eclipse-temurin:25-jdk
+#2 DONE 2.7s
+
+#3 [internal] load metadata for docker.io/library/eclipse-temurin:25-jre
+#3 DONE 3.0s
+
+#4 [internal] load .dockerignore
+#4 transferring context: 145B done
+#4 DONE 0.0s
+
+#5 [builder 1/8] FROM docker.io/library/eclipse-temurin:25-jdk@sha256:42fc3fe6804ec612f5ef8a613f8c06d8dd578de6207336077387d4cb32edaa9b
+#5 ...
+
+#6 [internal] load build context
+#6 transferring context: 69.00kB 0.0s done
+#6 DONE 0.1s
+
+#7 [stage-1 1/5] FROM docker.io/library/eclipse-temurin:25-jre@sha256:9d1d3068b16f2c4127be238ca06439012ff14a8fdf38f8f62472160f9058464a
+#7 ...
+
+#8 [stage-1 2/5] WORKDIR /app
+#8 DONE 0.3s
+
+#9 [builder 2/8] WORKDIR /workspace
+#9 DONE 0.2s
+
+#10 [builder 3/8] COPY gradlew build.gradle settings.gradle ./
+#10 DONE 0.0s
+
+#11 [builder 4/8] COPY gradle/ gradle/
+#11 DONE 0.0s
+
+#12 [builder 5/8] RUN chmod +x gradlew
+#12 DONE 0.1s
+
+#13 [stage-1 3/5] RUN addgroup --system app && adduser --system --ingroup app app
+#13 DONE 0.2s
+
+#14 [builder 6/8] RUN ./gradlew --no-daemon --version
+#14 DONE 10.4s
+
+#15 [builder 7/8] COPY src/ src/
+#15 DONE 0.0s
+
+#16 [builder 8/8] RUN ./gradlew --no-daemon clean bootJar
+#16 DONE 52.9s
+
+#23 [stage-1 4/5] COPY --from=builder /workspace/build/libs/appJava-0.0.1-SNAPSHOT.jar /app/app.jar
+#23 DONE 0.1s
+
+#24 [stage-1 5/5] RUN chown -R app:app /app
+#24 DONE 0.1s
+
+#25 exporting to image
+#25 writing image sha256:4391bcb658ad6acae8f3fe5e4d3bcf5c1e48b8e8c22a44b87a83a70380db0294 done
+#25 naming to docker.io/library/devops-info-service-java:lab02 done
+#25 DONE 0.1s
+```
+
+### 3.2 Image sizes output
+
+```text
+REPOSITORY TAG IMAGE ID CREATED SIZE
+devops-info-service-java lab02 4391bcb658ad 9 seconds ago 390MB
+devops-info-service-java-builder lab02 9e5402ef437f 9 seconds ago 751MB
+```
+
+### 3.3 Container running output
+
+```text
+b835816bfad0262f3818428796ffb07d47f36b1a1e1ae9d4e176eb157ba9f9b6
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+b835816bfad0 devops-info-service-java:lab02 "/__cacert_entrypoin…" 1 second ago Up 1 second 0.0.0.0:8084->8081/tcp, [::]:8084->8081/tcp devops-info-java-ps
+```
+
+### 3.4 Endpoint tests
+
+```text
+GET /health
+{
+ "status": "healthy",
+ "timestamp": "2026-01-31T11:11:06.847230599Z",
+ "uptime_seconds": 1
+}
+
+GET /
+{
+ "service": {
+ "name": "devops-info-service",
+ "version": "1.0.0",
+ "description": "DevOps course info service (Java implementation)",
+ "framework": "Spring Boot"
+ },
+ "system": {
+ "hostname": "8507a1b302a1",
+ "platform": "Linux",
+ "platform_version": "6.17.8-orbstack-00308-g8f9c941121b1",
+ "architecture": "aarch64",
+ "cpu_count": 12,
+ "python_version": "25.0.1"
+ },
+ "runtime": {
+ "uptime_seconds": 1,
+ "uptime_human": "0 hours, 0 minutes",
+ "current_time": "2026-01-31T11:11:06.888902979Z",
+ "timezone": "UTC"
+ },
+ "request": {
+ "client_ip": "192.168.215.1",
+ "user_agent": "curl/8.7.1",
+ "method": "GET",
+ "path": "/"
+ },
+ "endpoints": [
+ {
+ "path": "/",
+ "method": "GET",
+ "description": "Service information"
+ },
+ {
+ "path": "/health",
+ "method": "GET",
+ "description": "Health check"
+ }
+ ]
+}
+```
+
+## 4. Technical Notes
+
+- Each stage purpose:
+ - Builder: provides JDK + Gradle wrapper to produce the JAR.
+ - Runtime: contains only what is needed to execute the JAR.
+- Trade-offs:
+ - The builder stage can be large and includes build tooling; the final stage does not.
+
+Security notes:
+
+- The final image runs as a non-root user (`uid=100(app)`).
diff --git a/appJava/gradle/wrapper/gradle-wrapper.jar b/appJava/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000000..f8e1ee3125
Binary files /dev/null and b/appJava/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/appJava/gradle/wrapper/gradle-wrapper.properties b/appJava/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 0000000000..23449a2b54
--- /dev/null
+++ b/appJava/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,7 @@
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-9.2.1-bin.zip
+networkTimeout=10000
+validateDistributionUrl=true
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
diff --git a/appJava/gradlew b/appJava/gradlew
new file mode 100755
index 0000000000..adff685a03
--- /dev/null
+++ b/appJava/gradlew
@@ -0,0 +1,248 @@
+#!/bin/sh
+
+#
+# Copyright © 2015 the original authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+##############################################################################
+#
+# Gradle start up script for POSIX generated by Gradle.
+#
+# Important for running:
+#
+# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
+# noncompliant, but you have some other compliant shell such as ksh or
+# bash, then to run this script, type that shell name before the whole
+# command line, like:
+#
+# ksh Gradle
+#
+# Busybox and similar reduced shells will NOT work, because this script
+# requires all of these POSIX shell features:
+# * functions;
+# * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
+# «${var#prefix}», «${var%suffix}», and «$( cmd )»;
+# * compound commands having a testable exit status, especially «case»;
+# * various built-in commands including «command», «set», and «ulimit».
+#
+# Important for patching:
+#
+# (2) This script targets any POSIX shell, so it avoids extensions provided
+# by Bash, Ksh, etc; in particular arrays are avoided.
+#
+# The "traditional" practice of packing multiple parameters into a
+# space-separated string is a well documented source of bugs and security
+# problems, so this is (mostly) avoided, by progressively accumulating
+# options in "$@", and eventually passing that to Java.
+#
+# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
+# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
+# see the in-line comments for details.
+#
+# There are tweaks for specific operating systems such as AIX, CygWin,
+# Darwin, MinGW, and NonStop.
+#
+# (3) This script is generated from the Groovy template
+# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
+# within the Gradle project.
+#
+# You can find Gradle at https://github.com/gradle/gradle/.
+#
+##############################################################################
+
+# Attempt to set APP_HOME
+
+# Resolve links: $0 may be a link
+app_path=$0
+
+# Need this for daisy-chained symlinks.
+while
+ APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path
+ [ -h "$app_path" ]
+do
+ ls=$( ls -ld "$app_path" )
+ link=${ls#*' -> '}
+ case $link in #(
+ /*) app_path=$link ;; #(
+ *) app_path=$APP_HOME$link ;;
+ esac
+done
+
+# This is normally unused
+# shellcheck disable=SC2034
+APP_BASE_NAME=${0##*/}
+# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
+APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD=maximum
+
+warn () {
+ echo "$*"
+} >&2
+
+die () {
+ echo
+ echo "$*"
+ echo
+ exit 1
+} >&2
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "$( uname )" in #(
+ CYGWIN* ) cygwin=true ;; #(
+ Darwin* ) darwin=true ;; #(
+ MSYS* | MINGW* ) msys=true ;; #(
+ NONSTOP* ) nonstop=true ;;
+esac
+
+
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+ if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+ # IBM's JDK on AIX uses strange locations for the executables
+ JAVACMD=$JAVA_HOME/jre/sh/java
+ else
+ JAVACMD=$JAVA_HOME/bin/java
+ fi
+ if [ ! -x "$JAVACMD" ] ; then
+ die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+ fi
+else
+ JAVACMD=java
+ if ! command -v java >/dev/null 2>&1
+ then
+ die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+ fi
+fi
+
+# Increase the maximum file descriptors if we can.
+if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
+ case $MAX_FD in #(
+ max*)
+ # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
+ # shellcheck disable=SC2039,SC3045
+ MAX_FD=$( ulimit -H -n ) ||
+ warn "Could not query maximum file descriptor limit"
+ esac
+ case $MAX_FD in #(
+ '' | soft) :;; #(
+ *)
+ # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
+ # shellcheck disable=SC2039,SC3045
+ ulimit -n "$MAX_FD" ||
+ warn "Could not set maximum file descriptor limit to $MAX_FD"
+ esac
+fi
+
+# Collect all arguments for the java command, stacking in reverse order:
+# * args from the command line
+# * the main class name
+# * -classpath
+# * -D...appname settings
+# * --module-path (only if needed)
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if "$cygwin" || "$msys" ; then
+ APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
+
+ JAVACMD=$( cygpath --unix "$JAVACMD" )
+
+ # Now convert the arguments - kludge to limit ourselves to /bin/sh
+ for arg do
+ if
+ case $arg in #(
+ -*) false ;; # don't mess with options #(
+ /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath
+ [ -e "$t" ] ;; #(
+ *) false ;;
+ esac
+ then
+ arg=$( cygpath --path --ignore --mixed "$arg" )
+ fi
+ # Roll the args list around exactly as many times as the number of
+ # args, so each arg winds up back in the position where it started, but
+ # possibly modified.
+ #
+ # NB: a `for` loop captures its iteration list before it begins, so
+ # changing the positional parameters here affects neither the number of
+ # iterations, nor the values presented in `arg`.
+ shift # remove old arg
+ set -- "$@" "$arg" # push replacement arg
+ done
+fi
+
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Collect all arguments for the java command:
+# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
+# and any embedded shellness will be escaped.
+# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
+# treated as '${Hostname}' itself on the command line.
+
+set -- \
+ "-Dorg.gradle.appname=$APP_BASE_NAME" \
+ -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \
+ "$@"
+
+# Stop when "xargs" is not available.
+if ! command -v xargs >/dev/null 2>&1
+then
+ die "xargs is not available"
+fi
+
+# Use "xargs" to parse quoted args.
+#
+# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
+#
+# In Bash we could simply go:
+#
+# readarray ARGS < <( xargs -n1 <<<"$var" ) &&
+# set -- "${ARGS[@]}" "$@"
+#
+# but POSIX shell has neither arrays nor command substitution, so instead we
+# post-process each arg (as a line of input to sed) to backslash-escape any
+# character that might be a shell metacharacter, then use eval to reverse
+# that process (while maintaining the separation between arguments), and wrap
+# the whole thing up as a single "set" statement.
+#
+# This will of course break if any of these variables contains a newline or
+# an unmatched quote.
+#
+
+eval "set -- $(
+ printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
+ xargs -n1 |
+ sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
+ tr '\n' ' '
+ )" '"$@"'
+
+exec "$JAVACMD" "$@"
diff --git a/appJava/gradlew.bat b/appJava/gradlew.bat
new file mode 100644
index 0000000000..c4bdd3ab8e
--- /dev/null
+++ b/appJava/gradlew.bat
@@ -0,0 +1,93 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+@rem SPDX-License-Identifier: Apache-2.0
+@rem
+
+@if "%DEBUG%"=="" @echo off
+@rem ##########################################################################
+@rem
+@rem Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%"=="" set DIRNAME=.
+@rem This is normally unused
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if %ERRORLEVEL% equ 0 goto execute
+
+echo. 1>&2
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo. 1>&2
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
+
+goto fail
+
+:execute
+@rem Setup the command line
+
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %*
+
+:end
+@rem End local scope for the variables with windows NT shell
+if %ERRORLEVEL% equ 0 goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+set EXIT_CODE=%ERRORLEVEL%
+if %EXIT_CODE% equ 0 set EXIT_CODE=1
+if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
+exit /b %EXIT_CODE%
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/appJava/screenshots/01-compilation-execution.png b/appJava/screenshots/01-compilation-execution.png
new file mode 100644
index 0000000000..f60217f7bf
Binary files /dev/null and b/appJava/screenshots/01-compilation-execution.png differ
diff --git a/appJava/screenshots/02-health-check.png b/appJava/screenshots/02-health-check.png
new file mode 100644
index 0000000000..1be19a8135
Binary files /dev/null and b/appJava/screenshots/02-health-check.png differ
diff --git a/appJava/settings.gradle b/appJava/settings.gradle
new file mode 100644
index 0000000000..2ea4870508
--- /dev/null
+++ b/appJava/settings.gradle
@@ -0,0 +1 @@
+rootProject.name = 'appJava'
diff --git a/appJava/src/main/java/luminais/tech/appjava/AppJavaApplication.java b/appJava/src/main/java/luminais/tech/appjava/AppJavaApplication.java
new file mode 100644
index 0000000000..44c2db8771
--- /dev/null
+++ b/appJava/src/main/java/luminais/tech/appjava/AppJavaApplication.java
@@ -0,0 +1,22 @@
+package luminais.tech.appjava;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+
+import luminais.tech.appjava.model.DevopsProperties;
+
+@SpringBootApplication
+@EnableConfigurationProperties(DevopsProperties.class)
+public class AppJavaApplication {
+
+ public static void main(String[] args) {
+ String debugEnv = System.getenv("DEBUG");
+ if (debugEnv != null && debugEnv.equalsIgnoreCase("true")) {
+ System.setProperty("logging.level.root", "DEBUG");
+ }
+
+ SpringApplication.run(AppJavaApplication.class, args);
+ }
+
+}
diff --git a/appJava/src/main/java/luminais/tech/appjava/model/DevopsProperties.java b/appJava/src/main/java/luminais/tech/appjava/model/DevopsProperties.java
new file mode 100644
index 0000000000..391c146b04
--- /dev/null
+++ b/appJava/src/main/java/luminais/tech/appjava/model/DevopsProperties.java
@@ -0,0 +1,30 @@
+package luminais.tech.appjava.model;
+
+import java.util.List;
+
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+/**
+ * Configuration properties for the DevOps info service.
+ *
+ * Values are loaded from the {@code devops.*} section in application.yml.
+ */
+@ConfigurationProperties(prefix = "devops")
+public record DevopsProperties(
+ Service service,
+        List<Endpoint> endpoints
+) {
+
+ public record Service(
+ String name,
+ String version,
+ String description,
+ String framework
+ ) {}
+
+ public record Endpoint(
+ String path,
+ String method,
+ String description
+ ) {}
+}
diff --git a/appJava/src/main/java/luminais/tech/appjava/operation/DevopsSystemInfoApiDelegateImpl.java b/appJava/src/main/java/luminais/tech/appjava/operation/DevopsSystemInfoApiDelegateImpl.java
new file mode 100644
index 0000000000..7f6947d489
--- /dev/null
+++ b/appJava/src/main/java/luminais/tech/appjava/operation/DevopsSystemInfoApiDelegateImpl.java
@@ -0,0 +1,44 @@
+package luminais.tech.appjava.operation;
+
+import jakarta.servlet.http.HttpServletRequest;
+
+import org.springframework.http.ResponseEntity;
+import org.springframework.stereotype.Service;
+import org.springframework.web.context.request.RequestContextHolder;
+import org.springframework.web.context.request.ServletRequestAttributes;
+
+import luminais.tech.appjava.api.DevopsSystemInfoApiDelegate;
+import luminais.tech.appjava.model.HealthResponse;
+import luminais.tech.appjava.model.RootResponse;
+import luminais.tech.appjava.service.DevopsInfoService;
+
+/**
+ * Operation layer: OpenAPI delegate that forwards to the service.
+ */
+@Service
+public class DevopsSystemInfoApiDelegateImpl implements DevopsSystemInfoApiDelegate {
+
+ private final DevopsInfoService devopsInfoService;
+
+ public DevopsSystemInfoApiDelegateImpl(DevopsInfoService devopsInfoService) {
+ this.devopsInfoService = devopsInfoService;
+ }
+
+ @Override
+    public ResponseEntity<RootResponse> getInfo() {
+ HttpServletRequest request = currentRequest();
+ RootResponse body = devopsInfoService.buildRootResponse(request);
+ return ResponseEntity.ok(body);
+ }
+
+ @Override
+    public ResponseEntity<HealthResponse> getHealth() {
+ HealthResponse body = devopsInfoService.buildHealthResponse();
+ return ResponseEntity.ok(body);
+ }
+
+ private HttpServletRequest currentRequest() {
+ ServletRequestAttributes attrs = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
+ return attrs != null ? attrs.getRequest() : null;
+ }
+}
diff --git a/appJava/src/main/java/luminais/tech/appjava/service/DevopsInfoService.java b/appJava/src/main/java/luminais/tech/appjava/service/DevopsInfoService.java
new file mode 100644
index 0000000000..356c656c50
--- /dev/null
+++ b/appJava/src/main/java/luminais/tech/appjava/service/DevopsInfoService.java
@@ -0,0 +1,135 @@
+package luminais.tech.appjava.service;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.time.Duration;
+import java.time.Instant;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
+import java.util.ArrayList;
+import java.util.List;
+
+import jakarta.servlet.http.HttpServletRequest;
+
+import org.springframework.stereotype.Service;
+
+import luminais.tech.appjava.model.DevopsProperties;
+import luminais.tech.appjava.model.EndpointInfo;
+import luminais.tech.appjava.model.HealthResponse;
+import luminais.tech.appjava.model.RequestInfo;
+import luminais.tech.appjava.model.RootResponse;
+import luminais.tech.appjava.model.RuntimeInfo;
+import luminais.tech.appjava.model.ServiceInfo;
+import luminais.tech.appjava.model.SystemInfo;
+
+/**
+ * Service layer for building DevOps info responses.
+ */
+@Service
+public class DevopsInfoService {
+
+ private static final Instant START_TIME = Instant.now();
+
+ private final DevopsProperties properties;
+
+ public DevopsInfoService(DevopsProperties properties) {
+ this.properties = properties;
+ }
+
+ public RootResponse buildRootResponse(HttpServletRequest request) {
+ return new RootResponse(
+ buildServiceInfo(),
+ buildSystemInfo(),
+ buildRuntimeInfo(),
+ buildRequestInfo(request),
+ buildEndpoints()
+ );
+ }
+
+ public HealthResponse buildHealthResponse() {
+ long uptimeSeconds = getUptimeSeconds();
+
+ return new HealthResponse(
+ "healthy",
+ OffsetDateTime.now(ZoneOffset.UTC),
+ uptimeSeconds
+ );
+ }
+
+ private ServiceInfo buildServiceInfo() {
+ DevopsProperties.Service cfg = properties.service();
+ return new ServiceInfo(
+ cfg.name(),
+ cfg.version(),
+ cfg.description(),
+ cfg.framework()
+ );
+ }
+
+ private SystemInfo buildSystemInfo() {
+ String hostname = "unknown";
+ try {
+ hostname = InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException ignored) {
+ // use default hostname
+ }
+
+ return new SystemInfo(
+ hostname,
+ System.getProperty("os.name", "unknown"),
+ System.getProperty("os.version", "unknown"),
+ System.getProperty("os.arch", "unknown"),
+ Runtime.getRuntime().availableProcessors(),
+ System.getProperty("java.version", "unknown")
+ );
+ }
+
+ private RuntimeInfo buildRuntimeInfo() {
+ long uptimeSeconds = getUptimeSeconds();
+ long hours = uptimeSeconds / 3600;
+ long minutes = (uptimeSeconds % 3600) / 60;
+
+ return new RuntimeInfo(
+ uptimeSeconds,
+ hours + " hours, " + minutes + " minutes",
+ OffsetDateTime.now(ZoneOffset.UTC),
+ "UTC"
+ );
+ }
+
+ private RequestInfo buildRequestInfo(HttpServletRequest request) {
+ if (request == null) {
+ return new RequestInfo(
+ "unknown",
+ "unknown",
+ "UNKNOWN",
+ "/"
+ );
+ }
+
+ String userAgent = request.getHeader("User-Agent");
+
+ return new RequestInfo(
+ request.getRemoteAddr(),
+ userAgent != null ? userAgent : "unknown",
+ request.getMethod(),
+ request.getRequestURI()
+ );
+ }
+
+    private List<EndpointInfo> buildEndpoints() {
+        List<EndpointInfo> endpoints = new ArrayList<>();
+ for (DevopsProperties.Endpoint cfg : properties.endpoints()) {
+ endpoints.add(new EndpointInfo(
+ cfg.path(),
+ cfg.method(),
+ cfg.description()
+ ));
+ }
+ return endpoints;
+ }
+
+ private long getUptimeSeconds() {
+ return Duration.between(START_TIME, Instant.now()).getSeconds();
+ }
+}
diff --git a/appJava/src/main/resources/application.yml b/appJava/src/main/resources/application.yml
new file mode 100644
index 0000000000..5908fc5daa
--- /dev/null
+++ b/appJava/src/main/resources/application.yml
@@ -0,0 +1,21 @@
+spring:
+ application:
+ name: devops-info-service-java
+
+server:
+ port: ${PORT:8081}
+ address: ${HOST:0.0.0.0}
+
+devops:
+ service:
+ name: devops-info-service
+ version: 1.0.0
+ description: DevOps course info service (Java implementation)
+ framework: Spring Boot
+ endpoints:
+ - path: "/"
+ method: GET
+ description: "Service information"
+ - path: "/health"
+ method: GET
+ description: "Health check"
diff --git a/appJava/src/main/resources/openapi.yaml b/appJava/src/main/resources/openapi.yaml
new file mode 100644
index 0000000000..167e1e7cf5
--- /dev/null
+++ b/appJava/src/main/resources/openapi.yaml
@@ -0,0 +1,159 @@
+openapi: 3.0.3
+info:
+ title: DevOps Info Service (Java)
+ version: 1.0.0
+ description: DevOps course info service implemented in Java/Spring Boot.
+tags:
+ - name: DevopsSystemInfo
+ description: System and app info
+paths:
+ /:
+ get:
+ operationId: getInfo
+ summary: Service and system information
+ tags:
+ - DevopsSystemInfo
+ responses:
+ "200":
+ description: Successful response with service info.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/RootResponse"
+ /health:
+ get:
+ operationId: getHealth
+ summary: Health check endpoint
+ tags:
+ - DevopsSystemInfo
+ responses:
+ "200":
+ description: Health status with uptime.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/HealthResponse"
+components:
+ schemas:
+ ServiceInfo:
+ type: object
+ properties:
+ name:
+ type: string
+ version:
+ type: string
+ description:
+ type: string
+ framework:
+ type: string
+ required:
+ - name
+ - version
+ - description
+ - framework
+ SystemInfo:
+ type: object
+ properties:
+ hostname:
+ type: string
+ platform:
+ type: string
+ platform_version:
+ type: string
+ architecture:
+ type: string
+ cpu_count:
+ type: integer
+ format: int32
+ python_version:
+ type: string
+ required:
+ - hostname
+ - platform
+ - platform_version
+ - architecture
+ - cpu_count
+ - python_version
+ RuntimeInfo:
+ type: object
+ properties:
+ uptime_seconds:
+ type: integer
+ format: int64
+ uptime_human:
+ type: string
+ current_time:
+ type: string
+ format: date-time
+ timezone:
+ type: string
+ required:
+ - uptime_seconds
+ - uptime_human
+ - current_time
+ - timezone
+ RequestInfo:
+ type: object
+ properties:
+ client_ip:
+ type: string
+ user_agent:
+ type: string
+ method:
+ type: string
+ path:
+ type: string
+ required:
+ - client_ip
+ - user_agent
+ - method
+ - path
+ EndpointInfo:
+ type: object
+ properties:
+ path:
+ type: string
+ method:
+ type: string
+ description:
+ type: string
+ required:
+ - path
+ - method
+ - description
+ RootResponse:
+ type: object
+ properties:
+ service:
+ $ref: "#/components/schemas/ServiceInfo"
+ system:
+ $ref: "#/components/schemas/SystemInfo"
+ runtime:
+ $ref: "#/components/schemas/RuntimeInfo"
+ request:
+ $ref: "#/components/schemas/RequestInfo"
+ endpoints:
+ type: array
+ items:
+ $ref: "#/components/schemas/EndpointInfo"
+ required:
+ - service
+ - system
+ - runtime
+ - request
+ - endpoints
+ HealthResponse:
+ type: object
+ properties:
+ status:
+ type: string
+ timestamp:
+ type: string
+ format: date-time
+ uptime_seconds:
+ type: integer
+ format: int64
+ required:
+ - status
+ - timestamp
+ - uptime_seconds
diff --git a/appJava/src/test/java/luminais/tech/appjava/AppJavaApplicationTests.java b/appJava/src/test/java/luminais/tech/appjava/AppJavaApplicationTests.java
new file mode 100644
index 0000000000..db9521944c
--- /dev/null
+++ b/appJava/src/test/java/luminais/tech/appjava/AppJavaApplicationTests.java
@@ -0,0 +1,13 @@
+package luminais.tech.appjava;
+
+import org.junit.jupiter.api.Test;
+import org.springframework.boot.test.context.SpringBootTest;
+
+@SpringBootTest
+class AppJavaApplicationTests {
+
+ @Test
+ void contextLoads() {
+ }
+
+}
diff --git a/appJava/src/test/java/luminais/tech/appjava/DevopsEndpointsTest.java b/appJava/src/test/java/luminais/tech/appjava/DevopsEndpointsTest.java
new file mode 100644
index 0000000000..663cb6ccc1
--- /dev/null
+++ b/appJava/src/test/java/luminais/tech/appjava/DevopsEndpointsTest.java
@@ -0,0 +1,56 @@
+package luminais.tech.appjava;
+
+import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath;
+import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.test.web.servlet.MockMvc;
+import org.springframework.test.web.servlet.setup.MockMvcBuilders;
+import org.springframework.web.context.WebApplicationContext;
+
+@SpringBootTest
+class DevopsEndpointsTest {
+
+ @Autowired
+ private WebApplicationContext context;
+
+ private MockMvc mockMvc;
+
+ @BeforeEach
+ void setUp() {
+ mockMvc = MockMvcBuilders.webAppContextSetup(context).build();
+ }
+
+ @Test
+ void healthReturnsExpectedFields() throws Exception {
+ mockMvc.perform(get("/health"))
+ .andExpect(status().isOk())
+ .andExpect(jsonPath("$.status").value("healthy"))
+ .andExpect(jsonPath("$.timestamp").isString())
+ .andExpect(jsonPath("$.uptime_seconds").isNumber());
+ }
+
+ @Test
+ void rootReturnsExpectedStructure() throws Exception {
+ mockMvc.perform(get("/"))
+ .andExpect(status().isOk())
+ .andExpect(jsonPath("$.service.name").value("devops-info-service"))
+ .andExpect(jsonPath("$.service.version").isString())
+ .andExpect(jsonPath("$.service.framework").value("Spring Boot"))
+ .andExpect(jsonPath("$.system.hostname").isString())
+ .andExpect(jsonPath("$.runtime.uptime_seconds").isNumber())
+ .andExpect(jsonPath("$.request.method").value("GET"))
+ .andExpect(jsonPath("$.request.path").value("/"))
+ .andExpect(jsonPath("$.endpoints").isArray());
+ }
+
+ @Test
+ void unknownPathReturns404() throws Exception {
+ mockMvc.perform(get("/does-not-exist"))
+ .andExpect(status().isNotFound());
+ }
+}
diff --git a/app_python/.dockerignore b/app_python/.dockerignore
new file mode 100644
index 0000000000..6bceca3085
--- /dev/null
+++ b/app_python/.dockerignore
@@ -0,0 +1,19 @@
+__pycache__/
+*.py[cod]
+*.pyo
+
+venv/
+.venv/
+.env
+.env/
+
+.git/
+.gitignore
+
+.vscode/
+.idea/
+.DS_Store
+
+docs/
+tests/
+README.md
diff --git a/app_python/.gitignore b/app_python/.gitignore
new file mode 100644
index 0000000000..e5e8e9eda9
--- /dev/null
+++ b/app_python/.gitignore
@@ -0,0 +1,27 @@
+# Python bytecode and cache
+__pycache__/
+*.py[cod]
+*.pyo
+
+# Virtual environments
+venv/
+.venv/
+.venv-ci/
+.env/
+.env
+
+# Logs
+*.log
+
+# Test / coverage artifacts
+.pytest_cache/
+.ruff_cache/
+.coverage
+coverage.xml
+
+# IDE configuration
+.vscode/
+.idea/
+
+# macOS
+.DS_Store
diff --git a/app_python/Dockerfile b/app_python/Dockerfile
new file mode 100644
index 0000000000..5be23710a6
--- /dev/null
+++ b/app_python/Dockerfile
@@ -0,0 +1,20 @@
+FROM python:3.13-slim
+
+ENV PYTHONDONTWRITEBYTECODE=1 \
+ PYTHONUNBUFFERED=1
+
+WORKDIR /app
+
+RUN addgroup --system app && adduser --system --ingroup app app
+
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY app.py .
+RUN chown -R app:app /app
+
+USER app
+
+EXPOSE 8080
+
+CMD ["python", "app.py"]
diff --git a/app_python/README.md b/app_python/README.md
new file mode 100644
index 0000000000..2138f620e8
--- /dev/null
+++ b/app_python/README.md
@@ -0,0 +1,128 @@
+# DevOps Info Service (Python / FastAPI)
+
+[![Python CI](https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg)](https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/python-ci.yml)
+[![Java CI](https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/java-ci.yml/badge.svg)](https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/java-ci.yml)
+[![codecov](https://codecov.io/gh/LuminiteTime/DevOps-Core-Course/branch/master/graph/badge.svg)](https://codecov.io/gh/LuminiteTime/DevOps-Core-Course)
+
+## Overview
+
+This service exposes a small HTTP API that reports information about the running host, the Python runtime, and the incoming request. It is used as a base for later DevOps labs.
+
+The API provides:
+- `GET /` – service, system, runtime, and request metadata
+- `GET /health` – simple health check with uptime info
+
+## Prerequisites
+
+- Python 3.11+
+
+Python dependencies are listed and pinned with versions in [requirements.txt](requirements.txt).
+
+## Installation
+
+```bash
+cd app_python
+python -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt
+```
+
+## Running the Application
+
+Default configuration (HOST=0.0.0.0, PORT=8080, DEBUG=false):
+
+```bash
+cd app_python
+source venv/bin/activate
+python app.py
+```
+
+Custom configuration with environment variables:
+
+```bash
+# Run on localhost:8080
+cd app_python
+source venv/bin/activate
+PORT=8080 python app.py
+
+# Run on 127.0.0.1:3000 with debug reload
+HOST=127.0.0.1 PORT=3000 DEBUG=true python app.py
+```
+
+After start, you can test the endpoints with curl (default app config used in commands):
+
+```bash
+curl http://localhost:8080/
+curl http://localhost:8080/health
+```
+
+## Docker
+
+### Build (local)
+
+```bash
+cd app_python
+docker build -t <image-name>:<tag> .
+```
+
+### Run
+
+```bash
+docker run --rm --name <container-name> -p <host-port>:8080 <image-name>:<tag>
+```
+
+Test endpoints:
+
+```bash
+curl http://localhost:<host-port>/
+curl http://localhost:<host-port>/health
+```
+
+### Pull from Docker Hub
+
+```bash
+docker pull <dockerhub-user>/<image-name>:<tag>
+docker run --rm -p <host-port>:8080 <dockerhub-user>/<image-name>:<tag>
+```
+
+## Testing
+
+Install dev dependencies and run tests:
+
+```bash
+cd app_python
+python -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt -r requirements-dev.txt
+
+ruff check .
+pytest
+```
+
+## API Endpoints
+
+- `GET /`
+ - Returns JSON with the following top-level sections:
+ - `service` – name, version, description, framework
+ - `system` – hostname, platform, platform_version, architecture, cpu_count, python_version
+ - `runtime` – uptime_seconds, uptime_human, current_time, timezone
+ - `request` – client_ip, user_agent, method, path
+ - `endpoints` – list of available paths and their purpose
+
+- `GET /health`
+ - Returns a compact health document:
+ - `status` – string status ("healthy")
+ - `timestamp` – current UTC timestamp in ISO 8601 format
+ - `uptime_seconds` – number of seconds the process has been running
+
+## Configuration
+
+The application is configured via environment variables read in [app.py](app.py):
+
+| Variable | Default | Description |
+|----------|------------|---------------------------------------|
+| `HOST` | `0.0.0.0` | Address app binds to |
+| `PORT` | `8080` | TCP port app listens on |
+| `DEBUG` | `False` | When `true`, enables app reload |
+
+All variables are optional. If they are not set, the defaults above are used.
diff --git a/app_python/__init__.py b/app_python/__init__.py
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/app_python/__init__.py
@@ -0,0 +1 @@
+
diff --git a/app_python/app.py b/app_python/app.py
new file mode 100644
index 0000000000..da7910b03f
--- /dev/null
+++ b/app_python/app.py
@@ -0,0 +1,225 @@
+"""DevOps Info Service application.
+
+Main application module.
+For now it is all contained in a single file for simplicity, but is planned to be modularized later.
+"""
+
+from __future__ import annotations
+
+import logging
+import os
+import platform
+import socket
+from datetime import datetime, timezone
+from typing import Any, Dict, List
+
+from fastapi import FastAPI, Request
+from fastapi.exceptions import RequestValidationError
+from fastapi.responses import JSONResponse
+from starlette.exceptions import HTTPException as StarletteHTTPException
+import uvicorn
+
+# Configuration
+# Values come from environment variables with safe defaults (documented in
+# the README "Configuration" table). NOTE(review): int(...) raises
+# ValueError at import time if PORT is set to a non-numeric string.
+HOST: str = os.getenv("HOST", "0.0.0.0")
+PORT: int = int(os.getenv("PORT", "8080"))
+# Only the literal string "true" (any casing) enables debug mode.
+DEBUG: bool = os.getenv("DEBUG", "False").lower() == "true"
+
+APP_VERSION: str = "1.0.0"
+APP_NAME: str = "devops-info-service"
+APP_DESCRIPTION: str = "DevOps course info service"
+APP_FRAMEWORK: str = "FastAPI"
+
+# Single logging configuration for the whole app; DEBUG level only when
+# debug mode is enabled, INFO otherwise.
+logging.basicConfig(
+    level=logging.DEBUG if DEBUG else logging.INFO,
+    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+)
+logger = logging.getLogger(APP_NAME)
+
+# Application start time (UTC, captured at import); reference point for
+# get_uptime().
+START_TIME: datetime = datetime.now(timezone.utc)
+
+
+def get_system_info() -> Dict[str, Any]:
+    """Return information about the current host and Python runtime.
+
+    Returns:
+        Dict with ``hostname``, ``platform`` (OS name), ``platform_version``,
+        ``architecture``, ``cpu_count`` (always an int) and
+        ``python_version``.
+    """
+
+    return {
+        "hostname": socket.gethostname(),
+        "platform": platform.system(),
+        "platform_version": platform.version(),
+        "architecture": platform.machine(),
+        # os.cpu_count() can return None (undeterminable); fall back to 1
+        # so the field is always an integer.
+        "cpu_count": os.cpu_count() or 1,
+        "python_version": platform.python_version(),
+    }
+
+
+def get_uptime() -> Dict[str, Any]:
+    """Calculate uptime in seconds and a human readable form.
+
+    Returns:
+        Dict with ``uptime_seconds`` (int, truncated) and ``uptime_human``
+        as "H hours, M minutes" — hours keep growing past 24; there is no
+        days component.
+    """
+
+    # Uptime is measured against the module-level START_TIME captured at
+    # import time, both in UTC.
+    delta = datetime.now(timezone.utc) - START_TIME
+    seconds = int(delta.total_seconds())
+    hours = seconds // 3600
+    minutes = (seconds % 3600) // 60
+    return {
+        "uptime_seconds": seconds,
+        "uptime_human": f"{hours} hours, {minutes} minutes",
+    }
+
+
+def get_runtime_info() -> Dict[str, Any]:
+    """Build runtime information including uptime and current UTC time.
+
+    Returns:
+        Dict combining the two get_uptime() fields with ``current_time``
+        (ISO 8601, UTC) and a fixed ``timezone`` value of "UTC".
+    """
+
+    uptime = get_uptime()
+    return {
+        "uptime_seconds": uptime["uptime_seconds"],
+        "uptime_human": uptime["uptime_human"],
+        "current_time": datetime.now(timezone.utc).isoformat(),
+        "timezone": "UTC",
+    }
+
+
+def get_request_info(request: Request) -> Dict[str, Any]:
+    """Extract client and request metadata from the incoming HTTP request.
+
+    Args:
+        request: The Starlette/FastAPI request being handled.
+
+    Returns:
+        Dict with ``client_ip``, ``user_agent``, ``method`` and ``path``.
+        ``client_ip`` and ``user_agent`` fall back to "unknown" when the
+        client object or header is absent (request.client can be None,
+        e.g. under some test transports).
+    """
+
+    client_ip = request.client.host if request.client else "unknown"
+    user_agent = request.headers.get("user-agent", "unknown")
+    return {
+        "client_ip": client_ip,
+        "user_agent": user_agent,
+        "method": request.method,
+        "path": request.url.path,
+    }
+
+
+def get_endpoints() -> List[Dict[str, str]]:
+    """Describe public endpoints exposed by this service.
+
+    Returns:
+        Static list of {path, method, description} entries; kept in sync
+        by hand with the routes registered on ``app`` below.
+    """
+
+    return [
+        {
+            "path": "/",
+            "method": "GET",
+            "description": "Service information",
+        },
+        {
+            "path": "/health",
+            "method": "GET",
+            "description": "Health check",
+        },
+    ]
+
+
+# FastAPI application instance; this metadata feeds the auto-generated
+# OpenAPI docs (/docs, /redoc).
+app: FastAPI = FastAPI(
+    title=APP_NAME,
+    version=APP_VERSION,
+    description=APP_DESCRIPTION
+)
+
+
+@app.exception_handler(StarletteHTTPException)
+async def http_exception_handler(
+    request: Request, exc: StarletteHTTPException
+) -> JSONResponse:
+    """Return structured JSON for HTTP errors such as 404.
+
+    404 gets a fixed "Endpoint does not exist" message; every other
+    status echoes ``exc.detail`` so raised HTTPExceptions keep their
+    text. All bodies use the {error, message} shape.
+    """
+
+    logger.warning("HTTP error %s on %s", exc.status_code, request.url.path)
+    if exc.status_code == 404:
+        return JSONResponse(
+            status_code=404,
+            content={
+                "error": "Not Found",
+                "message": "Endpoint does not exist",
+            },
+        )
+    return JSONResponse(
+        status_code=exc.status_code,
+        content={
+            "error": "HTTP Error",
+            "message": exc.detail,
+        },
+    )
+
+
+@app.exception_handler(RequestValidationError)
+async def validation_exception_handler(
+    request: Request, exc: RequestValidationError
+) -> JSONResponse:
+    """Handle FastAPI request validation errors with JSON details.
+
+    Always responds 422 with the {error, message} shape plus a
+    ``details`` list taken verbatim from ``exc.errors()``.
+    """
+
+    logger.warning("Validation error on %s: %s",
+                   request.url.path, exc.errors())
+    return JSONResponse(
+        status_code=422,
+        content={
+            "error": "Validation Error",
+            "message": "Request parameters failed validation",
+            "details": exc.errors(),
+        },
+    )
+
+
+@app.exception_handler(Exception)
+async def unhandled_exception_handler(
+    request: Request, exc: Exception
+) -> JSONResponse:
+    """Catch and log unexpected exceptions as HTTP 500 responses.
+
+    The full traceback goes to the log (logger.exception); the client
+    only receives a generic body, so no internals leak in the response.
+    """
+
+    logger.exception("Unhandled error on %s: %s", request.url.path, exc)
+    return JSONResponse(
+        status_code=500,
+        content={
+            "error": "Internal Server Error",
+            "message": "An unexpected error occurred",
+        },
+    )
+
+
+@app.get("/")
+async def index(request: Request) -> Dict[str, Any]:
+    """Main endpoint returning service information.
+
+    Returns:
+        Dict with five top-level sections — ``service`` (static metadata),
+        ``system``, ``runtime``, ``request`` (built by the helpers above)
+        and ``endpoints`` (static route listing).
+    """
+
+    # Log method, path and client for basic request tracing.
+    logger.info("Handling request on %s %s from %s",
+        request.method,
+        request.url.path,
+        request.client.host if request.client else "unknown"
+    )
+
+    response: Dict[str, Any] = {
+        "service": {
+            "name": APP_NAME,
+            "version": APP_VERSION,
+            "description": APP_DESCRIPTION,
+            "framework": APP_FRAMEWORK,
+        },
+        "system": get_system_info(),
+        "runtime": get_runtime_info(),
+        "request": get_request_info(request),
+        "endpoints": get_endpoints(),
+    }
+    return response
+
+
+@app.get("/health")
+async def health() -> Dict[str, Any]:
+    """Health check endpoint returning service status and uptime seconds.
+
+    Returns:
+        Dict with a fixed ``status`` of "healthy", ``timestamp``
+        (ISO 8601, UTC) and ``uptime_seconds`` from get_uptime().
+    """
+
+    uptime = get_uptime()
+    return {
+        "status": "healthy",
+        "timestamp": datetime.now(timezone.utc).isoformat(),
+        "uptime_seconds": uptime["uptime_seconds"],
+    }
+
+
+def main() -> None:
+    """Run the FastAPI application using uvicorn with configured settings.
+
+    Blocks until the server shuts down. The app is passed as the import
+    string "app:app" (required by uvicorn for reload mode, enabled when
+    DEBUG is true).
+    """
+
+    logger.info(
+        "Starting DevOps Info Service on %s:%s (debug=%s)",
+        HOST,
+        PORT,
+        DEBUG,
+    )
+    uvicorn.run("app:app", host=HOST, port=PORT, reload=DEBUG)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md
new file mode 100644
index 0000000000..fc4f3377e4
--- /dev/null
+++ b/app_python/docs/LAB01.md
@@ -0,0 +1,205 @@
+# LAB01 – DevOps Info Service (Python / FastAPI)
+
+## 1. Framework Selection
+
+I chose **FastAPI** because it is async, type‑hint friendly, and gives automatic request validation and OpenAPI docs while staying small and easy to start.
+
+| Framework | Why use it | Why I did not pick it |
+|-----------|------------------------------------------|--------------------------------------|
+| FastAPI | Async, type hints, auto docs, small core | - |
+| Flask | Very simple, huge ecosystem | No built‑in typing/validation layer |
+| Django | Full stack (ORM, admin, templates) | Too heavy for only `/` and `/health` |
+
+## 2. Best Practices Applied
+
+- **Clean code organization** – configuration, helpers, and endpoints are separate:
+
+ ```python
+ def get_system_info() -> dict:
+ return {"hostname": socket.gethostname(), "platform": platform.system()}
+
+ @app.get("/")
+ async def index(request: Request) -> dict:
+ return {"system": get_system_info(), "request": get_request_info(request)}
+ ```
+
+ This keeps each function small and focused, which makes later changes (for tests or metrics) safer.
+
+- **PEP 8 and docstrings** – descriptive names and module/function docstrings instead of inline comments:
+
+ ```python
+ START_TIME: datetime = datetime.now(timezone.utc)
+
+ def get_uptime() -> dict:
+ """Calculate uptime in seconds and human readable form."""
+ ```
+
+ Consistent style and docstrings make the code easier to read and to auto‑document.
+
+- **Error handling** – custom handlers return JSON instead of HTML error pages:
+
+ ```python
+ @app.exception_handler(StarletteHTTPException)
+ async def http_exception_handler(request: Request, exc: StarletteHTTPException):
+ return JSONResponse({"error": "HTTP Error", "message": exc.detail}, exc.status_code)
+ ```
+
+ This gives clients predictable and readable error structures.
+
+- **Logging** – one logging configuration used across the app:
+
+ ```python
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(APP_NAME)
+ logger.info("Starting DevOps Info Service on %s:%s", HOST, PORT)
+ ```
+
+ Logs make it clear when the app starts, what requests hit it, helps to debug failures.
+
+- **Pinned dependencies and .gitignore** – reproducible installs and no noisy files in git:
+
+ ```text
+ # requirements.txt
+ fastapi==0.115.0
+ uvicorn[standard]==0.32.0
+ ```
+
+ ```gitignore
+ __pycache__/
+ venv/
+ *.log
+ .DS_Store
+ ```
+
+ This keeps environments consistent between machines and avoids committing local or OS‑specific files.
+
+## 3. API Documentation
+
+`jq` is used in the examples below to pretty‑print JSON responses, it can be installed by following the official [documentation](https://jqlang.github.io/jq).
+
+### 3.1 `GET /`
+
+- **Description:** Returns service, system, runtime, request, and endpoint metadata.
+- **Example request:**
+
+```bash
+curl http://localhost:8080/ | jq
+```
+
+- **Example response:**
+
+```json
+{
+ "service": {
+ "name": "devops-info-service",
+ "version": "1.0.0",
+ "description": "DevOps course info service",
+ "framework": "FastAPI"
+ },
+ "system": {
+ "hostname": "Mikhails-MacBook-Pro.local",
+ "platform": "Darwin",
+ "platform_version": "Darwin Kernel Version 24.6.0: Wed Nov 5 21:30:44 PST 2025; root:xnu-11417.140.69.705.2~1/RELEASE_ARM64_T6041",
+ "architecture": "arm64",
+ "cpu_count": 12,
+ "python_version": "3.11.14"
+ },
+ "runtime": {
+ "uptime_seconds": 1009,
+ "uptime_human": "0 hours, 16 minutes",
+ "current_time": "2026-01-24T11:11:49.388345+00:00",
+ "timezone": "UTC"
+ },
+ "request": {
+ "client_ip": "127.0.0.1",
+ "user_agent": "curl/8.7.1",
+ "method": "GET",
+ "path": "/"
+ },
+ "endpoints": [
+ {
+ "path": "/",
+ "method": "GET",
+ "description": "Service information"
+ },
+ {
+ "path": "/health",
+ "method": "GET",
+ "description": "Health check"
+ }
+ ]
+}
+```
+
+### 3.2 `GET /health`
+
+- **Description:** Lightweight health check used later for probes.
+- **Example request:**
+
+```bash
+curl http://localhost:8080/health | jq
+```
+
+- **Example response:**
+
+```json
+{
+ "status": "healthy",
+ "timestamp": "2026-01-24T11:12:15.235487+00:00",
+ "uptime_seconds": 1034
+}
+```
+
+### 3.3 Testing commands
+
+Commands used to test both endpoints locally (default port 8080):
+
+```bash
+# Start the app
+cd app_python
+source venv/bin/activate
+python app.py
+
+# Test main endpoint
+curl http://localhost:8080/
+curl http://localhost:8080/ | jq
+
+# Test health endpoint
+curl http://localhost:8080/health
+curl http://localhost:8080/health | jq
+```
+
+## 4. Testing Evidence
+
+Main endpoint JSON response:
+![Main endpoint JSON response](screenshots/01-main-endpoint.png)
+
+Healthcheck endpoint JSON response:
+![Health check JSON response](screenshots/02-health-check.png)
+
+Terminal output from this app run:
+
+```text
+(venv) ➜ app_python git:(lab1) ✗ python app.py
+2026-01-24 14:24:21,364 - devops-info-service - INFO - Starting DevOps Info Service on 0.0.0.0:8080 (debug=False)
+INFO: Started server process [67087]
+INFO: Waiting for application startup.
+INFO: Application startup complete.
+INFO: Uvicorn running on http://0.0.0.0:8080 (Press CTRL+C to quit)
+2026-01-24 14:24:31,229 - devops-info-service - INFO - Handling request on GET / from 127.0.0.1
+INFO: 127.0.0.1:65190 - "GET / HTTP/1.1" 200 OK
+INFO: 127.0.0.1:65226 - "GET /health HTTP/1.1" 200 OK
+```
+
+## 5. Challenges & Solutions
+
+- **Challenge:** Getting accurate client IP when running locally through FastAPI.
+ - **Solution:** Used `request.client.host` when available and fell back to `"unknown"` to avoid crashes when the client object is missing.
+- **Challenge:** Keeping uptime logic reusable across endpoints.
+ - **Solution:** Implemented a single `get_uptime` helper used by both `/` and `/health`.
+- **Challenge:** Returning consistent JSON errors instead of the default HTML pages.
+ - **Solution:** Added exception handlers for `StarletteHTTPException`, `RequestValidationError`, and generic `Exception` to log the error and send structured JSON with `error` and `message` fields.
+
+## 6. GitHub Community
+
+Starring repositories helps expose useful tools to the community and signals to maintainers that their work is valuable. Following professors, TAs, and classmates makes it easier to discover new projects, track team activity, and grow a professional network around real coursework.
diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md
new file mode 100644
index 0000000000..d07b499c66
--- /dev/null
+++ b/app_python/docs/LAB02.md
@@ -0,0 +1,315 @@
+# LAB02 — Docker Containerization (Python / FastAPI)
+
+## 1. Docker Best Practices Applied
+
+- Non-root user
+ - Why it matters: limits damage if the process is compromised; avoids privileged filesystem access inside the container.
+- Specific base image version
+ - Why it matters: reproducible builds; reduces surprise changes from upstream.
+- Layer caching (dependencies before code)
+ - Why it matters: rebuilding after a code change does not reinstall dependencies.
+- Only copy runtime files
+ - Why it matters: smaller image and less accidental leakage of dev artifacts.
+- `.dockerignore`
+ - Why it matters: smaller build context, faster builds, fewer irrelevant files included in layers.
+
+Relevant Dockerfile excerpt:
+
+```dockerfile
+FROM python:3.13-slim
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+COPY app.py .
+RUN addgroup --system app && adduser --system --ingroup app app
+USER app
+```
+
+## 2. Image Information & Decisions
+
+- Base image: `python:3.13-slim`
+ - Reason: small Debian-based image with wide compatibility; specific Python major/minor pinned.
+- Optimization choices:
+ - `--no-cache-dir` for pip to avoid caching wheels in the final image.
+ - Minimal COPY set (`requirements.txt`, then `app.py`).
+
+Image size:
+
+```text
+devops-info-service-python:lab02 = 181MB
+```
+
+- The size is reasonable for `python:3.13-slim` plus FastAPI + uvicorn[standard]. The biggest contributors are the Python runtime and binary wheels for `uvicorn[standard]` extras.
+
+Layer structure notes:
+
+- `requirements.txt` copied and installed before application code to maximize cache reuse.
+- Application code copied last so code changes invalidate the smallest possible part of the build.
+
+## 3. Build & Run Process
+
+### 3.1 Build output
+
+```text
+#0 building with "orbstack" instance using docker driver
+
+#1 [internal] load build definition from Dockerfile
+#1 transferring dockerfile: 362B done
+#1 DONE 0.0s
+
+#2 [internal] load metadata for docker.io/library/python:3.13-slim
+#2 DONE 0.0s
+
+#3 [internal] load .dockerignore
+#3 transferring context: 163B done
+#3 DONE 0.0s
+
+#4 [1/7] FROM docker.io/library/python:3.13-slim
+#4 DONE 0.0s
+
+#5 [internal] load build context
+#5 transferring context: 6.39kB done
+#5 DONE 0.0s
+
+#6 [2/7] WORKDIR /app
+#6 DONE 0.0s
+
+#7 [3/7] RUN addgroup --system app && adduser --system --ingroup app app
+#7 DONE 0.2s
+
+#8 [4/7] COPY requirements.txt .
+#8 DONE 0.0s
+
+#9 [5/7] RUN pip install --no-cache-dir -r requirements.txt
+#9 1.053 Collecting fastapi==0.115.0 (from -r requirements.txt (line 1))
+#9 1.390 Downloading fastapi-0.115.0-py3-none-any.whl.metadata (27 kB)
+#9 1.504 Collecting uvicorn==0.32.0 (from uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 1.560 Downloading uvicorn-0.32.0-py3-none-any.whl.metadata (6.6 kB)
+#9 1.661 Collecting starlette<0.39.0,>=0.37.2 (from fastapi==0.115.0->-r requirements.txt (line 1))
+#9 1.716 Downloading starlette-0.38.6-py3-none-any.whl.metadata (6.0 kB)
+#9 1.938 Collecting pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4 (from fastapi==0.115.0->-r requirements.txt (line 1))
+#9 1.992 Downloading pydantic-2.12.5-py3-none-any.whl.metadata (90 kB)
+#9 2.124 Collecting typing-extensions>=4.8.0 (from fastapi==0.115.0->-r requirements.txt (line 1))
+#9 2.180 Downloading typing_extensions-4.15.0-py3-none-any.whl.metadata (3.3 kB)
+#9 2.264 Collecting click>=7.0 (from uvicorn==0.32.0->uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 2.318 Downloading click-8.3.1-py3-none-any.whl.metadata (2.6 kB)
+#9 2.384 Collecting h11>=0.8 (from uvicorn==0.32.0->uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 2.438 Downloading h11-0.16.0-py3-none-any.whl.metadata (8.3 kB)
+#9 2.529 Collecting httptools>=0.5.0 (from uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 2.585 Downloading httptools-0.7.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl.metadata (3.5 kB)
+#9 2.666 Collecting python-dotenv>=0.13 (from uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 2.723 Downloading python_dotenv-1.2.1-py3-none-any.whl.metadata (25 kB)
+#9 2.827 Collecting pyyaml>=5.1 (from uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 2.882 Downloading pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl.metadata (2.4 kB)
+#9 2.994 Collecting uvloop!=0.15.0,!=0.15.1,>=0.14.0 (from uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 3.051 Downloading uvloop-0.22.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl.metadata (4.9 kB)
+#9 3.174 Collecting watchfiles>=0.13 (from uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 3.230 Downloading watchfiles-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl.metadata (4.9 kB)
+#9 3.363 Collecting websockets>=10.4 (from uvicorn[standard]==0.32.0->-r requirements.txt (line 2))
+#9 3.417 Downloading websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl.metadata (6.8 kB)
+#9 3.480 Collecting annotated-types>=0.6.0 (from pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4->fastapi==0.115.0->-r requirements.txt (line 1))
+#9 3.538 Downloading annotated_types-0.7.0-py3-none-any.whl.metadata (15 kB)
+#9 3.955 Collecting pydantic-core==2.41.5 (from pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4->fastapi==0.115.0->-r requirements.txt (line 1))
+#9 4.008 Downloading pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl.metadata (7.3 kB)
+#9 4.068 Collecting typing-inspection>=0.4.2 (from pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4->fastapi==0.115.0->-r requirements.txt (line 1))
+#9 4.122 Downloading typing_inspection-0.4.2-py3-none-any.whl.metadata (2.6 kB)
+#9 4.213 Collecting anyio<5,>=3.4.0 (from starlette<0.39.0,>=0.37.2->fastapi==0.115.0->-r requirements.txt (line 1))
+#9 4.270 Downloading anyio-4.12.1-py3-none-any.whl.metadata (4.3 kB)
+#9 4.346 Collecting idna>=2.8 (from anyio<5,>=3.4.0->starlette<0.39.0,>=0.37.2->fastapi==0.115.0->-r requirements.txt (line 1))
+#9 4.404 Downloading idna-3.11-py3-none-any.whl.metadata (8.4 kB)
+#9 4.487 Downloading fastapi-0.115.0-py3-none-any.whl (94 kB)
+#9 4.558 Downloading uvicorn-0.32.0-py3-none-any.whl (63 kB)
+#9 4.619 Downloading pydantic-2.12.5-py3-none-any.whl (463 kB)
+#9 4.749 Downloading pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl (1.9 MB)
+#9 4.935 Downloading starlette-0.38.6-py3-none-any.whl (71 kB)
+#9 4.999 Downloading anyio-4.12.1-py3-none-any.whl (113 kB)
+#9 5.058 Downloading annotated_types-0.7.0-py3-none-any.whl (13 kB)
+#9 5.114 Downloading click-8.3.1-py3-none-any.whl (108 kB)
+#9 5.181 Downloading h11-0.16.0-py3-none-any.whl (37 kB)
+#9 5.240 Downloading httptools-0.7.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl (473 kB)
+#9 5.317 Downloading idna-3.11-py3-none-any.whl (71 kB)
+#9 5.376 Downloading python_dotenv-1.2.1-py3-none-any.whl (21 kB)
+#9 5.434 Downloading pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl (767 kB)
+#9 5.548 Downloading typing_extensions-4.15.0-py3-none-any.whl (44 kB)
+#9 5.605 Downloading typing_inspection-0.4.2-py3-none-any.whl (14 kB)
+#9 5.664 Downloading uvloop-0.22.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl (4.3 MB)
+#9 5.912 Downloading watchfiles-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl (449 kB)
+#9 5.987 Downloading websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl (186 kB)
+#9 6.048 Installing collected packages: websockets, uvloop, typing-extensions, pyyaml, python-dotenv, idna, httptools, h11, click, annotated-types, uvicorn, typing-inspection, pydantic-core, anyio, watchfiles, starlette, pydantic, fastapi
+#9 6.524 Successfully installed annotated-types-0.7.0 anyio-4.12.1 click-8.3.1 fastapi-0.115.0 h11-0.16.0 httptools-0.7.1 idna-3.11 pydantic-2.12.5 pydantic-core-2.41.5 python-dotenv-1.2.1 pyyaml-6.0.3 starlette-0.38.6 typing-extensions-4.15.0 typing-inspection-0.4.2 uvicorn-0.32.0 uvloop-0.22.1 watchfiles-1.1.1 websockets-16.0
+#9 DONE 6.9s
+
+#10 [6/7] COPY app.py .
+#10 DONE 0.0s
+
+#11 [7/7] RUN chown -R app:app /app
+#11 DONE 0.1s
+
+#12 exporting to image
+#12 exporting layers 0.1s done
+#12 writing image sha256:9635ee51eeb6a6c1e65741bd2f136a0ff77cd1b2fb38b052ad4c459d21bc812f done
+#12 naming to docker.io/library/devops-info-service-python:lab02 done
+#12 DONE 0.1s
+```
+
+### 3.2 Container running output
+
+```text
+43fdf2d5ae55aa3058135566c590c81cd1acd1bc04f7c38b91c38561f7854f42
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+43fdf2d5ae55 devops-info-service-python:lab02 "python app.py" 1 second ago Up 1 second 0.0.0.0:8080->8080/tcp, [::]:8080->8080/tcp devops-info-python
+```
+
+### 3.3 Endpoint tests
+
+```text
+GET /
+{
+ "service": {
+ "name": "devops-info-service",
+ "version": "1.0.0",
+ "description": "DevOps course info service",
+ "framework": "FastAPI"
+ },
+ "system": {
+ "hostname": "9d5b7640e4b7",
+ "platform": "Linux",
+ "platform_version": "#1 SMP PREEMPT Thu Nov 20 09:34:02 UTC 2025",
+ "architecture": "aarch64",
+ "cpu_count": 12,
+ "python_version": "3.13.11"
+ },
+ "runtime": {
+ "uptime_seconds": 0,
+ "uptime_human": "0 hours, 0 minutes",
+ "current_time": "2026-01-31T11:11:05.687867+00:00",
+ "timezone": "UTC"
+ },
+ "request": {
+ "client_ip": "192.168.215.1",
+ "user_agent": "curl/8.7.1",
+ "method": "GET",
+ "path": "/"
+ },
+ "endpoints": [
+ {
+ "path": "/",
+ "method": "GET",
+ "description": "Service information"
+ },
+ {
+ "path": "/health",
+ "method": "GET",
+ "description": "Health check"
+ }
+ ]
+}
+
+GET /health
+{
+ "status": "healthy",
+ "timestamp": "2026-01-31T11:11:05.696534+00:00",
+ "uptime_seconds": 0
+}
+```
+
+Docker Hub repository:
+
+- `https://hub.docker.com/repository/docker/luminitetime/devops-info-service-python`
+
+Tagging strategy:
+
+- `luminitetime/devops-info-service-python:lab02` for the lab submission state
+- `luminitetime/devops-info-service-python:latest` for the most recent build
+
+Docker push output:
+
+```text
+The push refers to repository [docker.io/luminitetime/devops-info-service-python]
+4aca6a960a6c: Preparing
+6b2fdc7164d9: Preparing
+55eb172d93ab: Preparing
+1a245b95a27a: Preparing
+c8ae99256e9c: Preparing
+d0749c8b4e23: Preparing
+9661772b6bf6: Preparing
+7b42a1e79f8b: Preparing
+c66c050e39d8: Preparing
+37127a0fa4c7: Preparing
+d0749c8b4e23: Waiting
+9661772b6bf6: Waiting
+7b42a1e79f8b: Waiting
+c66c050e39d8: Waiting
+37127a0fa4c7: Waiting
+1a245b95a27a: Pushed
+4aca6a960a6c: Pushed
+c8ae99256e9c: Pushed
+6b2fdc7164d9: Pushed
+55eb172d93ab: Pushed
+9661772b6bf6: Pushed
+d0749c8b4e23: Pushed
+c66c050e39d8: Pushed
+7b42a1e79f8b: Pushed
+37127a0fa4c7: Pushed
+lab02: digest: sha256:c795ea48a004f19b8a2c12c33c895522b0fe0dfd477941b8f2ec47b600c16f9e size: 2408
+The push refers to repository [docker.io/luminitetime/devops-info-service-python]
+4aca6a960a6c: Preparing
+6b2fdc7164d9: Preparing
+55eb172d93ab: Preparing
+1a245b95a27a: Preparing
+c8ae99256e9c: Preparing
+d0749c8b4e23: Preparing
+9661772b6bf6: Preparing
+7b42a1e79f8b: Preparing
+c66c050e39d8: Preparing
+37127a0fa4c7: Preparing
+9661772b6bf6: Waiting
+7b42a1e79f8b: Waiting
+c66c050e39d8: Waiting
+37127a0fa4c7: Waiting
+d0749c8b4e23: Waiting
+1a245b95a27a: Layer already exists
+55eb172d93ab: Layer already exists
+c8ae99256e9c: Layer already exists
+6b2fdc7164d9: Layer already exists
+4aca6a960a6c: Layer already exists
+7b42a1e79f8b: Layer already exists
+d0749c8b4e23: Layer already exists
+9661772b6bf6: Layer already exists
+c66c050e39d8: Layer already exists
+37127a0fa4c7: Layer already exists
+latest: digest: sha256:c795ea48a004f19b8a2c12c33c895522b0fe0dfd477941b8f2ec47b600c16f9e size: 2408
+```
+
+Pull and run from Docker Hub:
+
+```text
+lab02: Pulling from luminitetime/devops-info-service-python
+Digest: sha256:c795ea48a004f19b8a2c12c33c895522b0fe0dfd477941b8f2ec47b600c16f9e
+Status: Downloaded newer image for luminitetime/devops-info-service-python:lab02
+docker.io/luminitetime/devops-info-service-python:lab02
+{
+ "status": "healthy",
+ "timestamp": "2026-01-31T11:11:05.686163+00:00",
+ "uptime_seconds": 0
+}
+```
+
+## 4. Technical Analysis
+
+- Why the Dockerfile works:
+ - Dependencies are installed into the image, then the FastAPI app is started with `python app.py` (uvicorn binds to `0.0.0.0:8080` by default).
+- What if layer order changes:
+ - Copying the whole project before installing dependencies would invalidate the dependency layer on every code change, slowing rebuilds.
+- Security considerations implemented:
+ - The container runs as a non-root user and does not require elevated privileges.
+- How `.dockerignore` improves the build:
+ - It reduces the amount of data sent to the Docker daemon and prevents dev-only files from being added to layers.
+
+## 5. Challenges & Solutions
+
+```text
+- Issue: `docker push` initially failed with "denied: requested access to the resource is denied".
+ - Cause: the image was tagged with a different Docker Hub namespace than the account actually logged in on this machine.
+  - Fix: confirmed the Docker Hub username via `docker login` output (Username: luminitetime), re-tagged the image as `luminitetime/devops-info-service-python:lab02`, and re-ran `docker push`.
+```
diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md
new file mode 100644
index 0000000000..3a92bed654
--- /dev/null
+++ b/app_python/docs/LAB03.md
@@ -0,0 +1,165 @@
+# LAB03 — Continuous Integration (CI/CD)
+
+## 1. Overview
+
+Testing framework: `pytest`
+
+- Why: concise syntax, strong ecosystem, good FastAPI support via `TestClient`, easy coverage integration with `pytest-cov`.
+
+Test coverage:
+
+- `GET /` structure and required fields
+- `GET /health` structure and required fields
+- Error cases:
+ - unknown path returns structured 404
+ - method not allowed (405)
+
+CI triggers:
+
+- Runs on `push` / `pull_request` to `master` with path filter `app_python/**` (and workflow file changes).
+- Docker push job runs only on non-PR events.
+
+Versioning strategy: Semantic Versioning (SemVer)
+
+- Git tags: `vMAJOR.MINOR.PATCH` (example: `v1.2.3`)
+- Docker tags produced in CI:
+ - `MAJOR.MINOR.PATCH`, `MAJOR.MINOR`, `MAJOR`
+ - `latest` on default branch
+ - `sha-` on pushes
+
+Required GitHub Secrets for full CI/CD:
+
+- `DOCKERHUB_TOKEN`: Docker Hub access token for `luminitetime`
+- `SNYK_TOKEN`: Snyk API token (enables Snyk scan steps)
+- `CODECOV_TOKEN` (optional for public repos): Codecov upload token
+
+## 2. Workflow Evidence
+
+- Workflow file: `.github/workflows/python-ci.yml`
+- Actions page: `https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/python-ci.yml`
+
+Local tests passing:
+
+```text
+All checks passed!
+.... [100%]
+================================ tests coverage ================================
+______________ coverage: platform darwin, python 3.11.14-final-0 _______________
+
+Name Stmts Miss Cover
+---------------------------------------------
+__init__.py 0 0 100%
+app.py 68 7 90%
+tests/__init__.py 0 0 100%
+tests/test_endpoints.py 44 0 100%
+---------------------------------------------
+TOTAL 112 7 94%
+Coverage XML written to file coverage.xml
+Required test coverage of 70% reached. Total coverage: 93.75%
+4 passed in 0.20s
+```
+
+Docker image:
+
+- `https://hub.docker.com/r/luminitetime/devops-info-service-python`
+- Verified SemVer tags pushed: `1.0.0`, `1.0`, `1`, `latest`
+
+Docker push evidence (local):
+
+```text
+1.0.0: digest: sha256:c795ea48a004f19b8a2c12c33c895522b0fe0dfd477941b8f2ec47b600c16f9e size: 2408
+1.0: digest: sha256:c795ea48a004f19b8a2c12c33c895522b0fe0dfd477941b8f2ec47b600c16f9e size: 2408
+1: digest: sha256:c795ea48a004f19b8a2c12c33c895522b0fe0dfd477941b8f2ec47b600c16f9e size: 2408
+latest: digest: sha256:c795ea48a004f19b8a2c12c33c895522b0fe0dfd477941b8f2ec47b600c16f9e size: 2408
+```
+
+Status badge (README):
+
+- `https://github.com/LuminiteTime/DevOps-Core-Course/actions/workflows/python-ci.yml`
+
+## 3. Best Practices Implemented
+
+- Fail-fast matrix: stops quickly on failures.
+- Split jobs with dependencies: Docker build/push runs only after tests succeed.
+- Path filters: avoids running Python CI when only non-Python parts change.
+- Concurrency: cancels outdated runs on the same ref.
+- Dependency caching: pip cache via `actions/setup-python` and Gradle cache via `actions/setup-java`.
+- Docker layer caching: Buildx cache to GitHub Actions cache (`type=gha`).
+- Least privilege: workflow `permissions: contents: read`.
+- Snyk: dependency vulnerability scan (high severity threshold).
+
+Caching metrics:
+
+```text
+pip install -q -r requirements.txt -r requirements-dev.txt ... 5.708 total
+pip install -q -r requirements.txt -r requirements-dev.txt ... 5.647 total
+```
+
+Snyk results:
+
+```text
+Workflow runs `snyk test --file=app_python/requirements.txt --severity-threshold=high`
+when `SNYK_TOKEN` is configured as a repository secret.
+```
+
+## 4. Key Decisions
+
+- Versioning Strategy:
+ - SemVer fits a service with explicit release tags and clear “breaking vs non-breaking” change signaling.
+- Docker Tags:
+ - `latest` for default branch, SemVer tags for releases, and `sha-*` for traceability on pushes.
+- Workflow Triggers:
+ - PRs run lint/tests only; pushes to `master` also publish images (when secrets exist).
+- Test Coverage:
+ - Focused on API contract: structure, required fields, and error handling. Not asserting exact hostname/platform values because they vary by environment.
+
+## 5. Challenges
+
+- Running security scanning in CI requires external credentials (`SNYK_TOKEN`). The workflow is configured to skip Snyk when the secret is not present.
+
+## Multi-App CI + Coverage
+
+Multi-app CI:
+
+- Java workflow: `.github/workflows/java-ci.yml`
+- Java tests: `appJava/src/test/java/luminais/tech/appjava/DevopsEndpointsTest.java` (covers `GET /`, `GET /health`, and 404)
+- Java linting: Checkstyle (`./gradlew checkstyleMain checkstyleTest`)
+- Path filters:
+ - Python CI runs only on `app_python/**` changes
+ - Java CI runs only on `appJava/**` changes
+- Benefit: avoids wasting CI minutes and keeps feedback focused (monorepo-friendly).
+
+Path filter proof plan:
+
+- Commit changing only `app_python/**` → only Python CI should run.
+- Commit changing only `appJava/**` → only Java CI should run.
+- Commit changing only `labs/**` or `lectures/**` → neither CI should run.
+
+Coverage:
+
+- Coverage is generated by pytest (`coverage.xml`) and uploaded via `codecov/codecov-action@v5`.
+- Current local coverage: 93.75% (threshold in CI: 70%).
+
+Java build evidence (local):
+
+```text
+> Task :openApiGenerate
+Successfully generated code to .../appJava/build/generated
+...
+> Task :test
+BUILD SUCCESSFUL in 8s
+```
+
+Java Docker image (bonus):
+
+- `https://hub.docker.com/r/luminitetime/devops-info-service-java`
+- Verified SemVer tags pushed: `1.0.0`, `1.0`, `1`, `latest`
+
+Java Docker push evidence (local):
+
+```text
+1.0.0: digest: sha256:cadca4df9db5dea4f19829635e7d7330a46b7f52381ac054dd1dbe9c4afd2337 size: 2205
+1.0: digest: sha256:cadca4df9db5dea4f19829635e7d7330a46b7f52381ac054dd1dbe9c4afd2337 size: 2205
+1: digest: sha256:cadca4df9db5dea4f19829635e7d7330a46b7f52381ac054dd1dbe9c4afd2337 size: 2205
+latest: digest: sha256:cadca4df9db5dea4f19829635e7d7330a46b7f52381ac054dd1dbe9c4afd2337 size: 2205
+```
diff --git a/app_python/docs/screenshots/01-main-endpoint.png b/app_python/docs/screenshots/01-main-endpoint.png
new file mode 100644
index 0000000000..306fd6e55b
Binary files /dev/null and b/app_python/docs/screenshots/01-main-endpoint.png differ
diff --git a/app_python/docs/screenshots/02-health-check.png b/app_python/docs/screenshots/02-health-check.png
new file mode 100644
index 0000000000..df827e0062
Binary files /dev/null and b/app_python/docs/screenshots/02-health-check.png differ
diff --git a/app_python/requirements-dev.txt b/app_python/requirements-dev.txt
new file mode 100644
index 0000000000..26a82805d1
--- /dev/null
+++ b/app_python/requirements-dev.txt
@@ -0,0 +1,4 @@
+pytest==8.4.2
+pytest-cov==6.3.0
+httpx==0.28.1
+ruff==0.12.11
diff --git a/app_python/requirements.txt b/app_python/requirements.txt
new file mode 100644
index 0000000000..792449289f
--- /dev/null
+++ b/app_python/requirements.txt
@@ -0,0 +1,2 @@
+fastapi==0.115.0
+uvicorn[standard]==0.32.0
diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/app_python/tests/test_endpoints.py b/app_python/tests/test_endpoints.py
new file mode 100644
index 0000000000..5a6c4bccd8
--- /dev/null
+++ b/app_python/tests/test_endpoints.py
@@ -0,0 +1,68 @@
+"""Behavioral tests for the devops-info-service FastAPI endpoints."""
+
+from fastapi.testclient import TestClient
+
+from app_python.app import app
+
+
+client = TestClient(app)
+
+
+def test_get_root_returns_expected_structure() -> None:
+    """GET / returns the service/system/runtime/request/endpoints payload."""
+    res = client.get("/", headers={"user-agent": "pytest"})
+    assert res.status_code == 200
+    data = res.json()
+
+    assert set(data.keys()) == {"service", "system", "runtime", "request", "endpoints"}
+
+    service = data["service"]
+    assert service["name"] == "devops-info-service"
+    assert service["version"] == "1.0.0"
+    assert service["framework"] == "FastAPI"
+
+    system = data["system"]
+    assert isinstance(system["hostname"], str) and system["hostname"]
+    assert isinstance(system["cpu_count"], int) and system["cpu_count"] >= 1
+    assert isinstance(system["python_version"], str) and system["python_version"]
+
+    runtime = data["runtime"]
+    assert isinstance(runtime["uptime_seconds"], int) and runtime["uptime_seconds"] >= 0
+    assert runtime["timezone"] == "UTC"
+    assert isinstance(runtime["current_time"], str) and runtime["current_time"]
+
+    request = data["request"]
+    assert request["method"] == "GET"
+    assert request["path"] == "/"
+    assert request["user_agent"] == "pytest"
+    assert isinstance(request["client_ip"], str) and request["client_ip"]
+
+    endpoints = data["endpoints"]
+    assert isinstance(endpoints, list) and endpoints
+    paths = {e["path"] for e in endpoints}
+    assert paths == {"/", "/health"}
+
+
+def test_get_health_returns_expected_structure() -> None:
+    """GET /health reports healthy status, a timestamp, and non-negative uptime."""
+    res = client.get("/health")
+    assert res.status_code == 200
+    data = res.json()
+
+    assert data["status"] == "healthy"
+    assert isinstance(data["timestamp"], str) and data["timestamp"]
+    assert isinstance(data["uptime_seconds"], int) and data["uptime_seconds"] >= 0
+
+
+def test_unknown_endpoint_returns_structured_404() -> None:
+    """Unknown paths yield the app's structured 404 body."""
+    res = client.get("/does-not-exist")
+    assert res.status_code == 404
+    data = res.json()
+    assert data == {"error": "Not Found", "message": "Endpoint does not exist"}
+
+
+def test_method_not_allowed_returns_405() -> None:
+    """POST against /health is rejected with 405."""
+    res = client.post("/health")
+    assert res.status_code == 405
diff --git a/docs/LAB04.md b/docs/LAB04.md
new file mode 100644
index 0000000000..7f8efbbb48
--- /dev/null
+++ b/docs/LAB04.md
@@ -0,0 +1,252 @@
+# LAB04 - Infrastructure as Code (Terraform and Pulumi)
+
+## 1. Cloud Provider and Infrastructure
+
+- Provider: Yandex Cloud
+ - Reason: recommended for Russia-based students, free tier available, no credit card required.
+- Region/zone: ru-central1-a
+- Instance type: standard-v2, 2 cores at 20% fraction, 1 GB RAM
+- Boot disk: 10 GB network-hdd, Ubuntu 22.04 LTS
+- Total cost: $0 (within free tier limits)
+- Resources created (per tool):
+ - VPC network
+ - VPC subnet (10.0.1.0/24)
+ - Security group (SSH/22, HTTP/80, App/5000 inbound; all outbound)
+ - Compute instance with public IP
+
+## 2. Terraform Implementation
+
+Terraform version: 1.5.7 (Homebrew, last open-source release)
+
+Yandex Cloud provider: yandex-cloud/yandex v0.187.0 (installed via `terraform-mirror.yandexcloud.net` because `registry.terraform.io` is blocked from Russia).
+
+Project structure:
+
+```text
+terraform/
+ .gitignore # state, tfvars, .terraform/
+ main.tf # provider, data source, resources
+ variables.tf # input variables with defaults
+ outputs.tf # public IP, private IP, SSH command
+```
+
+Key decisions:
+
+- All configurable values (zone, platform, cores, memory, disk, image family, SSH user) extracted into variables with sensible defaults.
+- Outputs expose the public IP and a ready-to-paste SSH command.
+- Authentication via environment variables (`YC_CLOUD_ID`, `YC_FOLDER_ID`, `YC_SERVICE_ACCOUNT_KEY_FILE`) -- no secrets in code.
+- Used `user-data` cloud-config metadata instead of `ssh-keys` because the latter did not propagate the key on the Ubuntu 22.04 image.
+
+Challenges:
+
+- `registry.terraform.io` is unreachable from the network. Fixed by configuring `~/.terraformrc` with `terraform-mirror.yandexcloud.net` as a network mirror.
+- The `ssh-keys` metadata field was not picked up by cloud-init on the Ubuntu 22.04 image. Switched to `user-data` with full cloud-config and recreated the VM.
+
+### terraform init
+
+```text
+Initializing the backend...
+Initializing provider plugins...
+- Finding yandex-cloud/yandex versions matching "~> 0.135"...
+- Installing yandex-cloud/yandex v0.187.0...
+- Installed yandex-cloud/yandex v0.187.0 (unauthenticated)
+
+Terraform has been successfully initialized!
+```
+
+### terraform plan
+
+```text
+data.yandex_compute_image.ubuntu: Read complete after 0s [id=fd8t9g30r3pc23et5krl]
+
+Terraform will perform the following actions:
+
+ # yandex_compute_instance.lab will be created
+ + resource "yandex_compute_instance" "lab" {
+ + hostname = "lab04-vm"
+ + name = "lab04-vm"
+ + platform_id = "standard-v2"
+ + zone = "ru-central1-a"
+ + labels = { "project" = "devops-lab04", "tool" = "terraform" }
+ + resources { cores = 2, core_fraction = 20, memory = 1 }
+ + boot_disk { image_id = "fd8t9g30r3pc23et5krl", size = 10, type = "network-hdd" }
+ + network_interface { subnet_id = (known after apply), nat = true }
+ }
+
+ # yandex_vpc_network.lab will be created
+ + resource "yandex_vpc_network" "lab" { name = "lab04-network" }
+
+ # yandex_vpc_security_group.lab will be created
+ + resource "yandex_vpc_security_group" "lab" {
+ + name = "lab04-sg"
+ + ingress { description = "SSH", port = 22, protocol = "TCP" }
+ + ingress { description = "HTTP", port = 80, protocol = "TCP" }
+ + ingress { description = "App", port = 5000, protocol = "TCP" }
+ + egress { description = "Allow all outbound", protocol = "ANY" }
+ }
+
+ # yandex_vpc_subnet.lab will be created
+ + resource "yandex_vpc_subnet" "lab" {
+ + name = "lab04-subnet", zone = "ru-central1-a", v4_cidr_blocks = ["10.0.1.0/24"]
+ }
+
+Plan: 4 to add, 0 to change, 0 to destroy.
+```
+
+### terraform apply
+
+```text
+yandex_vpc_network.lab: Creating...
+yandex_vpc_network.lab: Creation complete after 3s [id=enpvte4s34qj0q38is12]
+yandex_vpc_subnet.lab: Creating...
+yandex_vpc_security_group.lab: Creating...
+yandex_vpc_subnet.lab: Creation complete after 0s [id=e9b5viveamq80l3p8ibs]
+yandex_vpc_security_group.lab: Creation complete after 2s [id=enp7oirsi4f9st9tqhov]
+yandex_compute_instance.lab: Creating...
+yandex_compute_instance.lab: Creation complete after 43s [id=fhmfdsq5cbtis1qg36j3]
+
+Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
+
+Outputs:
+ssh_command = "ssh -i ~/.ssh/id_ed25519 yc-user@93.77.187.164"
+vm_private_ip = "10.0.1.20"
+vm_public_ip = "93.77.187.164"
+```
+
+### SSH access proof
+
+```text
+$ ssh -i ~/.ssh/id_ed25519 yc-user@93.77.187.164 "hostname && uname -a && uptime"
+lab04-vm
+Linux lab04-vm 5.15.0-170-generic #180-Ubuntu SMP Fri Jan 9 16:10:31 UTC 2026 x86_64 x86_64 x86_64 GNU/Linux
+ 09:51:44 up 0 min, 0 users, load average: 0.23, 0.07, 0.02
+```
+
+## 3. Pulumi Implementation
+
+Pulumi version: 3.222.0, language: Python 3.11
+
+Provider package: `pulumi-yandex` (installed via pip into a virtual environment).
+
+Project structure:
+
+```text
+pulumi/
+ .gitignore # venv/, .pulumi-state/, Pulumi.*.yaml, __pycache__/
+ Pulumi.yaml # project metadata (name, runtime)
+ requirements.txt # pulumi, pulumi-yandex
+ __main__.py # all infrastructure definitions
+```
+
+How code differs from Terraform:
+
+- Resources are Python objects (`yandex.VpcNetwork(...)`) instead of HCL blocks.
+- Data source call is a regular function (`yandex.get_compute_image(family=...)`).
+- Outputs use `pulumi.export(...)` instead of `output` blocks.
+- Variables are plain Python / Pulumi config (`config.get(...)`) rather than a separate `variables.tf` file.
+- The security group rules use typed `*Args` dataclasses.
+
+Advantages discovered:
+
+- IDE autocompletion and type hints reduce guessing about argument names.
+- Composing user-data strings with f-strings is more natural than HCL `<<-EOF` heredocs.
+- Everything lives in a single `.py` file without requiring multiple `.tf` files.
+
+Challenges:
+
+- Local backend requires explicitly setting `PULUMI_BACKEND_URL` and `PULUMI_CONFIG_PASSPHRASE`.
+- The `pulumi-yandex` provider emits a deprecation warning about `pkg_resources` -- this is cosmetic and has no functional impact.
+
+### terraform destroy (before Pulumi)
+
+```text
+yandex_compute_instance.lab: Destroying... [id=fhmfdsq5cbtis1qg36j3]
+yandex_compute_instance.lab: Destruction complete after 43s
+yandex_vpc_subnet.lab: Destroying... [id=e9b5viveamq80l3p8ibs]
+yandex_vpc_security_group.lab: Destroying... [id=enp7oirsi4f9st9tqhov]
+yandex_vpc_security_group.lab: Destruction complete after 1s
+yandex_vpc_subnet.lab: Destruction complete after 5s
+yandex_vpc_network.lab: Destroying... [id=enpvte4s34qj0q38is12]
+yandex_vpc_network.lab: Destruction complete after 2s
+
+Destroy complete! Resources: 4 destroyed.
+```
+
+### pulumi preview
+
+```text
+Previewing update (dev):
+ + pulumi:pulumi:Stack lab04-pulumi-dev create
+ + yandex:index:VpcNetwork lab04-network create
+ + yandex:index:VpcSubnet lab04-subnet create
+ + yandex:index:VpcSecurityGroup lab04-sg create
+ + yandex:index:ComputeInstance lab04-vm create
+
+Resources:
+ + 5 to create
+```
+
+### pulumi up
+
+```text
+Updating (dev):
+ + yandex:index:VpcNetwork lab04-network created (2s)
+ + yandex:index:VpcSecurityGroup lab04-sg created (2s)
+ + yandex:index:VpcSubnet lab04-subnet created (0.85s)
+ + yandex:index:ComputeInstance lab04-vm created (59s)
+ + pulumi:pulumi:Stack lab04-pulumi-dev created (65s)
+
+Outputs:
+ ssh_command : "ssh -i ~/.ssh/id_ed25519 yc-user@93.77.191.242"
+ vm_private_ip: "10.0.1.10"
+ vm_public_ip : "93.77.191.242"
+
+Resources:
+ + 5 created
+Duration: 1m6s
+```
+
+### SSH access proof (Pulumi VM)
+
+```text
+$ ssh -i ~/.ssh/id_ed25519 yc-user@93.77.191.242 "hostname && uname -a && uptime"
+lab04-vm
+Linux lab04-vm 5.15.0-170-generic #180-Ubuntu SMP Fri Jan 9 16:10:31 UTC 2026 x86_64 x86_64 x86_64 GNU/Linux
+ 09:56:11 up 0 min, 0 users, load average: 0.12, 0.04, 0.01
+```
+
+## 4. Terraform vs Pulumi Comparison
+
+- Ease of Learning: Terraform was slightly easier to start with because HCL is purpose-built for infrastructure and the documentation structure maps directly to resource blocks. Pulumi requires knowing both the cloud API and the Python SDK conventions.
+
+- Code Readability: For this small project both are comparable. Terraform's declarative style makes it obvious what resources exist. Pulumi's Python code is familiar to developers but mixes infrastructure declarations with imperative logic.
+
+- Debugging: Terraform errors reference specific HCL blocks and line numbers, which is straightforward. Pulumi errors include Python tracebacks, which can be noisier but give more context for complex logic errors.
+
+- Documentation: Terraform has a larger community and more examples online. The Yandex Cloud Terraform provider docs are comprehensive. The Pulumi Yandex provider docs are thinner and the package itself shows deprecation warnings.
+
+- Use Case: Terraform fits well for declarative, auditable infrastructure definitions where the team includes non-developers. Pulumi is better when infrastructure requires complex logic (loops, conditionals, abstractions) and the team is comfortable writing code.
+
+## 5. Lab 5 Preparation and Cleanup
+
+VM for Lab 5: No - all cloud resources have been destroyed. Will recreate using Terraform when Lab 5 begins (takes under a minute with the existing code).
+
+### Cleanup status
+
+Resources created by both tools have been fully destroyed:
+
+Terraform:
+
+```text
+Destroy complete! Resources: 4 destroyed.
+```
+
+Pulumi:
+
+```text
+Resources:
+ - 5 deleted
+Duration: 48s
+```
+
+No compute instances, networks, or security groups remain in the Yandex Cloud folder.
diff --git a/pulumi/.gitignore b/pulumi/.gitignore
new file mode 100644
index 0000000000..1bc27a2a3d
--- /dev/null
+++ b/pulumi/.gitignore
@@ -0,0 +1,5 @@
+*.pyc
+venv/
+.pulumi-state/
+Pulumi.*.yaml
+__pycache__/
diff --git a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml
new file mode 100644
index 0000000000..762fcf0f92
--- /dev/null
+++ b/pulumi/Pulumi.yaml
@@ -0,0 +1,7 @@
+name: lab04-pulumi
+description: Lab04 Yandex Cloud VM
+runtime: python
+config:
+ pulumi:tags:
+ value:
+ pulumi:template: python
diff --git a/pulumi/__main__.py b/pulumi/__main__.py
new file mode 100644
index 0000000000..68aa358388
--- /dev/null
+++ b/pulumi/__main__.py
@@ -0,0 +1,103 @@
+"""Lab04: provision a Yandex Cloud VM with network, subnet, and security group."""
+
+import os
+
+import pulumi
+import pulumi_yandex as yandex
+
+# Stack configuration; defaults match the Terraform variant of this lab.
+config = pulumi.Config()
+zone = config.get("zone") or "ru-central1-a"
+ssh_username = config.get("sshUsername") or "yc-user"
+ssh_pub_key_path = config.get("sshPubKeyPath") or "~/.ssh/id_ed25519.pub"
+
+# Read the operator's public key once; the Pulumi program runs locally, so the
+# path is expanded on the machine executing `pulumi up`.
+with open(os.path.expanduser(ssh_pub_key_path)) as key_file:
+    ssh_pub_key = key_file.read().strip()
+
+labels = {"project": "devops-lab04", "tool": "pulumi"}
+
+# Latest image of the Ubuntu 22.04 LTS family.
+image = yandex.get_compute_image(family="ubuntu-2204-lts")
+
+network = yandex.VpcNetwork("lab04-network", name="lab04-network", labels=labels)
+
+subnet = yandex.VpcSubnet(
+    "lab04-subnet",
+    name="lab04-subnet",
+    zone=zone,
+    network_id=network.id,
+    v4_cidr_blocks=["10.0.1.0/24"],
+    labels=labels,
+)
+
+# Inbound: SSH (22), HTTP (80), and the app port (5000); all outbound allowed.
+sg = yandex.VpcSecurityGroup(
+    "lab04-sg",
+    name="lab04-sg",
+    network_id=network.id,
+    labels=labels,
+    ingresses=[
+        yandex.VpcSecurityGroupIngressArgs(
+            description="SSH", protocol="TCP", port=22, v4_cidr_blocks=["0.0.0.0/0"]
+        ),
+        yandex.VpcSecurityGroupIngressArgs(
+            description="HTTP", protocol="TCP", port=80, v4_cidr_blocks=["0.0.0.0/0"]
+        ),
+        yandex.VpcSecurityGroupIngressArgs(
+            description="App", protocol="TCP", port=5000, v4_cidr_blocks=["0.0.0.0/0"]
+        ),
+    ],
+    egresses=[
+        yandex.VpcSecurityGroupEgressArgs(
+            description="Allow all outbound",
+            protocol="ANY",
+            v4_cidr_blocks=["0.0.0.0/0"],
+        ),
+    ],
+)
+
+# cloud-config user-data instead of the `ssh-keys` metadata field, which did
+# not propagate the key on the Ubuntu 22.04 image.
+user_data = f"""#cloud-config
+users:
+  - name: {ssh_username}
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    shell: /bin/bash
+    ssh_authorized_keys:
+      - {ssh_pub_key}
+"""
+
+vm = yandex.ComputeInstance(
+    "lab04-vm",
+    name="lab04-vm",
+    hostname="lab04-vm",
+    platform_id="standard-v2",
+    zone=zone,
+    labels=labels,
+    resources=yandex.ComputeInstanceResourcesArgs(
+        cores=2, core_fraction=20, memory=1
+    ),
+    boot_disk=yandex.ComputeInstanceBootDiskArgs(
+        initialize_params=yandex.ComputeInstanceBootDiskInitializeParamsArgs(
+            image_id=image.id, size=10, type="network-hdd"
+        )
+    ),
+    network_interfaces=[
+        yandex.ComputeInstanceNetworkInterfaceArgs(
+            subnet_id=subnet.id, nat=True, security_group_ids=[sg.id]
+        )
+    ],
+    metadata={"user-data": user_data},
+)
+
+# Stack outputs mirror the Terraform outputs for parity.
+pulumi.export("vm_public_ip", vm.network_interfaces[0].nat_ip_address)
+pulumi.export("vm_private_ip", vm.network_interfaces[0].ip_address)
+pulumi.export(
+    "ssh_command",
+    vm.network_interfaces[0].nat_ip_address.apply(
+        lambda ip: f"ssh -i ~/.ssh/id_ed25519 {ssh_username}@{ip}"
+    ),
+)
diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt
new file mode 100644
index 0000000000..ad106a5476
--- /dev/null
+++ b/pulumi/requirements.txt
@@ -0,0 +1,2 @@
+pulumi>=3.0.0,<4.0.0
+pulumi-yandex>=0.13.0
diff --git a/terraform/.gitignore b/terraform/.gitignore
new file mode 100644
index 0000000000..c501803dca
--- /dev/null
+++ b/terraform/.gitignore
@@ -0,0 +1,11 @@
+*.tfstate
+*.tfstate.*
+.terraform/
+.terraform.lock.hcl
+terraform.tfvars
+*.tfvars
+crash.log
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 0000000000..708c0b7d5a
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,111 @@
+# Lab04 infrastructure: network, subnet, security group, and a single VM on
+# Yandex Cloud. Provider credentials come from YC_* environment variables.
+
+terraform {
+  required_providers {
+    yandex = {
+      source  = "yandex-cloud/yandex"
+      version = "~> 0.135"
+    }
+  }
+  required_version = ">= 1.5"
+}
+
+provider "yandex" {
+  zone = var.zone
+}
+
+# Latest image of the requested Ubuntu family.
+data "yandex_compute_image" "ubuntu" {
+  family = var.image_family
+}
+
+resource "yandex_vpc_network" "lab" {
+  name   = "lab04-network"
+  labels = var.labels
+}
+
+resource "yandex_vpc_subnet" "lab" {
+  name           = "lab04-subnet"
+  zone           = var.zone
+  network_id     = yandex_vpc_network.lab.id
+  v4_cidr_blocks = ["10.0.1.0/24"]
+  labels         = var.labels
+}
+
+# NOTE: ingress rules are open to 0.0.0.0/0 for lab convenience; restrict the
+# SSH source range for anything longer-lived.
+resource "yandex_vpc_security_group" "lab" {
+  name       = "lab04-sg"
+  network_id = yandex_vpc_network.lab.id
+  labels     = var.labels
+
+  ingress {
+    description    = "SSH"
+    protocol       = "TCP"
+    port           = 22
+    v4_cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    description    = "HTTP"
+    protocol       = "TCP"
+    port           = 80
+    v4_cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    description    = "App"
+    protocol       = "TCP"
+    port           = 5000
+    v4_cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  egress {
+    description    = "Allow all outbound"
+    protocol       = "ANY"
+    v4_cidr_blocks = ["0.0.0.0/0"]
+  }
+}
+
+resource "yandex_compute_instance" "lab" {
+  name        = "lab04-vm"
+  hostname    = "lab04-vm"
+  platform_id = var.platform_id
+  zone        = var.zone
+  labels      = var.labels
+
+  resources {
+    cores         = var.cores
+    core_fraction = var.core_fraction
+    memory        = var.memory
+  }
+
+  boot_disk {
+    initialize_params {
+      image_id = data.yandex_compute_image.ubuntu.id
+      size     = var.disk_size
+      type     = var.disk_type
+    }
+  }
+
+  network_interface {
+    subnet_id          = yandex_vpc_subnet.lab.id
+    nat                = true
+    security_group_ids = [yandex_vpc_security_group.lab.id]
+  }
+
+  # cloud-config user-data is used instead of the `ssh-keys` metadata field,
+  # which did not propagate the key on the Ubuntu 22.04 image.
+  metadata = {
+    user-data = <<-EOF
+      #cloud-config
+      users:
+        - name: ${var.ssh_username}
+          sudo: ALL=(ALL) NOPASSWD:ALL
+          shell: /bin/bash
+          ssh_authorized_keys:
+            - ${file(var.ssh_public_key_path)}
+    EOF
+  }
+}
diff --git a/terraform/outputs.tf b/terraform/outputs.tf
new file mode 100644
index 0000000000..9cda5a702b
--- /dev/null
+++ b/terraform/outputs.tf
@@ -0,0 +1,16 @@
+# Connection details for the Lab04 VM provisioned in main.tf.
+
+output "vm_public_ip" {
+  description = "Public IP address of the VM"
+  value       = yandex_compute_instance.lab.network_interface[0].nat_ip_address
+}
+
+output "vm_private_ip" {
+  description = "Private IP address of the VM"
+  value       = yandex_compute_instance.lab.network_interface[0].ip_address
+}
+
+output "ssh_command" {
+  description = "SSH command to connect to the VM"
+  value       = "ssh -i ~/.ssh/id_ed25519 ${var.ssh_username}@${yandex_compute_instance.lab.network_interface[0].nat_ip_address}"
+}
diff --git a/terraform/variables.tf b/terraform/variables.tf
new file mode 100644
index 0000000000..d854717153
--- /dev/null
+++ b/terraform/variables.tf
@@ -0,0 +1,70 @@
+# Input variables for the Lab04 stack; every variable has a working default.
+
+variable "zone" {
+  description = "Yandex Cloud availability zone"
+  type        = string
+  default     = "ru-central1-a"
+}
+
+variable "platform_id" {
+  description = "VM platform identifier"
+  type        = string
+  default     = "standard-v2"
+}
+
+variable "cores" {
+  description = "Number of CPU cores"
+  type        = number
+  default     = 2
+}
+
+variable "core_fraction" {
+  description = "Guaranteed vCPU share (%)"
+  type        = number
+  default     = 20
+}
+
+variable "memory" {
+  description = "RAM in GB"
+  type        = number
+  default     = 1
+}
+
+variable "disk_size" {
+  description = "Boot disk size in GB"
+  type        = number
+  default     = 10
+}
+
+variable "disk_type" {
+  description = "Boot disk type"
+  type        = string
+  default     = "network-hdd"
+}
+
+variable "image_family" {
+  description = "OS image family"
+  type        = string
+  default     = "ubuntu-2204-lts"
+}
+
+variable "ssh_username" {
+  description = "SSH user created on the VM"
+  type        = string
+  default     = "yc-user"
+}
+
+variable "ssh_public_key_path" {
+  description = "Path to SSH public key"
+  type        = string
+  default     = "~/.ssh/id_ed25519.pub"
+}
+
+variable "labels" {
+  description = "Resource labels"
+  type        = map(string)
+  default = {
+    project = "devops-lab04"
+    tool    = "terraform"
+  }
+}