diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..748594ddcb --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,150 @@ +name: python-ci + +on: + push: + branches: [ "master" ] + paths: + - "app_python/**" + - ".github/workflows/python-ci.yml" + pull_request: + branches: [ "master" ] + paths: + - "app_python/**" + - ".github/workflows/python-ci.yml" + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +env: + PY_APP_DIR: app_python + DOCKER_IMAGE_PYTHON: gghost1/devops-lab-app-python + +jobs: + test-lint: + runs-on: ubuntu-latest + defaults: + run: + working-directory: ${{ env.PY_APP_DIR }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: "pip" + cache-dependency-path: | + app_python/requirements.txt + app_python/requirements-dev.txt + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt -r requirements-dev.txt + + - name: Ruff (lint) + run: ruff check . + + - name: Ruff (format check) + run: ruff format --check . + + - name: Pytest + coverage + run: | + pytest --cov=. 
--cov-report=term-missing --cov-report=xml + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: app_python/coverage.xml + fail_ci_if_error: false + + - name: Set up Snyk CLI + uses: snyk/actions/setup@master + + - name: Snyk scan (deps) + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + run: snyk test --file=requirements.txt --severity-threshold=high + + docker-build-push: + runs-on: ubuntu-latest + needs: [ "test-lint" ] + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Prepare tags + shell: bash + run: | + echo "VERSION=$(date -u +%Y.%m.%d)" >> $GITHUB_ENV + echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV + + REF="${GITHUB_HEAD_REF:-$GITHUB_REF_NAME}" + + SAFE_REF="$(echo "$REF" | tr '[:upper:]' '[:lower:]' | sed -E 's#[^a-z0-9_.-]+#-#g')" + echo "SAFE_REF=$SAFE_REF" >> $GITHUB_ENV + + - name: Set up Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub (PR same repo) + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository }} + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push snapshot (PR same repo) + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository }} + uses: docker/build-push-action@v6 + with: + context: ./app_python + file: ./app_python/Dockerfile + push: true + tags: | + ${{ env.DOCKER_IMAGE_PYTHON }}:snapshot-${{ env.SAFE_REF }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Build snapshot (PR fork) + if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository }} + uses: docker/build-push-action@v6 + with: + context: ./app_python + file: ./app_python/Dockerfile + push: false + load: true + tags: | + ${{ env.DOCKER_IMAGE_PYTHON }}:snapshot-${{ 
env.SAFE_REF }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Log in to Docker Hub (push) + if: ${{ github.event_name == 'push' }} + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push (push) + if: ${{ github.event_name == 'push' }} + uses: docker/build-push-action@v6 + with: + context: ./app_python + file: ./app_python/Dockerfile + push: true + tags: | + ${{ env.DOCKER_IMAGE_PYTHON }}:${{ env.VERSION }} + ${{ env.DOCKER_IMAGE_PYTHON }}:sha-${{ env.SHORT_SHA }} + ${{ env.DOCKER_IMAGE_PYTHON }}:${{ env.SAFE_REF }} + ${{ env.DOCKER_IMAGE_PYTHON }}:latest + cache-from: type=gha + cache-to: type=gha,mode=max \ No newline at end of file diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..f4b44804d6 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,10 @@ +[defaults] +inventory = inventory/hosts.ini +roles_path = roles +host_key_checking = False +retry_files_enabled = False + +[privilege_escalation] +become = True +become_method = sudo +become_user = root diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md new file mode 100644 index 0000000000..b59ac44a34 --- /dev/null +++ b/ansible/docs/LAB05.md @@ -0,0 +1,157 @@ +# Ansible Provisioning & Deployment + +## Architecture Overview +- Ansible version: 2.20.3 +- Target VM OS and version: Ubuntu 24.04.3 LTS +- This lab is organized using Ansible roles, which automatically load related tasks, handlers, defaults, and other artifacts based on a standard role directory structure. + Playbooks are kept minimal and only define what to run (which roles) against which hosts, while roles contain the implementation details. +- Roles improve maintainability by separating responsibilities (base OS setup, Docker installation, application deployment) into reusable, well-scoped units. 
+ This also keeps playbooks clean and makes it easier to reuse the same role in other labs/projects by overriding variables instead of copying code. + +## Roles Documentation +### 2.1 Role: common +#### Purpose +Prepare the operating system for automation by updating apt cache and installing essential baseline packages. + +#### Key variables and defaults +File: roles/common/defaults/main.yml +- common_packages: list of packages to be installed + +#### Tasks +File: roles/common/tasks/main.yml +- Update apt cache +- Install common packages + +#### Handlers +None. + +##### Dependencies +None. + +### 2.2 Role: docker +#### Purpose +Install Docker Engine, enable and start Docker service, add a user to the docker group, and install python3-docker for Ansible Docker modules. + +#### Key variables and defaults +File: roles/docker/defaults/main.yml +- docker_user: user to be added into docker group + +#### Tasks +File: roles/docker/tasks/main.yml +- Add Docker GPG key +- Add Docker APT repository +- Install Docker packages +- Ensure service enabled and running +- Add user to docker group +- Install python3-docker + +#### Handlers +File: roles/docker/handlers/main.yml +- restart docker + +#### Dependencies +Recommended to run after common (baseline packages and system state), but no hard-coded dependency is required. + +### 2.3 Role: app_deploy +#### Purpose +Authenticate to Docker Hub (securely via Ansible Vault), pull a container image, recreate the container with the desired configuration, wait for the service to be reachable, and verify the health endpoint. 
+ +#### Key variables and defaults +File: roles/app_deploy/defaults/main.yml +- app_port +- app_restart_policy +- app_env (environment variables) + +#### Tasks +File: roles/app_deploy/tasks/main.yml +- Docker Hub login (no_log enabled) +- Pull image +- Stop/remove old container if present +- Run new container with port mapping and restart policy +- Wait for port +- Health check via HTTP + +#### Handlers +File: roles/app_deploy/handlers/main.yml +- restart app + +#### Dependencies +Depends on Docker being installed and running on the target host (so it should be executed after the docker role). + +## Idempotency Demonstration +![](./screenshots/first_provision.png) +![](./screenshots/second_provision.png) +### What changed on the first run and why? +- common : Install common packages was changed because some baseline packages were not installed yet, so Ansible installed them to reach the desired state. +- docker : Add Docker GPG key was changed because the Docker repository signing key was not present on the VM and had to be added. +- docker : Add Docker repository was changed because the Docker APT repository entry did not exist, so Ansible created it. +- docker : Install Docker packages was changed because Docker Engine and related packages were missing and were installed during the first run. +- docker : Add user to docker group and docker : Install python docker SDK for Ansible modules were changed because the user needed group membership for non-root Docker usage and the python3-docker package was required for Ansible Docker modules. + +### What did not change on the second run and why? +- common : Install common packages was ok because the required packages were already installed from the first run. +- docker : Add Docker GPG key and docker : Add Docker repository were ok because the key and repository were already present and correctly configured. +- docker : Install Docker packages was ok because Docker packages were already installed at the required state. 
+- docker : Ensure Docker service is enabled and running remained ok because the service was already enabled and started. +- docker : Add user to docker group and docker : Install python docker SDK for Ansible modules were ok because the user group membership and the python3-docker package were already in place. + +### Why the roles are idempotent +The roles are idempotent because they rely on stateful Ansible modules (packages, repositories, services, users) that converge the server to a declared target state and report changed only when the current state differs from that desired state. +Additionally, the Docker restart handler ran on the first run because it was notified by tasks that changed, and handlers are designed to run after tasks complete and only when notified by changes, which avoids unnecessary restarts on subsequent runs. + +## Ansible Vault Usage +Sensitive data (Docker Hub credentials) is stored in an encrypted variable file using Ansible Vault, so secrets can be kept out of plaintext while still being usable by automation. +This allows storing encrypted files in version control without exposing credentials. 
+ +### Vault password management +Strategy used: +--ask-vault-pass (interactive prompt) + +### Example of encrypted file +```text +$ANSIBLE_VAULT;1.1;AES256 +65636336326336346437643335383935623035393366396334336634396236666130356662333237 +3031343362643435313537353564643965623735313039300a326537353861653838373432323136 +32353330303665373565313738323033373538646633366530386536393739343236346564626232 +3131343233386263380a323937333433326366303735336539656435373038666530613836343534 +64306464633338653931343665613538616432666165396537333331323765343664366331373735 +61313863386335666535396164623164643061633164386133326465653136373965626136363463 +34363836396637396435376533356630663332333433636435383733343832663963636565363864 +33303163393066393435363563653863386463663835386230363238616430333432343030383532 +63396466336135306565616163666633633239303536663937613864386537646362643233376437 +35623739366463303537393065643936666631653739636663636138333563663163396263356561 +63653237383935303637336439396131366338323664613131316166396131306330653930653366 +32633262623939383637323135313665633330346436316631663037613635326563323333633637 +36346139646366383566383930333630366663333930376662613761386433386661373833666466 +34633136653035393334643933393336616137396531613537613664393366643030343930323334 +34373461323439343034366231636363396161663834666331336231353233643630396631626139 +66343238663361666161323162313038663165303932323238656132613636616538333761323434 +3365 +``` + +### Why Ansible Vault is important +Ansible Vault is important because it encrypts secrets (passwords/tokens/keys) and supports decrypting them only at runtime when the correct vault secret is provided. + +## Deployment Verification +![](./screenshots/deploy.png) +![](./screenshots/docker_ps.png) +![](./screenshots/curl.png) + +## Key Decisions +### Why use roles instead of plain playbooks? 
+Roles provide a standardized way to package tasks, defaults, handlers, and templates, which improves readability and long-term maintainability. +They also keep playbooks short and focused on orchestration rather than implementation details. + +### How do roles improve reusability? +A role can be applied to multiple hosts and projects by overriding variables, without duplicating task logic. +This enables consistent provisioning patterns across environments. + +### What makes a task idempotent? +A task is idempotent when running it multiple times results in the same final state and the second run produces no changes if the desired state is already met. + +### How do handlers improve efficiency? +Handlers are executed only when changes occur and are typically run once per play, even if multiple tasks notify them. +This avoids repeated service restarts during a single play execution. + +### Why is Ansible Vault necessary? +Vault is necessary to encrypt sensitive information so that secrets are not stored in plaintext and can be safely committed to a repository while remaining usable during automation. 
\ No newline at end of file diff --git a/ansible/docs/screenshots/curl.png b/ansible/docs/screenshots/curl.png new file mode 100644 index 0000000000..9e15139a6c Binary files /dev/null and b/ansible/docs/screenshots/curl.png differ diff --git a/ansible/docs/screenshots/deploy.png b/ansible/docs/screenshots/deploy.png new file mode 100644 index 0000000000..4670eec322 Binary files /dev/null and b/ansible/docs/screenshots/deploy.png differ diff --git a/ansible/docs/screenshots/docker_ps.png b/ansible/docs/screenshots/docker_ps.png new file mode 100644 index 0000000000..7e7b277cb1 Binary files /dev/null and b/ansible/docs/screenshots/docker_ps.png differ diff --git a/ansible/docs/screenshots/first_provision.png b/ansible/docs/screenshots/first_provision.png new file mode 100644 index 0000000000..9a845fc7a7 Binary files /dev/null and b/ansible/docs/screenshots/first_provision.png differ diff --git a/ansible/docs/screenshots/second_provision.png b/ansible/docs/screenshots/second_provision.png new file mode 100644 index 0000000000..dca5b7826a Binary files /dev/null and b/ansible/docs/screenshots/second_provision.png differ diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml new file mode 100644 index 0000000000..904248bda3 --- /dev/null +++ b/ansible/group_vars/all.yml @@ -0,0 +1,18 @@ +$ANSIBLE_VAULT;1.1;AES256 +65636336326336346437643335383935623035393366396334336634396236666130356662333237 +3031343362643435313537353564643965623735313039300a326537353861653838373432323136 +32353330303665373565313738323033373538646633366530386536393739343236346564626232 +3131343233386263380a323937333433326366303735336539656435373038666530613836343534 +64306464633338653931343665613538616432666165396537333331323765343664366331373735 +61313863386335666535396164623164643061633164386133326465653136373965626136363463 +34363836396637396435376533356630663332333433636435383733343832663963636565363864 +33303163393066393435363563653863386463663835386230363238616430333432343030383532 
+63396466336135306565616163666633633239303536663937613864386537646362643233376437 +35623739366463303537393065643936666631653739636663636138333563663163396263356561 +63653237383935303637336439396131366338323664613131316166396131306330653930653366 +32633262623939383637323135313665633330346436316631663037613635326563323333633637 +36346139646366383566383930333630366663333930376662613761386433386661373833666466 +34633136653035393334643933393336616137396531613537613664393366643030343930323334 +34373461323439343034366231636363396161663834666331336231353233643630396631626139 +66343238663361666161323162313038663165303932323238656132613636616538333761323434 +3365 diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini new file mode 100644 index 0000000000..beba42b553 --- /dev/null +++ b/ansible/inventory/hosts.ini @@ -0,0 +1,5 @@ +[webservers] +yc-vm-1 ansible_host=93.77.184.209 ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/yc_lab + +[webservers:vars] +ansible_python_interpreter=/usr/bin/python3 diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml new file mode 100644 index 0000000000..4f7b145aef --- /dev/null +++ b/ansible/playbooks/deploy.yml @@ -0,0 +1,8 @@ +--- +- name: Deploy application + hosts: webservers + vars_files: + - ../group_vars/all.yml + become: yes + roles: + - app_deploy diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml new file mode 100644 index 0000000000..6ba43f9532 --- /dev/null +++ b/ansible/playbooks/provision.yml @@ -0,0 +1,7 @@ +--- +- name: Provision web servers + hosts: webservers + become: yes + roles: + - common + - docker diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ansible/roles/app_deploy/defaults/main.yml b/ansible/roles/app_deploy/defaults/main.yml new file mode 100644 index 0000000000..33d51225da --- /dev/null +++ b/ansible/roles/app_deploy/defaults/main.yml @@ -0,0 +1,3 @@ +--- 
+app_restart_policy: unless-stopped +app_env: {} diff --git a/ansible/roles/app_deploy/handlers/main.yml b/ansible/roles/app_deploy/handlers/main.yml new file mode 100644 index 0000000000..cab887e7f2 --- /dev/null +++ b/ansible/roles/app_deploy/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart app + community.docker.docker_container: + name: "{{ app_container_name }}" + state: started + restart: true diff --git a/ansible/roles/app_deploy/tasks/main.yml b/ansible/roles/app_deploy/tasks/main.yml new file mode 100644 index 0000000000..34e474923b --- /dev/null +++ b/ansible/roles/app_deploy/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Login to Docker Hub + community.docker.docker_login: + username: "{{ dockerhub_username }}" + password: "{{ dockerhub_password }}" + no_log: true + # docker_login authenticates to the registry and saves the credentials in the Docker config / credential store + +- name: Pull image + community.docker.docker_image: + name: "{{ docker_image }}" + tag: "{{ docker_image_tag }}" + source: pull + +- name: Stop old container (if exists) + community.docker.docker_container: + name: "{{ app_container_name }}" + state: stopped + ignore_errors: true + +- name: Remove old container (if exists) + community.docker.docker_container: + name: "{{ app_container_name }}" + state: absent + +- name: Run container + community.docker.docker_container: + name: "{{ app_container_name }}" + image: "{{ docker_image }}:{{ docker_image_tag }}" + state: started + restart_policy: "{{ app_restart_policy }}" + ports: + - "{{ app_port }}:{{ app_port }}" + env: "{{ app_env }}" + notify: restart app + +- name: Wait for app port + ansible.builtin.wait_for: + port: "{{ app_port }}" + timeout: 60 + +- name: Check /health endpoint + ansible.builtin.uri: + url: "http://127.0.0.1:{{ app_port }}/health" + status_code: 200 diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 0000000000..2282a109a1 --- /dev/null +++ 
b/ansible/roles/common/defaults/main.yml @@ -0,0 +1,7 @@ +--- +common_packages: + - python3-pip + - curl + - git + - vim + - htop diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 0000000000..13e13e6a07 --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Update apt cache + ansible.builtin.apt: + update_cache: yes + cache_valid_time: 3600 + +- name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..e64d3b7e66 --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,2 @@ +--- +docker_user: ubuntu diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..1a5058da5e --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..3ee751e586 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,47 @@ +--- +- name: Install dependencies for apt over HTTPS + ansible.builtin.apt: + name: + - ca-certificates + - gnupg + - lsb-release + state: present + update_cache: yes + +- name: Add Docker GPG key + ansible.builtin.apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + +- name: Add Docker repository + ansible.builtin.apt_repository: + repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" + state: present + notify: restart docker + +- name: Install Docker packages + ansible.builtin.apt: + name: + - docker-ce + - docker-ce-cli + - containerd.io + state: present + update_cache: yes + notify: restart docker 
+ +- name: Ensure Docker service is enabled and running + ansible.builtin.service: + name: docker + enabled: yes + state: started + +- name: Add user to docker group + ansible.builtin.user: + name: "{{ docker_user }}" + groups: docker + append: yes + +- name: Install python docker SDK for Ansible modules + ansible.builtin.apt: + name: python3-docker + state: present diff --git a/app_java/.dockerignore b/app_java/.dockerignore new file mode 100644 index 0000000000..e2e18e7a79 --- /dev/null +++ b/app_java/.dockerignore @@ -0,0 +1,19 @@ +# VCS +.git/ +.gitignore + +# IDE +.idea/ +.vscode/ + +# Gradle / build outputs +.gradle/ +build/ +out/ + +# OS junk +.DS_Store + +# docs +docs/ +*.md \ No newline at end of file diff --git a/app_java/.gitignore b/app_java/.gitignore new file mode 100644 index 0000000000..c2065bc262 --- /dev/null +++ b/app_java/.gitignore @@ -0,0 +1,37 @@ +HELP.md +.gradle +build/ +!gradle/wrapper/gradle-wrapper.jar +!**/src/main/**/build/ +!**/src/test/**/build/ + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache +bin/ +!**/src/main/**/bin/ +!**/src/test/**/bin/ + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr +out/ +!**/src/main/**/out/ +!**/src/test/**/out/ + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ + +### VS Code ### +.vscode/ diff --git a/app_java/Dockerfile b/app_java/Dockerfile new file mode 100644 index 0000000000..cbd9e57a72 --- /dev/null +++ b/app_java/Dockerfile @@ -0,0 +1,23 @@ +FROM gradle:8.13-jdk21 AS builder + +WORKDIR /home/gradle/project + +COPY build.gradle.kts settings.gradle.kts gradle.properties* ./ +COPY gradle ./gradle +COPY gradlew ./ + +RUN ./gradlew --no-daemon dependencies || true + +COPY src ./src + +RUN ./gradlew --no-daemon clean bootJar + +FROM gcr.io/distroless/java21-debian12:nonroot + +WORKDIR /app + +COPY --from=builder /home/gradle/project/build/libs/*.jar /app/app.jar + +EXPOSE 8080 + +ENTRYPOINT ["java", "-jar", 
"/app/app.jar"] diff --git a/app_java/README.md b/app_java/README.md new file mode 100644 index 0000000000..7953d1555e --- /dev/null +++ b/app_java/README.md @@ -0,0 +1,67 @@ +# DevOps Info Service + +## Overview +A simple Java (Spring Boot) web service that returns service info, system information, runtime uptime, and request details. + +## Prerequisites +- Java 21 +- Gradle (or Gradle Wrapper `./gradlew`) + +## Installation +```bash +./gradlew clean build +``` + +## Running the Application +Run via Gradle +```bash +./gradlew bootRun +``` +Run as a jar +```bash +./gradlew build +java -jar build/libs/*.jar +``` +Custom config: +```bash +PORT=8080 ./gradlew bootRun +# or +HOST=127.0.0.1 PORT=3000 ./gradlew bootRun +``` + +## API Endpoints +- GET `/` — Service and system information +- GET `/health` — Health check + +## Configuration +| Variable | Default | Description | +| ------------------- | -------------------------- | ------------------- | +| HOST | 0.0.0.0 | Bind address | +| PORT | 8080 | Listen port | +| SERVICE_NAME | devops-info-service | Service name | +| SERVICE_VERSION | 1.0.0 | Service version | +| SERVICE_DESCRIPTION | DevOps course info service | Service description | + +## Docker
### Build image (local) +From the `app_java/` directory, build an image using the current folder as the build context: +```bash +docker build -t : . +``` + +### Run container +Run the container with port publishing so the service is reachable from the host: +```bash +docker run --rm -p : gghost1/devops-lab-app-java:latest +``` +Pass configuration via environment variables (the app reads HOST, PORT, and the SERVICE_* variables): +```bash +docker run --rm -e PORT= -p : gghost1/devops-lab-app-java:latest +``` +For a locally built image, replace `gghost1/devops-lab-app-java:latest` with your `:`. 
+ +### Pull from Docker Hub +Pull an already published image from Docker Hub: +```bash +docker pull gghost1/devops-lab-app-java:latest +``` \ No newline at end of file diff --git a/app_java/build.gradle.kts b/app_java/build.gradle.kts new file mode 100644 index 0000000000..60d8ad534e --- /dev/null +++ b/app_java/build.gradle.kts @@ -0,0 +1,29 @@ +plugins { + java + id("org.springframework.boot") version "3.5.10" + id("io.spring.dependency-management") version "1.1.7" +} + +group = "devops.core" +version = "0.0.1-SNAPSHOT" +description = "app_java" + +java { + toolchain { + languageVersion = JavaLanguageVersion.of(21) + } +} + +repositories { + mavenCentral() +} + +dependencies { + implementation("org.springframework.boot:spring-boot-starter-web") + testImplementation("org.springframework.boot:spring-boot-starter-test") + testRuntimeOnly("org.junit.platform:junit-platform-launcher") +} + +tasks.withType { + useJUnitPlatform() +} diff --git a/app_java/docs/JAVA.md b/app_java/docs/JAVA.md new file mode 100644 index 0000000000..c5521510ad --- /dev/null +++ b/app_java/docs/JAVA.md @@ -0,0 +1,10 @@ +# Language Justification (Java 21) + +## Why Java for this service +I implemented this version in Java because it provides strong static typing, mature tooling, and a large ecosystem for building reliable backend services. Java is also commonly used in enterprise environments, so the same patterns used in this lab are directly applicable to real production systems. + +## Why Java 21 + Gradle +This project uses Java 21 and Gradle to ensure a modern language level and a reproducible build process. Gradle also integrates well with CI pipelines (build/test/package), which aligns with DevOps practices. + +## Conclusion +Java 21 + Spring Boot is a good match for an API-focused lab because it encourages clean architecture (controllers/services/config), predictable runtime behavior, and production-like conventions. 
diff --git a/app_java/docs/LAB01.md b/app_java/docs/LAB01.md new file mode 100644 index 0000000000..a881c311e0 --- /dev/null +++ b/app_java/docs/LAB01.md @@ -0,0 +1,183 @@ +# Java Web Application (Spring Boot) + +## 1) Framework Selection + +### Chosen framework: Spring Boot (Spring Web) +I chose Spring Boot with Spring Web because it provides a clean and conventional way to build REST APIs in Java with clear separation of concerns: controller layer for HTTP, service layer for business logic, and configuration via application settings. + +### Comparison table +| Framework | Pros | Cons | Fit for this lab | +|---|---|---|---| +| Spring Boot (chosen) | Standard approach for REST services, clean structure | More code than minimal frameworks | Best for Java version | +| Jakarta EE (plain) | Standard APIs | More manual wiring/config | Too much setup | +| Micronaut / Quarkus | Fast, modern | Extra learning curve for this course | Not selected | + +--- + +## 2) Best Practices Applied (with code examples) + +### 2.1 Clean code organization (Controller + Service + DTO) +HTTP handlers are minimal and delegate logic to a service class. 
+ +```java +@RestController +public class InfoController { + private final InfoService infoService; + + public InfoController(InfoService infoService) { + this.infoService = infoService; + } + + @GetMapping("/") + public InfoResponse index(HttpServletRequest request) { + return infoService.buildInfoResponse(request); + } + + @GetMapping("/health") + public HealthResponse health() { + return infoService.buildHealthResponse(); + } +} +``` +Core logic (system info, uptime, request info) is isolated in the service: +```java +@Service +public class InfoService { + public InfoResponse buildInfoResponse(HttpServletRequest request) { + // uptime + system info + request info + endpoints + } +} +``` +### 2.2 External configuration (env-driven) +The app is configurable via environment variables using application.yml placeholders: +```text +server: + address: ${HOST:0.0.0.0} + port: ${PORT:8080} + +app: + service: + name: ${SERVICE_NAME:devops-info-service} + version: ${SERVICE_VERSION:1.0.0} + description: ${SERVICE_DESCRIPTION:DevOps course info service} +``` +The service metadata is injected using a typed configuration record: +```java +@ConfigurationProperties(prefix = "app.service") +public record ServiceInfoProperties(String name, String version, String description) {} +``` + +### 2.3 Logging +Requests are logged in the controller to improve observability: + +```java +log.info("Request {} {} from {}", request.getMethod(), request.getRequestURI(), request.getRemoteAddr()); +``` +### 2.4 Error handling (consistent JSON errors) +A global exception handler returns a predictable JSON error payload: + +```java +@RestControllerAdvice +public class GlobalExceptionHandler { + @ExceptionHandler(Exception.class) + public ResponseEntity handleAny(Exception ex) { + ApiError body = new ApiError("Internal Server Error", "An unexpected error occurred", Instant.now().toString()); + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(body); + } +} +``` +### 2.5 Proxy-aware 
client IP (best-effort) +If the app is behind a reverse proxy, the client IP may be provided via X-Forwarded-For: + +```java +String xff = Optional.ofNullable(request.getHeader("X-Forwarded-For")) + .map(h -> h.split(",")[0].trim()) + .orElse(""); +String clientIp = !xff.isBlank() ? xff : request.getRemoteAddr(); +``` +## 3) API Documentation +### 3.1 Endpoints +- GET `/` — service + system + runtime + request information + ```json + { + "service": { + "name": "devops-info-service", + "version": "1.0.0", + "description": "DevOps course info service", + "framework": "Spring Boot" + }, + "system": { + "hostname": "Mac.ufanet.ru", + "platform": "Mac OS X", + "platformVersion": "15.6.1", + "architecture": "aarch64", + "cpuCount": 10, + "javaVersion": "21.0.8" + }, + "runtime": { + "uptimeSeconds": 62, + "uptimeHuman": "0 hours, 1 minute", + "currentTime": "2026-01-26T19:29:03.793503Z", + "timezone": "UTC" + }, + "request": { + "clientIp": "0:0:0:0:0:0:0:1", + "userAgent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36", + "method": "GET", + "path": "/" + }, + "endpoints": [ + { + "path": "/", + "method": "GET", + "description": "Service information" + }, + { + "path": "/health", + "method": "GET", + "description": "Health check" + } + ] + } + ``` +- GET `/health` — health check, timestamp, uptime + ```json + { + "status": "healthy", + "timestamp": "2026-01-26T19:28:31.110960Z", + "uptimeSeconds": 30 + } + ``` + +### 3.2 Example requests +Run: +```bash +./gradlew bootRun +``` +Custom configuration: +```bash +HOST=127.0.0.1 PORT=3000 ./gradlew bootRun +SERVICE_NAME=devops-info-service SERVICE_VERSION=1.0.0 SERVICE_DESCRIPTION="DevOps course info service" ./gradlew bootRun +``` +Test: +```bash +curl -i http://127.0.0.1:8080/ +curl -i http://127.0.0.1:8080/health +curl -s http://127.0.0.1:8080/ | python -m json.tool +``` + +# 4) Testing Evidence (Screenshots) +Screenshots are stored in docs/screenshots/: 
+- 01-main-endpoint.png — GET `/` full JSON response. + ![](screenshots/01-main-endpoint.png) +- 02-health-check.png — GET `/health` response. + ![](screenshots/02-health-check.png) +- 03-formatted-output.png — pretty-printed JSON output (python -m json.tool). + ![](screenshots/03-formatted-output.png) +- 04-app-run.png — compilation and execution. + ![](screenshots/04-app-run.png) + +## 5) Challenges & Solutions +- Uptime formatting: implemented using JVM uptime (RuntimeMXBean.getUptime()) converted to seconds and a human-readable string. +- Hostname retrieval: handled potential lookup issues by using a fallback value if hostname resolution fails. +- Client IP behind proxy: used X-Forwarded-For as a best-effort source with fallback to getRemoteAddr(). \ No newline at end of file diff --git a/app_java/docs/LAB02.md b/app_java/docs/LAB02.md new file mode 100644 index 0000000000..f36aca6172 --- /dev/null +++ b/app_java/docs/LAB02.md @@ -0,0 +1,104 @@ +## Multi-Stage Build (Java Spring Boot) + +### Strategy (Why multi-stage) +I used a multi-stage Docker build because Docker allows multiple `FROM` stages and lets me copy only the build artifacts into the final runtime image using `COPY --from=...`, leaving behind the compiler/SDK and intermediate files. +This is important for compiled (or build-tool heavy) languages like Java because a builder image with Gradle + JDK is large, while the runtime image only needs a JRE-compatible environment and the final JAR. + +### Stage 1 — Builder +- Base: `gradle:8.13-jdk21` +- Purpose: compile and package the Spring Boot app (`bootJar`), producing a JAR in `build/libs/`. + +Key caching decision: +- I copy `build.gradle.kts`, `settings.gradle.kts`, `gradle/`, and `gradlew` before copying `src/` so that dependency-related layers can be reused when I only change application code. 
+ +### Stage 2 — Runtime +- Base: `gcr.io/distroless/java21-debian12:nonroot` +- Purpose: run the compiled JAR with the Java runtime, without including build tools. + +Why distroless: +Distroless images contain only the application and its runtime dependencies and do not include a shell or package manager, which reduces what is available inside the container at runtime. +I used the `:nonroot` variant to run the container as a non-root user by default. + +### Size comparison (builder vs final) +Commands I used: +```bash +# Build only the builder stage (for size comparison) +docker build --target builder -t app_java:builder . + +# Build the final runtime image +docker build -t app_java:runtime . + +# Push to the hub +docker tag app_java:builder gghost1/app_java:builder +docker push gghost1/app_java:builder + +docker tag app_java:runtime gghost1/app_java:runtime +docker push gghost1/app_java:runtime +``` +**Results**: +- Builder image size: 624.97 MB +- Final runtime image size: 86.39 MB + +Analysis: +The builder image is much larger because it contains Gradle and the full JDK toolchain, while the runtime image includes only what is needed to run the JAR plus the JAR itself. + +Trade-offs and debugging +A trade-off of distroless is that it does not contain a shell, so interactive debugging inside the container is harder; distroless provides debug variants for debugging scenarios. +For production, I prefer the non-debug distroless runtime because it is minimal and reduces unnecessary tooling in the final image. + +Docker Hub repository URL: https://hub.docker.com/r/gghost1/devops-lab-app-java + +### Multi-stage builds matter for compiled languages +For compiled / build-tool-heavy stacks like Java (Gradle + JDK), a single-stage image would often ship the entire build toolchain (Gradle, compiler, caches) into production, which is unnecessarily large and increases what exists inside the container at runtime. 
+Multi-stage builds solve this by separating “build environment” and “runtime environment,” producing a smaller final image that contains only what is needed to run the already-built artifact. +This also improves security in practice because fewer tools and packages exist in the runtime image, which reduces the potential attack surface. + +### Build output: +```terminaloutput +antipovd@Mac app_java % docker build -t devops-lab-app-java:1.0.0 . +[+] Building 70.5s (18/18) FINISHED docker:desktop-linux + => [internal] load build definition from Dockerfile 0.0s + => => transferring dockerfile: 502B 0.0s + => [internal] load metadata for gcr.io/distroless/java21-debian12:nonroot 2.0s + => [internal] load metadata for docker.io/library/gradle:8.13-jdk21 2.7s + => [auth] library/gradle:pull token for registry-1.docker.io 0.0s + => [internal] load .dockerignore 0.0s + => => transferring context: 174B 0.0s + => [builder 1/8] FROM docker.io/library/gradle:8.13-jdk21@sha256:67b8c4bfd2b064e58a7307e2da1fc3881bc03ecc7a57cf61d8b570a02ebfaea2 19.6s + => => resolve docker.io/library/gradle:8.13-jdk21@sha256:67b8c4bfd2b064e58a7307e2da1fc3881bc03ecc7a57cf61d8b570a02ebfaea2 0.0s + => => sha256:71a8b7fad68fc85eaee6cc7676712870dcfa1c968bb3763082b563db6c379028 59.53kB / 59.53kB 0.6s + ... (repeating) 8.4s + => => sha256:49b96e96358d7aed127d4f4cd2294d77d497c683123bbad89fa80a83d8ef64aa 28.85MB / 28.85MB 3.3s + => => extracting sha256:49b96e96358d7aed127d4f4cd2294d77d497c683123bbad89fa80a83d8ef64aa 0.3s + ... 
0.5s + => => extracting sha256:71a8b7fad68fc85eaee6cc7676712870dcfa1c968bb3763082b563db6c379028 0.0s + => [internal] load build context 0.0s + => => transferring context: 64.49kB 0.0s + => [stage-1 1/3] FROM gcr.io/distroless/java21-debian12:nonroot@sha256:a801e7ccb0606399ae950b0010b03261d4cee3d9866aa2930de6e0dcb4a5b0f5 11.5s + => => resolve gcr.io/distroless/java21-debian12:nonroot@sha256:a801e7ccb0606399ae950b0010b03261d4cee3d9866aa2930de6e0dcb4a5b0f5 0.0s + => => sha256:7102060171e5d2a2c0f02f523912a62778fbab1d7abd7fee43a97ee39f79a6c9 59.45MB / 59.45MB 10.8s + ... 1.4s + => => sha256:d1c559a043f52900e1caad98278530ca55be2708a21a1d486f51109a79a5f4e5 104.22kB / 104.22kB 0.9s + => => extracting sha256:d1c559a043f52900e1caad98278530ca55be2708a21a1d486f51109a79a5f4e5 0.0s + ... 0.0s + => => extracting sha256:7102060171e5d2a2c0f02f523912a62778fbab1d7abd7fee43a97ee39f79a6c9 0.5s + => [stage-1 2/3] WORKDIR /app 0.3s + => [builder 2/8] WORKDIR /home/gradle/project 0.4s + => [builder 3/8] COPY build.gradle.kts settings.gradle.kts gradle.properties* ./ 0.0s + => [builder 4/8] COPY gradle ./gradle 0.0s + => [builder 5/8] COPY gradlew ./ 0.0s + => [builder 6/8] RUN ./gradlew --no-daemon dependencies || true 39.8s + => [builder 7/8] COPY src ./src 0.0s + => [builder 8/8] RUN ./gradlew --no-daemon clean bootJar 7.2s + => [stage-1 3/3] COPY --from=builder /home/gradle/project/build/libs/*.jar /app/app.jar 0.0s + => exporting to image 0.5s + => => exporting layers 0.4s + => => exporting manifest sha256:70afed2532c3b2f772b1429130e008c37374ab6037eec196c840e8321041073f 0.0s + => => exporting config sha256:bfa435964c2136301877bc43fbe28d55df6e7a5a3d001f60009853ae72773d8c 0.0s + => => exporting attestation manifest sha256:820c286b3949c1dd3b9c485ffbe99fad7500c6ea6fa766d599dc6437b640435a 0.0s + => => exporting manifest list sha256:d62d4f81f51b75a540746f277bae4d00967ab6461de78d9388e62be632938891 0.0s + => => naming to docker.io/library/devops-lab-app-java:1.0.0 0.0s + => => unpacking to 
docker.io/library/devops-lab-app-java:1.0.0 0.1s + +View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/aj8d7f9fo4hz3qliib5xv7gx1 +``` \ No newline at end of file diff --git a/app_java/docs/screenshots/01-main-endpoint.png b/app_java/docs/screenshots/01-main-endpoint.png new file mode 100644 index 0000000000..27653c024f Binary files /dev/null and b/app_java/docs/screenshots/01-main-endpoint.png differ diff --git a/app_java/docs/screenshots/02-health-check.png b/app_java/docs/screenshots/02-health-check.png new file mode 100644 index 0000000000..3f6ceb4e4a Binary files /dev/null and b/app_java/docs/screenshots/02-health-check.png differ diff --git a/app_java/docs/screenshots/03-formatted-output.png b/app_java/docs/screenshots/03-formatted-output.png new file mode 100644 index 0000000000..36bab634fd Binary files /dev/null and b/app_java/docs/screenshots/03-formatted-output.png differ diff --git a/app_java/docs/screenshots/04-app-run.png b/app_java/docs/screenshots/04-app-run.png new file mode 100644 index 0000000000..e1b250cdab Binary files /dev/null and b/app_java/docs/screenshots/04-app-run.png differ diff --git a/app_java/gradle/wrapper/gradle-wrapper.jar b/app_java/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000..1b33c55baa Binary files /dev/null and b/app_java/gradle/wrapper/gradle-wrapper.jar differ diff --git a/app_java/gradle/wrapper/gradle-wrapper.properties b/app_java/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000000..d4081da476 --- /dev/null +++ b/app_java/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.3-bin.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/app_java/gradlew b/app_java/gradlew new file mode 100755 index 0000000000..23d15a9367 --- 
/dev/null +++ b/app_java/gradlew @@ -0,0 +1,251 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. 
+# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH="\\\"\\\"" + + +# Determine the Java command to use to start the JVM. 
+if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. 
+# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/app_java/gradlew.bat b/app_java/gradlew.bat new file mode 100644 index 0000000000..db3a6ac207 --- /dev/null +++ b/app_java/gradlew.bat @@ -0,0 +1,94 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 
1>&2 + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH= + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/app_java/settings.gradle.kts b/app_java/settings.gradle.kts new file mode 100644 index 0000000000..1d14351e95 --- /dev/null +++ b/app_java/settings.gradle.kts @@ -0,0 +1 @@ +rootProject.name = "app_java" diff --git a/app_java/src/main/java/devops/core/app_java/AppJavaApplication.java b/app_java/src/main/java/devops/core/app_java/AppJavaApplication.java new file mode 100644 index 0000000000..d2344fb018 --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/AppJavaApplication.java @@ -0,0 +1,16 @@ +package devops.core.app_java; + +import devops.core.app_java.configuration.ServiceInfoProperties; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.context.properties.EnableConfigurationProperties; + +@SpringBootApplication +@EnableConfigurationProperties(ServiceInfoProperties.class) +public class AppJavaApplication { + + public static void main(String[] args) { + SpringApplication.run(AppJavaApplication.class, args); + } + +} diff --git a/app_java/src/main/java/devops/core/app_java/configuration/ServiceInfoProperties.java b/app_java/src/main/java/devops/core/app_java/configuration/ServiceInfoProperties.java new file mode 100644 index 
0000000000..301f845e8c --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/configuration/ServiceInfoProperties.java @@ -0,0 +1,10 @@ +package devops.core.app_java.configuration; + +import org.springframework.boot.context.properties.ConfigurationProperties; + +@ConfigurationProperties(prefix = "app.service") +public record ServiceInfoProperties( + String name, + String version, + String description +) {} diff --git a/app_java/src/main/java/devops/core/app_java/controller/InfoController.java b/app_java/src/main/java/devops/core/app_java/controller/InfoController.java new file mode 100644 index 0000000000..d97483c77d --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/controller/InfoController.java @@ -0,0 +1,33 @@ +package devops.core.app_java.controller; + +import devops.core.app_java.dto.HealthResponse; +import devops.core.app_java.dto.InfoResponse; +import devops.core.app_java.service.InfoService; +import jakarta.servlet.http.HttpServletRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class InfoController { + + private static final Logger log = LoggerFactory.getLogger(InfoController.class); + + private final InfoService infoService; + + public InfoController(InfoService infoService) { + this.infoService = infoService; + } + + @GetMapping("/") + public InfoResponse index(HttpServletRequest request) { + log.info("Request {} {} from {}", request.getMethod(), request.getRequestURI(), request.getRemoteAddr()); + return infoService.buildInfoResponse(request); + } + + @GetMapping("/health") + public HealthResponse health() { + return infoService.buildHealthResponse(); + } +} diff --git a/app_java/src/main/java/devops/core/app_java/dto/EndpointDto.java b/app_java/src/main/java/devops/core/app_java/dto/EndpointDto.java new file mode 100644 index 0000000000..50ed923004 --- /dev/null 
+++ b/app_java/src/main/java/devops/core/app_java/dto/EndpointDto.java @@ -0,0 +1,7 @@ +package devops.core.app_java.dto; + +public record EndpointDto( + String path, + String method, + String description +) {} diff --git a/app_java/src/main/java/devops/core/app_java/dto/HealthResponse.java b/app_java/src/main/java/devops/core/app_java/dto/HealthResponse.java new file mode 100644 index 0000000000..6975b3bda8 --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/dto/HealthResponse.java @@ -0,0 +1,7 @@ +package devops.core.app_java.dto; + +public record HealthResponse( + String status, + String timestamp, + long uptimeSeconds +) {} diff --git a/app_java/src/main/java/devops/core/app_java/dto/InfoResponse.java b/app_java/src/main/java/devops/core/app_java/dto/InfoResponse.java new file mode 100644 index 0000000000..6493d8d054 --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/dto/InfoResponse.java @@ -0,0 +1,11 @@ +package devops.core.app_java.dto; + +import java.util.List; + +public record InfoResponse( + ServiceBlock service, + SystemBlock system, + RuntimeBlock runtime, + RequestBlock request, + List endpoints +) {} diff --git a/app_java/src/main/java/devops/core/app_java/dto/RequestBlock.java b/app_java/src/main/java/devops/core/app_java/dto/RequestBlock.java new file mode 100644 index 0000000000..22e5bb1acd --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/dto/RequestBlock.java @@ -0,0 +1,8 @@ +package devops.core.app_java.dto; + +public record RequestBlock( + String clientIp, + String userAgent, + String method, + String path +) {} \ No newline at end of file diff --git a/app_java/src/main/java/devops/core/app_java/dto/RuntimeBlock.java b/app_java/src/main/java/devops/core/app_java/dto/RuntimeBlock.java new file mode 100644 index 0000000000..6ff1a06c13 --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/dto/RuntimeBlock.java @@ -0,0 +1,8 @@ +package devops.core.app_java.dto; + +public record RuntimeBlock( + long 
uptimeSeconds, + String uptimeHuman, + String currentTime, + String timezone +) {} \ No newline at end of file diff --git a/app_java/src/main/java/devops/core/app_java/dto/ServiceBlock.java b/app_java/src/main/java/devops/core/app_java/dto/ServiceBlock.java new file mode 100644 index 0000000000..18f3cb7689 --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/dto/ServiceBlock.java @@ -0,0 +1,8 @@ +package devops.core.app_java.dto; + +public record ServiceBlock( + String name, + String version, + String description, + String framework +) {} \ No newline at end of file diff --git a/app_java/src/main/java/devops/core/app_java/dto/SystemBlock.java b/app_java/src/main/java/devops/core/app_java/dto/SystemBlock.java new file mode 100644 index 0000000000..28ca20a7c4 --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/dto/SystemBlock.java @@ -0,0 +1,10 @@ +package devops.core.app_java.dto; + +public record SystemBlock( + String hostname, + String platform, + String platformVersion, + String architecture, + int cpuCount, + String javaVersion +) {} \ No newline at end of file diff --git a/app_java/src/main/java/devops/core/app_java/exception/GlobalExceptionHandler.java b/app_java/src/main/java/devops/core/app_java/exception/GlobalExceptionHandler.java new file mode 100644 index 0000000000..5003a0532c --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/exception/GlobalExceptionHandler.java @@ -0,0 +1,47 @@ +package devops.core.app_java.exception; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.http.HttpStatus; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.ExceptionHandler; +import org.springframework.web.bind.annotation.RestControllerAdvice; +import org.springframework.web.servlet.resource.NoResourceFoundException; + +import java.time.Instant; + +@RestControllerAdvice +public class GlobalExceptionHandler { + + private static final Logger log = 
LoggerFactory.getLogger(GlobalExceptionHandler.class); + + /** + * Handle `NoResourceFoundException` exception and return 404 formated result. + */ + @ExceptionHandler(NoResourceFoundException.class) + public ResponseEntity handleNotFound(Exception ex) { + log.error("Unhandled exception", ex); + ApiError body = new ApiError( + "Not found", + "No resource found", + Instant.now().toString() + ); + return ResponseEntity.status(HttpStatus.NOT_FOUND).body(body); + } + + /** + * Handle all exceptions and return formated result. + */ + @ExceptionHandler(Exception.class) + public ResponseEntity handleAny(Exception ex) { + log.error("Unhandled exception", ex); + ApiError body = new ApiError( + "Internal Server Error", + "An unexpected error occurred", + Instant.now().toString() + ); + return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(body); + } + + public record ApiError(String error, String message, String timestamp) {} +} \ No newline at end of file diff --git a/app_java/src/main/java/devops/core/app_java/service/InfoService.java b/app_java/src/main/java/devops/core/app_java/service/InfoService.java new file mode 100644 index 0000000000..6ba26ce510 --- /dev/null +++ b/app_java/src/main/java/devops/core/app_java/service/InfoService.java @@ -0,0 +1,98 @@ +package devops.core.app_java.service; + + +import devops.core.app_java.configuration.ServiceInfoProperties; +import devops.core.app_java.dto.*; +import jakarta.servlet.http.HttpServletRequest; +import org.springframework.stereotype.Service; +import java.lang.management.ManagementFactory; +import java.net.InetAddress; +import java.time.Instant; +import java.time.ZoneOffset; +import java.util.List; +import java.util.Optional; + +/** + * Creates result DTOs. 
+ */ +@Service +public class InfoService { + + private final ServiceInfoProperties props; + + public InfoService(ServiceInfoProperties props) { + this.props = props; + } + + public InfoResponse buildInfoResponse(HttpServletRequest request) { + var uptime = uptime(); + return new InfoResponse( + new ServiceBlock(props.name(), props.version(), props.description(), "Spring Boot"), + systemInfo(), + new RuntimeBlock(uptime.seconds(), uptime.human(), isoUtcNow(), "UTC"), + requestInfo(request), + endpoints() + ); + } + + public HealthResponse buildHealthResponse() { + var uptime = uptime(); + return new HealthResponse("healthy", isoUtcNow(), uptime.seconds()); + } + + private SystemBlock systemInfo() { + try { + String hostname = InetAddress.getLocalHost().getHostName(); + String osName = System.getProperty("os.name"); + String osVersion = System.getProperty("os.version"); + String arch = System.getProperty("os.arch"); + int cpuCount = Runtime.getRuntime().availableProcessors(); + String javaVersion = System.getProperty("java.version"); + + return new SystemBlock(hostname, osName, osVersion, arch, cpuCount, javaVersion); + } catch (Exception e) { + // fallback without hostname if resolution fails + String osName = System.getProperty("os.name"); + String osVersion = System.getProperty("os.version"); + String arch = System.getProperty("os.arch"); + int cpuCount = Runtime.getRuntime().availableProcessors(); + String javaVersion = System.getProperty("java.version"); + + return new SystemBlock("unknown", osName, osVersion, arch, cpuCount, javaVersion); + } + } + + private RequestBlock requestInfo(HttpServletRequest request) { + String xff = Optional.ofNullable(request.getHeader("X-Forwarded-For")) + .map(h -> h.split(",")[0].trim()) + .orElse(""); + + String clientIp = !xff.isBlank() ? 
xff : request.getRemoteAddr(); + String userAgent = Optional.ofNullable(request.getHeader("User-Agent")).orElse(""); + + return new RequestBlock(clientIp, userAgent, request.getMethod(), request.getRequestURI()); + } + + private List endpoints() { + return List.of( + new EndpointDto("/", "GET", "Service information"), + new EndpointDto("/health", "GET", "Health check") + ); + } + + private String isoUtcNow() { + return Instant.now().atOffset(ZoneOffset.UTC).toString(); + } + + private Uptime uptime() { + long uptimeSeconds = ManagementFactory.getRuntimeMXBean().getUptime() / 1000; + + long hours = uptimeSeconds / 3600; + long minutes = (uptimeSeconds % 3600) / 60; + String human = hours + " hour" + (hours == 1 ? "" : "s") + ", " + minutes + " minute" + (minutes == 1 ? "" : "s"); + + return new Uptime(uptimeSeconds, human); + } + + private record Uptime(long seconds, String human) {} +} \ No newline at end of file diff --git a/app_java/src/main/resources/application.yaml b/app_java/src/main/resources/application.yaml new file mode 100644 index 0000000000..6c533ac6ad --- /dev/null +++ b/app_java/src/main/resources/application.yaml @@ -0,0 +1,9 @@ +server: + address: ${HOST:0.0.0.0} + port: ${PORT:8080} + +app: + service: + name: ${SERVICE_NAME:devops-info-service} + version: ${SERVICE_VERSION:1.0.0} + description: ${SERVICE_DESCRIPTION:DevOps course info service} \ No newline at end of file diff --git a/app_java/src/test/java/devops/core/app_java/AppJavaApplicationTests.java b/app_java/src/test/java/devops/core/app_java/AppJavaApplicationTests.java new file mode 100644 index 0000000000..fb55f72643 --- /dev/null +++ b/app_java/src/test/java/devops/core/app_java/AppJavaApplicationTests.java @@ -0,0 +1,6 @@ +package devops.core.app_java; + +/** + * Tests will be added in Lab 3. 
+ */ +class AppJavaApplicationTests {} diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..f43e5c0fa8 --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,21 @@ +# Python +__pycache__/ +*.py[cod] +*.log + +# Virtualenv +venv/ +.venv/ + +# VCS / IDE +.git/ +.vscode/ +.idea/ + +# Docs / tests (не нужны для runtime) +docs/ +tests/ +*.md + +# OS +.DS_Store diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..ba1db0ed69 --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,12 @@ +# Python +__pycache__/ +*.py[cod] +venv/ +*.log + +# IDE +.vscode/ +.idea/ + +# OS +.DS_Store diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..62f8c95717 --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,18 @@ +FROM python:3.13-slim + +WORKDIR /app + +COPY requirements.txt /app/requirements.txt +RUN pip install --no-cache-dir -r /app/requirements.txt + +RUN groupadd -g 10001 app && \ + useradd --no-log-init -m -u 10001 -g 10001 -s /usr/sbin/nologin app && \ + chown -R app:app /app + +COPY --chown=app:app app.py /app/app.py + +USER app + +EXPOSE 5000 + +CMD ["python", "app.py"] \ No newline at end of file diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..3795fc8a6e --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,89 @@ +# DevOps Info Service + +[![python-ci](https://github.com/gghost1/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg?branch=lab03)](https://github.com/gghost1/DevOps-Core-Course/actions/workflows/python-ci.yml) +[![codecov](https://codecov.io/gh/gghost1/DevOps-Core-Course/branch/lab03/graph/badge.svg)](https://codecov.io/gh/gghost1/DevOps-Core-Course) + +## Overview +A simple Python web service that returns service info, system information, runtime uptime, and request details. 
+ +## Prerequisites +- Python 3.10+ (recommended: latest available in your environment) +- pip + virtualenv + +## Installation +```bash +python3 -m venv venv +source venv/bin/activate +pip3 install -r requirements.txt +``` + +## Running the Application +```bash +python3 app.py +``` +Custom config: +```bash +PORT=8080 python3 app.py +# or +HOST=127.0.0.1 PORT=3000 DEBUG=true python3 app.py +``` +Production-like run (WSGI): +```bash +gunicorn -w 2 -b 0.0.0.0:5000 app:app +``` + +## API Endpoints +- GET `/` — Service and system information +- GET `/health` — Health check + +## Configuration +| Variable | Default | Description | +| ------------------- | -------------------------- | ------------------- | +| HOST | 0.0.0.0 | Bind address | +| PORT | 5000 | Listen port | +| DEBUG | false | Flask debug mode | +| LOG_LEVEL | INFO | Logging level | +| SERVICE_NAME | devops-info-service | Service name | +| SERVICE_VERSION | 1.0.0 | Service version | +| SERVICE_DESCRIPTION | DevOps course info service | Service description | + +## Testing +Install dev dependencies: +```bash +pip3 install -r requirements.txt -r requirements-dev.txt +``` + +### Run tests: +```bash +pytest +``` + +### Lint/format check: +```bash +ruff check . +ruff format --check . +``` + +## Docker +### Build image (local) +From the `app_python/` directory, build an image using the current folder as the build context: +```bash +docker build -t : . +``` + +### Run container +Run the container with port publishing so the service is reachable from the host: +```bash +docker run --rm -p : gghost1/devops-lab-app-python:latest +``` +Pass configuration via environment variables (the app reads HOST, PORT, DEBUG): +```bash +docker run --rm -e PORT= -p : gghost1/devops-lab-app-python:latest +``` +For local built image replace `gghost1/devops-lab-app-python:latest` on your `:`. 
+ +### Pull from Docker Hub +Pull an already published image from Docker Hub: +```bash +docker pull gghost1/devops-lab-app-python:latest +``` diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..5ddfb296a5 --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,187 @@ +""" +DevOps Info Service +LAB01: Python Web Application (Flask) +""" + +import os +import socket +import logging +import platform +from datetime import datetime, timezone +from flask import Flask, jsonify, request + +# Logging +logging.basicConfig( + level=os.getenv("LOG_LEVEL", "INFO").upper(), + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger("devops-info-service") + +# Configuration (env) +HOST = os.getenv("HOST", "0.0.0.0") +PORT = int(os.getenv("PORT", "5000")) +DEBUG = os.getenv("DEBUG", "false").lower() == "true" + +SERVICE_NAME = os.getenv("SERVICE_NAME", "devops-info-service") +SERVICE_VERSION = os.getenv("SERVICE_VERSION", "1.0.0") +SERVICE_DESCRIPTION = os.getenv("SERVICE_DESCRIPTION", "DevOps course info service") +SERVICE_FRAMEWORK = "Flask" + +# App init +app = Flask(__name__) + +START_TIME = datetime.now(timezone.utc) + + +# Helpers +def iso_utc_now() -> str: + """Current time in UTC as ISO8601 with milliseconds and Z suffix.""" + return ( + datetime.now(timezone.utc) + .isoformat(timespec="milliseconds") + .replace("+00:00", "Z") + ) + + +def _plural(n: int, one: str, many: str) -> str: + return one if n == 1 else many + + +def get_uptime() -> dict: + """Return uptime in seconds and human-readable format.""" + delta = datetime.now(timezone.utc) - START_TIME + seconds = int(delta.total_seconds()) + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + human = f"{hours} {_plural(hours, 'hour', 'hours')}, {minutes} {_plural(minutes, 'minute', 'minutes')}" + return {"seconds": seconds, "human": human} + + +def get_platform_version() -> str: + """ + Try to return a friendly OS version string (e.g., Ubuntu 
24.04). + Falls back to platform.platform(). + """ + try: + if hasattr(platform, "freedesktop_os_release"): + data = platform.freedesktop_os_release() + pretty = data.get("PRETTY_NAME") + if pretty: + return pretty + except Exception: + pass + return platform.platform() + + +def get_system_info() -> dict: + """Collect system information.""" + return { + "hostname": socket.gethostname(), + "platform": platform.system(), + "platform_version": get_platform_version(), + "architecture": platform.machine(), + "cpu_count": os.cpu_count() or 0, + "python_version": platform.python_version(), + } + + +def get_request_info() -> dict: + """Collect request information.""" + # request.remote_addr gives client address (may be proxy without X-Forwarded-For) + forwarded_for = request.headers.get("X-Forwarded-For", "").split(",")[0].strip() + client_ip = forwarded_for or request.remote_addr + + user_agent = request.headers.get("User-Agent", "") + + return { + "client_ip": client_ip, + "user_agent": user_agent, + "method": request.method, + "path": request.path, + } + + +def get_endpoints() -> list: + return [ + {"path": "/", "method": "GET", "description": "Service information"}, + {"path": "/health", "method": "GET", "description": "Health check"}, + ] + + +# Hooks +@app.before_request +def log_request(): + logger.info("%s %s from %s", request.method, request.path, request.remote_addr) + + +# Routes +@app.route("/", methods=["GET"]) +def index(): + uptime = get_uptime() + payload = { + "service": { + "name": SERVICE_NAME, + "version": SERVICE_VERSION, + "description": SERVICE_DESCRIPTION, + "framework": SERVICE_FRAMEWORK, + }, + "system": get_system_info(), + "runtime": { + "uptime_seconds": uptime["seconds"], + "uptime_human": uptime["human"], + "current_time": iso_utc_now(), + "timezone": "UTC", + }, + "request": get_request_info(), + "endpoints": get_endpoints(), + } + return jsonify(payload), 200 + + +@app.route("/health", methods=["GET"]) +def health(): + uptime = get_uptime() 
+ return ( + jsonify( + { + "status": "healthy", + "timestamp": iso_utc_now(), + "uptime_seconds": uptime["seconds"], + } + ), + 200, + ) + + +# Error handlers +@app.errorhandler(404) +def not_found(_error): + return ( + jsonify( + { + "error": "Not Found", + "message": "Endpoint does not exist", + } + ), + 404, + ) + + +@app.errorhandler(500) +def internal_error(_error): + logger.exception("Internal error") + return ( + jsonify( + { + "error": "Internal Server Error", + "message": "An unexpected error occurred", + } + ), + 500, + ) + + +# Entry point +if __name__ == "__main__": + logger.info("Starting %s on %s:%s (debug=%s)", SERVICE_NAME, HOST, PORT, DEBUG) + app.run(host=HOST, port=PORT, debug=DEBUG) diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md new file mode 100644 index 0000000000..ffbc88c4f8 --- /dev/null +++ b/app_python/docs/LAB01.md @@ -0,0 +1,164 @@ +# LAB01 — Python Web Application (DevOps Info Service) + +## 1) Framework Selection + +### Chosen framework: Flask +I chose **Flask** because it is a lightweight Python web framework that is quick to set up for small REST services and does not force a heavy project structure. +For this lab (two endpoints + JSON responses + basic best practices) Flask keeps the implementation minimal and easy to explain. 
+ +### Comparison table +| Framework | Pros | Cons | Fit for this lab | +|-----------|---------------------------------------------|------------------------------------------------------|-----------------------------| +| Flask | Lightweight, flexible, easy to learn | Fewer built-in components than full-stack frameworks | Best match (simple service) | +| FastAPI | Async-first, automatic OpenAPI docs | More concepts (ASGI, Pydantic), more setup | Good, but not required here | +| Django | Full-featured framework, batteries included | Overkill for 2 endpoints | Too heavy for LAB01 scope | + +--- + +## 2) Best Practices Applied + +### 2.1 Clean code organization (helpers + constants) +I separated logic into small functions (`get_system_info`, `get_uptime`, `get_request_info`) to keep routes readable and maintainable. + +### 2.2 Environment-based configuration (12-factor style) +The service can be configured via environment variables to run in different environments without changing code. + +```python +HOST = os.getenv("HOST", "0.0.0.0") +PORT = int(os.getenv("PORT", "5000")) +DEBUG = os.getenv("DEBUG", "false").lower() == "true" +``` + +### 2.3 Logging (visibility & debugging) +A structured logging format is configured at startup. Each request is logged via a before_request hook. + +```python +logging.basicConfig( + level=os.getenv("LOG_LEVEL", "INFO").upper(), + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger("devops-info-service") + +@app.before_request +def log_request(): + logger.info("%s %s from %s", request.method, request.path, request.remote_addr) +``` + +### 2.4 Error handling (JSON responses instead of HTML) +Custom handlers return JSON for 404 and 500, which is standard for API-style services. 
+ +```python +@app.errorhandler(404) +def not_found(_error): + return jsonify({"error": "Not Found", "message": "Endpoint does not exist"}), 404 + +@app.errorhandler(500) +def internal_error(_error): + logger.exception("Internal error") + return jsonify({"error": "Internal Server Error", "message": "An unexpected error occurred"}), 500 +``` + +### 2.5 Request data handling (headers, client IP) +Request metadata is taken from Flask’s request object (headers, path, method, remote address). + +```python +def get_request_info() -> dict: + forwarded_for = request.headers.get("X-Forwarded-For", "").split(",")[0].strip() + client_ip = forwarded_for or request.remote_addr + user_agent = request.headers.get("User-Agent", "") + return { + "client_ip": client_ip, + "user_agent": user_agent, + "method": request.method, + "path": request.path, + } +``` + +## 3) API Documentation +### 3.1 Endpoints +- GET `/` — service + system + runtime + request information. + ```json + { + "endpoints": [ + { + "description": "Service information", + "method": "GET", + "path": "/" + }, + { + "description": "Health check", + "method": "GET", + "path": "/health" + } + ], + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36" + }, + "runtime": { + "current_time": "2026-01-25T20:42:30.631Z", + "timezone": "UTC", + "uptime_human": "0 hours, 8 minutes", + "uptime_seconds": 508 + }, + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.0.0" + }, + "system": { + "architecture": "arm64", + "cpu_count": 10, + "hostname": "Mac.ufanet.ru", + "platform": "Darwin", + "platform_version": "macOS-15.6.1-arm64-arm-64bit", + "python_version": "3.12.10" + } + } + ``` +- GET `/health` — health status + timestamp + uptime.
+ ```json + { + "status": "healthy", + "timestamp": "2026-01-25T20:38:07.959Z", + "uptime_seconds": 246 + } + ``` + +### 3.2 Example requests +Run the app: +```bash +python3 app.py +# Custom config: +HOST=127.0.0.1 PORT=3000 DEBUG=true python app.py +``` +Test endpoints: +```bash +curl -i http://127.0.0.1:5000/ +curl -i http://127.0.0.1:5000/health +``` +Pretty-print JSON: +```bash +curl -s http://127.0.0.1:5000/ | python3 -m json.tool +curl -s http://127.0.0.1:5000/health | python3 -m json.tool +``` +## 4) Testing Evidence (Screenshots) +Screenshots are stored in docs/screenshots/: +- 01-main-endpoint.png — GET `/` full JSON response. + ![](screenshots/01-main-endpoint.png) +- 02-health-check.png — GET `/health` response. + ![](screenshots/02-health-check.png) +- 03-formatted-output.png — pretty-printed JSON output (python -m json.tool). + ![](screenshots/03-formatted-output.png) + +## 5) Challenges & Solutions +- OS version formatting: Different systems expose OS version differently, so I used a helper that tries `platform.freedesktop_os_release()` when available and falls back to `platform.platform()`. +- Client IP behind proxy: If the service is behind a reverse proxy, the real IP may be in `X-Forwarded-For`, so I check that header first, then fall back to `request.remote_addr`. + +## 6) GitHub Community +Starring repositories helps with discovery/bookmarking and signals support to maintainers, which can increase a project’s visibility and contributions over time. + +Following developers helps you stay aware of teammates’ work, learn from their activity, and collaborate more effectively in team projects. \ No newline at end of file diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md new file mode 100644 index 0000000000..7d378b7c6a --- /dev/null +++ b/app_python/docs/LAB02.md @@ -0,0 +1,114 @@ +# LAB02 — Docker (devops-info-service) + +## 1) Docker Best Practices Applied +### 1. 
Non-root user +**What I did:** created a dedicated user and switched to it using `USER`. +**Why it matters:** if the service can run without privileges, switching away from root reduces the impact of a container breakout or a compromised process. +```dockerfile +RUN groupadd -g 10001 app && useradd -u 10001 -g 10001 -s /usr/sbin/nologin app +USER app +``` + +### 2. Layer caching / correct layer order +**What I did:** copied requirements.txt first, installed dependencies, and only after that copied application code. +**Why it matters:** Docker builds images layer-by-layer; when a layer changes, all following layers rebuild, so keeping “rarely changing” steps (deps) earlier speeds up rebuilds. +```dockerfile +COPY requirements.txt /app/requirements.txt +RUN pip install --no-cache-dir -r /app/requirements.txt +COPY app.py /app/app.py +``` +### 3. .dockerignore usage +**What I did:** excluded dev artifacts (e.g., __pycache__, venv, .git, docs, tests) from build context. +**Why it matters:** .dockerignore removes matching files from the build context before it is sent to the builder, which improves build speed and reduces unnecessary context size. +```text +__pycache__/ +*.py[cod] +venv/ +.venv/ +.git/ +docs/ +tests/ +``` +### 4. Pinned base image version +**What I did:** used a specific Python base image tag (example: python:3.13-slim). +**Why it matters:** Docker notes that image tags can be mutable; pinning reduces unexpected changes between builds and improves reproducibility. + +## 2) Image Information & Decisions +Base image selection +- Chosen base image: python:3.13-slim +- Justification: 3.13-slim gives me a fixed major/minor Python line (3.13) while using the -slim variant, which is described as containing only the minimal Debian packages needed to run Python (i.e., smaller than the default python tag). 
+ +Final image size +- Image size: 46.16 MB +- Assessment: This is acceptable for a small info-service because it is based on a slim runtime image and includes only the Python runtime plus my dependencies. + +Layer structure explanation +- My Dockerfile is structured so that the “dependency layer” is built before the “application layer”: first `COPY requirements.txt` + `RUN pip install` ..., and only then `COPY app.py`. +- This matters because Docker builds images layer-by-layer, and once a layer changes, all layers after it must be rebuilt; by isolating dependencies in earlier layers, changing `app.py` typically invalidates only the later “application” layers, making rebuilds faster. + +Optimization choices made +- I used `pip install --no-cache-dir` so pip does not store its download cache inside the image layers, keeping the final image smaller. +- I copied only the runtime files (`requirements.txt` and `app.py`) instead of `COPY . .`, which reduces the build context and also avoids unnecessary cache invalidations when unrelated files (docs/tests) change; +Docker also recommends excluding irrelevant files using .dockerignore. + +## 3) Build & Run Process +Build output: +```terminaloutput +antipovd@Mac app_python % docker build -t devops-lab-app-python:1.0.0 . 
+[+] Building 1.4s (12/12) FINISHED docker:desktop-linux + => [internal] load build definition from Dockerfile 0.0s + => => transferring dockerfile: 400B 0.0s + => [internal] load metadata for docker.io/library/python:3.13-slim 1.3s + => [auth] library/python:pull token for registry-1.docker.io 0.0s + => [internal] load .dockerignore 0.0s + => => transferring context: 225B 0.0s + => [1/6] FROM docker.io/library/python:3.13-slim@sha256:51e1a0a317fdb6e170dc791bbeae63fac5272c82f43958ef74a34e170c6f8b18 0.0s + => => resolve docker.io/library/python:3.13-slim@sha256:51e1a0a317fdb6e170dc791bbeae63fac5272c82f43958ef74a34e170c6f8b18 0.0s + => [internal] load build context 0.0s + => => transferring context: 63B 0.0s + => CACHED [2/6] WORKDIR /app 0.0s + => CACHED [3/6] COPY requirements.txt /app/requirements.txt 0.0s + => CACHED [4/6] RUN pip install --no-cache-dir -r /app/requirements.txt 0.0s + => CACHED [5/6] RUN groupadd -g 10001 app && useradd --no-log-init -m -u 10001 -g 10001 -s /usr/sbin/nologin app && chown -R app:app /app 0.0s + => CACHED [6/6] COPY --chown=app:app app.py /app/app.py 0.0s + => exporting to image 0.0s + => => exporting layers 0.0s + => => exporting manifest sha256:35eed0b6ce59e46fcd10b30b638a9a0f7addfd08e01a02218952fb498885e5d9 0.0s + => => exporting config sha256:0767cc16251909efabf6784b39b25df7818ef387629032407e0262e8db85ca01 0.0s + => => exporting attestation manifest sha256:899fc7441ba8f375bc3d556a6ef8b1152d24e27ad5daf684565cf96badfec0a2 0.0s + => => exporting manifest list sha256:a2eddd4433b981230dbe29e106c0b6f0b61b1172cc2bbb28a96035dc41b2bcb0 0.0s + => => naming to docker.io/library/devops-lab-app-python:1.0.0 0.0s + => => unpacking to docker.io/library/devops-lab-app-python:1.0.0 0.0s +``` +Container run output: +```terminaloutput +antipovd@Mac app_python % docker run --rm -e PORT=8080 -p 8080:8080 devops-lab-app-python:1.0.0 + +2026-01-29 21:00:16,625 - devops-info-service - INFO - Starting devops-info-service on 0.0.0.0:8080 (debug=False) + 
* Serving Flask app 'app' + * Debug mode: off +2026-01-29 21:00:16,627 - werkzeug - INFO - WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. + * Running on all addresses (0.0.0.0) + * Running on http://127.0.0.1:8080 + * Running on http://172.17.0.2:8080 +2026-01-29 21:00:16,627 - werkzeug - INFO +2026-01-29 21:01:33,964 - devops-info-service - INFO - GET / from 192.168.65.1 +2026-01-29 21:01:33,967 - werkzeug - INFO - 192.168.65.1 - - [29/Jan/2026 21:01:33] "GET / HTTP/1.1" 200 - +``` +Tests output: +![](screenshots/04-formatted-output-containerized-app.png) +Docker Hub repository URL: https://hub.docker.com/r/gghost1/devops-lab-app-python + +## 4) Technical Analysis + +- My Dockerfile works because `docker build` uses a **build context**, and instructions like `COPY` can only reference files that exist inside that context (e.g., `requirements.txt` and `app.py`). +- If I changed the layer order (for example, copying the whole project before installing dependencies), I would invalidate the cache more often: Docker builds images as layers, and once a layer changes, all following layers must be rebuilt. +- The main security considerations I implemented were using a minimal, trusted base image and switching to a non-root user with `USER`, which Docker recommends when the service does not require privileges. +- `.dockerignore` improves my build because the Docker build client looks for `.dockerignore` in the root of the context and removes matching files from the context before sending it to the builder, which improves build speed (especially with a remote builder). + +## 5) Challenges & Solutions + +- One issue I ran into was making sure the app still starts correctly after switching to a non-root user; I solved it by ensuring the application directory ownership/permissions were set before `USER app`. 
+- Another issue was understanding why builds sometimes felt “slow” even for small code changes; I verified caching behavior by keeping dependencies in earlier layers and using Docker’s guidance that cache invalidation forces downstream layers to rebuild. +- For debugging, I used practical checks like rebuilding with `--no-cache` when I wanted a clean rebuild and `--pull` when I wanted to ensure the latest base image was used, which matches Docker’s best-practice guidance for rebuild behavior. +- What I learned is that build performance depends heavily on build context size and layer ordering, and that `.dockerignore` plus correct caching strategy makes rebuilds predictable and faster. \ No newline at end of file diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md new file mode 100644 index 0000000000..6b8f80cc91 --- /dev/null +++ b/app_python/docs/LAB03.md @@ -0,0 +1,150 @@ +# Lab 3 — Continuous Integration (CI/CD) + +## 1) Testing Framework & Test Design +### Testing framework choice +**Selected framework:** pytest +**Why it matters:** pytest is widely used in modern Python projects, has concise assertions, fixtures, and integrates well with CI and coverage reporting. + +### Test structure +**What I did:** created tests under `app_python/tests/` using Flask test client (no need to run the server). +**Why it matters:** unit tests should be fast, deterministic, and runnable in CI without external dependencies. + +**Covered endpoints / cases** +- `GET /` — checks JSON structure and required fields. +- `GET /health` — checks health payload. +- Error cases — `404 Not Found`, `405 Method Not Allowed`. + + +## 2) Local Test & Lint Execution +**What I did:** ran lint, formatting checks, and tests locally before pushing CI changes. +**Why it matters:** CI should confirm quality, but local checks speed up iteration and reduce broken pipeline runs. + +Lint + format check: +```terminaloutput +cd app_python +ruff check . +ruff format --check . +All checks passed! 
+3 files already formatted +``` + +Tests run: +```terminaloutput +cd app_python +pip install -r requirements.txt -r requirements-dev.txt +pytest +``` +![](screenshots/05-tests-output.png) + +Coverage run: +```terminaloutput +cd app_python +pytest --cov=. --cov-report=term-missing --cov-report=xml +..... [100%] +=============================================================================================== tests coverage =============================================================================================== +_____________________________________________________________________________ coverage: platform darwin, python 3.12.10-final-0 ______________________________________________________________________________ + +Name Stmts Miss Cover Missing +------------------------------------------------------- +app.py 69 7 90% 68-70, 172-173, 186-187 +tests/__init__.py 0 0 100% +tests/test_endpoints.py 50 0 100% +------------------------------------------------------- +TOTAL 119 7 94% +Coverage XML written to file coverage.xml +Required test coverage of 70% reached. Total coverage: 94.12% +5 passed in 0.25s +``` + +## 3) GitHub Actions CI Workflow (Python) +### Workflow file + triggers +Workflow file: `.github/workflows/python-ci.yml` + +**What I did:** configured workflow to run on push and pull_request with path filters for `app_python/**`. +**Why it matters:** in a monorepo it prevents running Python CI when only Java/docs change and reduces CI time/cost. + +### CI stages +a) Code Quality & Testing +**What I did:** CI installs deps, runs linter (ruff), checks formatting, runs pytest, generates coverage XML. +**Why it matters:** catches style and functional regressions early and keeps codebase consistent. + +b) Docker Build & Push +**What I did:** CI builds Docker image from app_python/Dockerfile and pushes to Docker Hub. +**Why it matters:** guarantees the image in registry corresponds to code that passed tests. 
+ +### Versioning strategy +**Chosen strategy:** CalVer (YYYY.MM.DD) +**Why it matters:** this service is released continuously; date-based versions are simple and reduce ambiguity. + +Docker tags produced +- On push: + - `:` (CalVer) + - `:latest` + - `:sha-` + - `:` (sanitized) +- On PR: + - `:snapshot-` + +Docker Hub link: https://hub.docker.com/r/gghost1/devops-lab-app-python + +From PR one tag for image created: +![](screenshots/06-docker-hub-image-from-pr.png) + +From Push to temporal branch (for experiments) 4 tags for image were created: +![](screenshots/07-docker-hub-image-from-push.png) + +## 4) CI Best Practices Implemented + +1. Dependency caching (pip) + **What I did:** enabled caching for pip dependencies (requirements hash based) via actions/setup-python cache. + **Why it matters:** reduces repeated downloads and speeds up CI runs. + +2. Docker layer caching (Buildx + gha cache) + **What I did:** configured docker build cache (cache-from / cache-to type=gha). + **Why it matters:** speeds up iterative image builds when only app code changes. + +3. Fail-fast / job dependency + **What I did:** Docker build/push job depends on successful lint+tests job. + **Why it matters:** prevents publishing broken images. + +4. Concurrency / cancel outdated runs + **What I did:** enabled concurrency with cancel-in-progress. + **Why it matters:** avoids wasting CI minutes on outdated commits when pushing multiple times quickly. + +5. Conditional pushing (PR vs push) + **What I did:** on PR builds/pushes only snapshot tags; on push publishes “release” tags. + **Why it matters:** PRs serve as preview builds; main branch pushes represent publishable artifacts. + +## 5) Security Scanning (Snyk) +**What I did:** integrated Snyk scanning into CI to check Python dependency vulnerabilities. +**Why it matters:** identifies known vulnerable dependencies early (supply-chain security). 
+ +Snyk run output: +```terminaloutput +Testing /home/runner/work/DevOps-Core-Course/DevOps-Core-Course/app_python... + +Organization: dima170805b +Package manager: pip +Target file: requirements.txt +Project name: app_python +Open source: no +Project path: /home/runner/work/DevOps-Core-Course/DevOps-Core-Course/app_python +Licenses: enabled + +✔ Tested 9 dependencies for known issues, no vulnerable paths found. +``` + +## 6) Status Badge + Coverage Badge +### GitHub Actions status badge +**What I did:** added workflow status badge to app_python/README.md. +**Why it matters:** makes CI state visible on the project page. + +Badge line: +![](screenshots/08-badge-line.png) + +### Coverage reporting +**What I did:** generated coverage.xml in CI and uploaded it to Codecov; added coverage badge in README. +**Why it matters:** helps track test coverage trends and prevents coverage regressions. + +Codecov report link: https://app.codecov.io/github/gghost1/DevOps-Core-Course/pull/3/tree +![](screenshots/09-codecov-report.png) \ No newline at end of file diff --git a/app_python/docs/screenshots/01-main-endpoint.png b/app_python/docs/screenshots/01-main-endpoint.png new file mode 100644 index 0000000000..18f7edb44f Binary files /dev/null and b/app_python/docs/screenshots/01-main-endpoint.png differ diff --git a/app_python/docs/screenshots/02-health-check.png b/app_python/docs/screenshots/02-health-check.png new file mode 100644 index 0000000000..1e8b05dead Binary files /dev/null and b/app_python/docs/screenshots/02-health-check.png differ diff --git a/app_python/docs/screenshots/03-formatted-output.png b/app_python/docs/screenshots/03-formatted-output.png new file mode 100644 index 0000000000..5637ef687c Binary files /dev/null and b/app_python/docs/screenshots/03-formatted-output.png differ diff --git a/app_python/docs/screenshots/04-formatted-output-containerized-app.png b/app_python/docs/screenshots/04-formatted-output-containerized-app.png new file mode 100644 index 
0000000000..04038d1504 Binary files /dev/null and b/app_python/docs/screenshots/04-formatted-output-containerized-app.png differ diff --git a/app_python/docs/screenshots/05-tests-output.png b/app_python/docs/screenshots/05-tests-output.png new file mode 100644 index 0000000000..a2ec5a0889 Binary files /dev/null and b/app_python/docs/screenshots/05-tests-output.png differ diff --git a/app_python/docs/screenshots/06-docker-hub-image-from-pr.png b/app_python/docs/screenshots/06-docker-hub-image-from-pr.png new file mode 100644 index 0000000000..c2b02d102d Binary files /dev/null and b/app_python/docs/screenshots/06-docker-hub-image-from-pr.png differ diff --git a/app_python/docs/screenshots/07-docker-hub-image-from-push.png b/app_python/docs/screenshots/07-docker-hub-image-from-push.png new file mode 100644 index 0000000000..57b8902d06 Binary files /dev/null and b/app_python/docs/screenshots/07-docker-hub-image-from-push.png differ diff --git a/app_python/docs/screenshots/08-badge-line.png b/app_python/docs/screenshots/08-badge-line.png new file mode 100644 index 0000000000..1fd4def407 Binary files /dev/null and b/app_python/docs/screenshots/08-badge-line.png differ diff --git a/app_python/docs/screenshots/09-codecov-report.png b/app_python/docs/screenshots/09-codecov-report.png new file mode 100644 index 0000000000..6fb6480465 Binary files /dev/null and b/app_python/docs/screenshots/09-codecov-report.png differ diff --git a/app_python/pytest.ini b/app_python/pytest.ini new file mode 100644 index 0000000000..f1e9710d30 --- /dev/null +++ b/app_python/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +addopts = -q --disable-warnings --maxfail=1 --cov-fail-under=70 +testpaths = tests diff --git a/app_python/requirements-dev.txt b/app_python/requirements-dev.txt new file mode 100644 index 0000000000..6da25a3589 --- /dev/null +++ b/app_python/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest>=8.0 +pytest-cov>=5.0 +ruff>=0.7 diff --git a/app_python/requirements.txt 
b/app_python/requirements.txt new file mode 100644 index 0000000000..11f5643602 --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,4 @@ +# Web Framework +Flask==3.1.0 +# WSGI HTTP server ("Green Unicorn") for running Flask in production-like mode +gunicorn==23.0.0 diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py new file mode 100644 index 0000000000..64d3994e89 --- /dev/null +++ b/app_python/tests/__init__.py @@ -0,0 +1 @@ +# Tests will be added in Lab 3 diff --git a/app_python/tests/test_endpoints.py b/app_python/tests/test_endpoints.py new file mode 100644 index 0000000000..52e2588dd2 --- /dev/null +++ b/app_python/tests/test_endpoints.py @@ -0,0 +1,77 @@ +import re + +import pytest + +from app import app as flask_app + + +@pytest.fixture() +def client(): + flask_app.config.update(TESTING=True) + with flask_app.test_client() as c: + yield c + + +def test_root_ok_json_shape(client): + resp = client.get("/") + assert resp.status_code == 200 + + data = resp.get_json() + assert isinstance(data, dict) + + # top-level keys + for key in ("service", "system", "runtime", "request", "endpoints"): + assert key in data + + # service + assert isinstance(data["service"]["name"], str) + assert isinstance(data["service"]["version"], str) + assert isinstance(data["service"]["description"], str) + assert data["service"]["framework"] == "Flask" + + # runtime + assert isinstance(data["runtime"]["uptime_seconds"], int) + assert isinstance(data["runtime"]["uptime_human"], str) + assert isinstance(data["runtime"]["current_time"], str) + assert data["runtime"]["timezone"] == "UTC" + + # endpoints list + assert isinstance(data["endpoints"], list) + paths = {e["path"] for e in data["endpoints"]} + assert "/" in paths + assert "/health" in paths + + +def test_root_respects_x_forwarded_for(client): + resp = client.get("/", headers={"X-Forwarded-For": "203.0.113.10"}) + assert resp.status_code == 200 + data = resp.get_json() + assert 
data["request"]["client_ip"] == "203.0.113.10" + + +def test_health_ok(client): + resp = client.get("/health") + assert resp.status_code == 200 + + data = resp.get_json() + assert data["status"] == "healthy" + assert isinstance(data["uptime_seconds"], int) + assert isinstance(data["timestamp"], str) + + # basic ISO-ish sanity check: ends with Z + assert data["timestamp"].endswith("Z") + assert re.match(r".+T.+\.\d{3}Z$", data["timestamp"]) + + +def test_not_found_has_json_body(client): + resp = client.get("/no-such-endpoint") + assert resp.status_code == 404 + + data = resp.get_json() + assert data["error"] == "Not Found" + assert isinstance(data["message"], str) + + +def test_method_not_allowed(client): + resp = client.post("/health") + assert resp.status_code == 405 diff --git a/pulumi/.gitignore b/pulumi/.gitignore new file mode 100644 index 0000000000..a3807e5bdb --- /dev/null +++ b/pulumi/.gitignore @@ -0,0 +1,2 @@ +*.pyc +venv/ diff --git a/pulumi/Pulumi.dev.yaml b/pulumi/Pulumi.dev.yaml new file mode 100644 index 0000000000..1cd713ba50 --- /dev/null +++ b/pulumi/Pulumi.dev.yaml @@ -0,0 +1,7 @@ +config: + lab4-pulumi:zone: ru-central1-a + lab4-pulumi:subnetCidr: 10.10.0.0/24 + lab4-pulumi:myIpCidr: 77.79.157.131/32 + lab4-pulumi:sshUser: ubuntu + lab4-pulumi:sshPublicKeyPath: /Users/antipovd/.ssh/yc_lab.pub + lab4-pulumi:imageId: fd84mnbiarffhtfrhnog diff --git a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml new file mode 100644 index 0000000000..9eb4f3e667 --- /dev/null +++ b/pulumi/Pulumi.yaml @@ -0,0 +1,11 @@ +name: lab4-pulumi +description: devops course +runtime: + name: python + options: + toolchain: pip + virtualenv: venv +config: + pulumi:tags: + value: + pulumi:template: python diff --git a/pulumi/__main__.py b/pulumi/__main__.py new file mode 100644 index 0000000000..e19e46129c --- /dev/null +++ b/pulumi/__main__.py @@ -0,0 +1,122 @@ +import os +import pulumi +from pulumi import Config, Output +import pulumi_yandex as yandex + +cfg = Config() + +zone = 
cfg.get("zone") or "ru-central1-a" +subnet_cidr = cfg.get("subnetCidr") or "10.10.0.0/24" +my_ip_cidr = cfg.require("myIpCidr") +ssh_user = cfg.get("sshUser") or "ubuntu" +ssh_pubkey_path = cfg.get("sshPublicKeyPath") or os.path.expanduser("$HOME/.ssh/yc_lab.pub") +image_id = cfg.require("imageId") + +# VM sizing (same as Terraform) +platform_id = cfg.get("platformId") or "standard-v3" +cores = int(cfg.get("cores") or "2") +core_fraction = int(cfg.get("coreFraction") or "20") +memory_gb = float(cfg.get("memoryGb") or "1") +boot_disk_gb = int(cfg.get("bootDiskGb") or "10") + +pubkey = open(ssh_pubkey_path, "r").read().strip() + +net = yandex.VpcNetwork("lab-net", name="lab-net") + +subnet = yandex.VpcSubnet( + "lab-subnet", + name="lab-subnet", + zone=zone, + network_id=net.id, + v4_cidr_blocks=[subnet_cidr], +) + +sg = yandex.VpcSecurityGroup( + "lab-sg", + name="lab-sg", + network_id=net.id, + description="lab security group", + labels={ + "project": "lab4", + "iac": "pulumi", + }, +) + +# Ingress: SSH 22 from your IP +yandex.VpcSecurityGroupRule( + "rule-ssh-22", + security_group_binding=sg.id, + direction="ingress", + protocol="TCP", + description="SSH from my IP", + v4_cidr_blocks=[my_ip_cidr], + port=22, +) + +# Ingress: HTTP 80 from anywhere +yandex.VpcSecurityGroupRule( + "rule-http-80", + security_group_binding=sg.id, + direction="ingress", + protocol="TCP", + description="HTTP", + v4_cidr_blocks=["0.0.0.0/0"], + port=80, +) + +# Ingress: app 5000 from anywhere +yandex.VpcSecurityGroupRule( + "rule-app-5000", + security_group_binding=sg.id, + direction="ingress", + protocol="TCP", + description="App port 5000", + v4_cidr_blocks=["0.0.0.0/0"], + port=5000, +) + +# Egress: allow all outbound +yandex.VpcSecurityGroupRule( + "rule-egress-any", + security_group_binding=sg.id, + direction="egress", + protocol="ANY", + description="Allow all outbound", + v4_cidr_blocks=["0.0.0.0/0"], +) + +vm = yandex.ComputeInstance( + "lab-vm", + name="lab-vm", + zone=zone, + 
platform_id=platform_id, + resources=yandex.ComputeInstanceResourcesArgs( + cores=cores, + core_fraction=core_fraction, + memory=memory_gb, + ), + boot_disk=yandex.ComputeInstanceBootDiskArgs( + initialize_params=yandex.ComputeInstanceBootDiskInitializeParamsArgs( + image_id=image_id, + size=boot_disk_gb, + ) + ), + network_interfaces=[ + yandex.ComputeInstanceNetworkInterfaceArgs( + subnet_id=subnet.id, + nat=True, + security_group_ids=[sg.id], + ) + ], + metadata={ + "ssh-keys": f"{ssh_user}:{pubkey}", + }, + labels={ + "project": "lab4", + "iac": "pulumi", + }, +) + +public_ip = vm.network_interfaces.apply(lambda nics: nics[0].nat_ip_address) +pulumi.export("public_ip", public_ip) +pulumi.export("ssh_command", Output.concat("ssh -i ~/.ssh/yc_lab ", ssh_user, "@", public_ip)) diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt new file mode 100644 index 0000000000..ab884ca4fa --- /dev/null +++ b/pulumi/requirements.txt @@ -0,0 +1,2 @@ +pulumi==3.222.0 +pulumi_yandex==0.13.0 diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000000..dc5c492752 --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,14 @@ +# Terraform +.terraform/ +*.tfstate +*.tfstate.* +.terraform.lock.hcl + +# Secrets/vars +terraform.tfvars +*.tfvars + +# SSH keys / key files +*.pem +*.key +*.json diff --git a/terraform/docs/LAB04.md b/terraform/docs/LAB04.md new file mode 100644 index 0000000000..16f3d7ce1f --- /dev/null +++ b/terraform/docs/LAB04.md @@ -0,0 +1,144 @@ +# Cloud Provider & Infrastructure + +## Cloud provider chosen and why +I chose Yandex Cloud because it is accessible from Russia, provides a free‑tier–friendly VM configuration, and has first‑class Terraform support with official documentation and local mirrors for Terraform and providers. + +## Resources created +Using Terraform and the official Yandex Cloud provider, I created: +- A VPC network and subnet in availability zone ru-central1-a with CIDR 10.10.0.0/24. 
+- A security group attached to that network with inbound rules allowing: + - SSH on port 22 only from my current public IP (77.79.157.131/32), + - HTTP on port 80 from 0.0.0.0/0, + - Application traffic on port 5000 from 0.0.0.0/0, and an egress rule allowing all outbound traffic. +- A compute instance lab-vm using platform standard-v3, with 2 vCPUs at 20% core fraction, 1 GB RAM, and a 10 GB boot disk initialized from a public Ubuntu image `fd84mnbiarffhtfrhnog`. +- A network interface on the VM with nat = true, which assigns a public (NAT) IP address and attaches the previously created security group. + +## Total cost +![](./screenshots/total-cost.png) + +# Terraform Implementation + +## Terraform version used +Terraform CLI version: terraform v1.14.5, installed on macOS via Homebrew while connected to a VPN (so the HashiCorp release download was accessible). + +## Project structure explanation +The Terraform code was organized under a dedicated terraform/ directory to keep IaC isolated from application code and to make state/config handling predictable. +Recommended structure used: +```text +terraform/ +├── main.tf # Provider + resources (VPC, subnet, SG, VM) +├── variables.tf # Input variables (zone, CIDRs, image_id, ssh paths, etc.) +├── outputs.tf # Public IP output +├── terraform.tfvars # Local values (gitignored) +└── .gitignore # Ignore state, tfvars, and sensitive files +``` +Terraform’s standard workflow (init → plan → apply) runs from this directory and stores local state files there by default. + +## Key configuration decisions +- Public access via NAT: the VM network interface was configured with NAT so the instance receives a public IPv4 address. +- Public IP output: the NAT public IP was exposed via an output using `network_interface[0].nat_ip_address`, so the address can be retrieved with `terraform output -raw public_ip`. +- SSH key injection via metadata: the VM receives the SSH public key through `metadata.ssh-keys` in the format `<ssh_user>:<ssh_public_key>`. 
+- Firewall (security group) rules: SSH (22) was restricted to the current public IP (VPN egress IP) using CIDR /32, while HTTP (80) and app port (5000) were opened as required. (This matches the “allow specific CIDRs and ports” security group model in Yandex VPC.) + +## Challenges encountered +- Terraform install/download issue: installing Terraform via Homebrew failed without VPN due to geo restrictions when downloading from HashiCorp releases; using VPN allowed installation. +- Permissions in Yandex Cloud folder: initial terraform apply failed with PermissionDenied (“Operation is not permitted in the folder”), which was resolved by granting the required roles in the target folder (or switching to credentials that had those permissions). +- SSH connectivity and changing IP: SSH initially timed out because SSH ingress was restricted to a specific /32, and the VPN/public IP changed; updating the security group rule to the current VPN IP (`77.79.157.131/32`) fixed access. + +## Terminal output from terraform plan and terraform apply +### Terraform plan +![](./screenshots/terraform-plan-01.png) +![](./screenshots/terraform-plan-02.png) +![](./screenshots/terraform-plan-03.png) +![](./screenshots/terraform-plan-04.png) + +### Terraform apply +![](./screenshots/terraform-apply-01.png) +![](./screenshots/terraform-apply-02.png) +![](./screenshots/terraform-apply-03.png) +![](./screenshots/terraform-apply-04.png) +![](./screenshots/terraform-apply-05.png) + +### Proof of SSH access to VM +After applying the configuration and updating the security group to allow SSH only from the current VPN IP (`77.79.157.131/32`), SSH access to the VM succeeds using the command: +```bash +ssh -i ~/.ssh/yc_lab ubuntu@84.201.158.161 +``` +![](./screenshots/ssh-connection.png) + +# Pulumi Implementation +## Pulumi version and language used +- Language: Python (Pulumi project created from the python template, using a virtual environment and pulumi-yandex provider package). 
+- Pulumi CLI version: v3.222.0 + +## How code differs from Terraform +Terraform (HCL): infrastructure is described declaratively using resource blocks, and inputs/outputs are typically split across `main.tf`, `variables.tf`, and `outputs.tf`. +Pulumi (Python): infrastructure is defined by creating resource objects in `__main__.py`, configuration is read from stack config (`pulumi config set ...`), and outputs are exported with `pulumi.export(...)`. + +## Advantages you discovered +- Real programming language: Python made it easy to reuse variables and build values programmatically (for example, reading the SSH public key from a file and composing metadata strings). +- Stack-based configuration: Pulumi uses stacks to separate environments (e.g., dev), and each stack has its own configuration values. +- Safer secret handling: Pulumi supports marking config values as secrets using --secret, storing them encrypted in the backend/state. + +## Challenges encountered +- SSH key path handling: the program initially failed because Python does not expand ~ in paths (e.g., `~/.ssh/yc_lab.pub`), which caused a FileNotFoundError until the path was set using a full `$HOME/...` path or expanded in code. +- Provider authentication not set: the Yandex provider failed until `YC_TOKEN` (or a service account key file) plus the required cloud/folder identifiers were provided. +- Security group rules API difference: unlike Terraform, VpcSecurityGroup in pulumi-yandex did not accept ingress/egress directly, so rules had to be created as separate VpcSecurityGroupRule resources. +- Zone requirement: VM creation failed until the availability zone was explicitly set (either in the resource or provider configuration). +- IP restrictions + VPN changes: SSH access depended on the current public IP; when the VPN/public IP changed, the SSH rule (`myIpCidr` /32) had to be updated to the new IP to allow port 22. 
+ +## Pulumi preview and up output + +### Pulumi preview +![](./screenshots/pulumi-preview.png) + +### Pulumi up +![](./screenshots/pulumi-up-01.png) +![](./screenshots/pulumi-up-02.png) + +### Proof of SSH access to VM +After applying the configuration and updating the security group to allow SSH only from the current VPN IP (`77.79.157.131/32`), SSH access to the VM succeeds using the command: +```bash +ssh -i ~/.ssh/yc_lab ubuntu@93.77.184.25 +``` +![](./screenshots/ssh-connection-pulumi.png) + +# Terraform vs Pulumi Comparison +## Ease of Learning +- Terraform was easier to learn for this lab because the workflow is very standardized (init → plan → apply → destroy) and the HCL syntax is purpose-built for infrastructure. +- Pulumi had a steeper start because I needed to set up a Python virtual environment, install provider packages, and understand how stacks/config work. + +Once running, Pulumi became easier to extend because it uses normal programming constructs (variables, functions), but the initial setup cost was higher. + +## Code Readability +- For simple infrastructure like “one VM + network + firewall,” Terraform felt more readable because the HCL blocks map directly to resources and are short. +- Pulumi Python was readable too, but more verbose due to object constructors and handling outputs/config. + +I personally find Terraform easier to scan quickly, while Pulumi is easier to refactor as the project grows. + +## Debugging +- Terraform debugging was more straightforward: errors usually point to a specific resource block and line in `.tf` files, and terraform plan helps confirm intended changes. +- Pulumi debugging involved both cloud/provider errors and Python/runtime errors (for example, path handling and missing provider config), so it required checking stack config, environment variables, and program exceptions. 
+ +Pulumi’s preview is helpful, but troubleshooting sometimes felt more “software-like” than “declarative IaC-like.” + +## Documentation +- Terraform documentation and examples felt stronger overall because Terraform has a very large ecosystem and many provider examples, and the CLI behavior is well documented. +- Pulumi docs are good, especially around stacks, config, and state backends, but provider-specific examples for less common clouds can be more limited. + +For Yandex Cloud specifically, I relied more on provider API docs and trial-and-error (e.g., SG rules as separate resources). + +## Use Case +- I would use Terraform when I need a standard, predictable IaC workflow, straightforward resource definitions, and maximum portability across teams and CI/CD systems. +- I would use Pulumi when I need real programming language features (complex conditionals, loops, code reuse), want stack-based configuration per environment, or want secret handling integrated into the IaC workflow. + +For labs and simple infra, Terraform is usually the fastest; for larger “infrastructure + application” projects, Pulumi can scale better in code structure. + +# Lab 5 Preparation & Cleanup +## VM for Lab 5: +- Are you keeping your VM for Lab 5? No +- If no: What will you use for Lab 5? 
Will recreate cloud VM + +## Cleanup Status: +The terminal output is not saved, but cloud console screenshot is accessible +![](./screenshots/cloud-console.png) \ No newline at end of file diff --git a/terraform/docs/screenshots/cloud-console.png b/terraform/docs/screenshots/cloud-console.png new file mode 100644 index 0000000000..61a758b57e Binary files /dev/null and b/terraform/docs/screenshots/cloud-console.png differ diff --git a/terraform/docs/screenshots/pulumi-preview.png b/terraform/docs/screenshots/pulumi-preview.png new file mode 100644 index 0000000000..70154d25b9 Binary files /dev/null and b/terraform/docs/screenshots/pulumi-preview.png differ diff --git a/terraform/docs/screenshots/pulumi-up-01.png b/terraform/docs/screenshots/pulumi-up-01.png new file mode 100644 index 0000000000..cf8c13fbf8 Binary files /dev/null and b/terraform/docs/screenshots/pulumi-up-01.png differ diff --git a/terraform/docs/screenshots/pulumi-up-02.png b/terraform/docs/screenshots/pulumi-up-02.png new file mode 100644 index 0000000000..55612e3b39 Binary files /dev/null and b/terraform/docs/screenshots/pulumi-up-02.png differ diff --git a/terraform/docs/screenshots/ssh-connection-pulumi.png b/terraform/docs/screenshots/ssh-connection-pulumi.png new file mode 100644 index 0000000000..3a6e5ac829 Binary files /dev/null and b/terraform/docs/screenshots/ssh-connection-pulumi.png differ diff --git a/terraform/docs/screenshots/ssh-connection.png b/terraform/docs/screenshots/ssh-connection.png new file mode 100644 index 0000000000..aa31aeb85a Binary files /dev/null and b/terraform/docs/screenshots/ssh-connection.png differ diff --git a/terraform/docs/screenshots/terraform-apply-01.png b/terraform/docs/screenshots/terraform-apply-01.png new file mode 100644 index 0000000000..93654f2062 Binary files /dev/null and b/terraform/docs/screenshots/terraform-apply-01.png differ diff --git a/terraform/docs/screenshots/terraform-apply-02.png b/terraform/docs/screenshots/terraform-apply-02.png new 
file mode 100644 index 0000000000..3f469e14de Binary files /dev/null and b/terraform/docs/screenshots/terraform-apply-02.png differ diff --git a/terraform/docs/screenshots/terraform-apply-03.png b/terraform/docs/screenshots/terraform-apply-03.png new file mode 100644 index 0000000000..b9be03bee0 Binary files /dev/null and b/terraform/docs/screenshots/terraform-apply-03.png differ diff --git a/terraform/docs/screenshots/terraform-apply-04.png b/terraform/docs/screenshots/terraform-apply-04.png new file mode 100644 index 0000000000..017178f38d Binary files /dev/null and b/terraform/docs/screenshots/terraform-apply-04.png differ diff --git a/terraform/docs/screenshots/terraform-apply-05.png b/terraform/docs/screenshots/terraform-apply-05.png new file mode 100644 index 0000000000..6a544efe68 Binary files /dev/null and b/terraform/docs/screenshots/terraform-apply-05.png differ diff --git a/terraform/docs/screenshots/terraform-plan-01.png b/terraform/docs/screenshots/terraform-plan-01.png new file mode 100644 index 0000000000..5d06bb1c18 Binary files /dev/null and b/terraform/docs/screenshots/terraform-plan-01.png differ diff --git a/terraform/docs/screenshots/terraform-plan-02.png b/terraform/docs/screenshots/terraform-plan-02.png new file mode 100644 index 0000000000..0c2b768e1e Binary files /dev/null and b/terraform/docs/screenshots/terraform-plan-02.png differ diff --git a/terraform/docs/screenshots/terraform-plan-03.png b/terraform/docs/screenshots/terraform-plan-03.png new file mode 100644 index 0000000000..c3d446cdde Binary files /dev/null and b/terraform/docs/screenshots/terraform-plan-03.png differ diff --git a/terraform/docs/screenshots/terraform-plan-04.png b/terraform/docs/screenshots/terraform-plan-04.png new file mode 100644 index 0000000000..8e1b9a48d1 Binary files /dev/null and b/terraform/docs/screenshots/terraform-plan-04.png differ diff --git a/terraform/docs/screenshots/total-cost.png b/terraform/docs/screenshots/total-cost.png new file mode 100644 
index 0000000000..277041846c Binary files /dev/null and b/terraform/docs/screenshots/total-cost.png differ diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000000..56a882309f --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,87 @@ +terraform { + required_providers { + yandex = { + source = "yandex-cloud/yandex" + } + } +} + +provider "yandex" { + zone = var.zone +} + +resource "yandex_vpc_network" "net" { + name = "lab-net" +} + +resource "yandex_vpc_subnet" "subnet" { + name = "lab-subnet" + zone = var.zone + network_id = yandex_vpc_network.net.id + v4_cidr_blocks = [var.subnet_cidr] +} + +resource "yandex_vpc_security_group" "sg" { + name = "lab-sg" + network_id = yandex_vpc_network.net.id + + ingress { + protocol = "TCP" + description = "SSH from my IP" + v4_cidr_blocks = [var.my_ip_cidr] + port = 22 + } + + ingress { + protocol = "TCP" + description = "HTTP" + v4_cidr_blocks = ["0.0.0.0/0"] + port = 80 + } + + ingress { + protocol = "TCP" + description = "App port 5000" + v4_cidr_blocks = ["0.0.0.0/0"] + port = 5000 + } + + egress { + protocol = "ANY" + description = "Allow all outbound" + v4_cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "yandex_compute_instance" "vm" { + name = "lab-vm" + platform_id = var.platform_id + + resources { + cores = var.cores + core_fraction = var.core_fraction + memory = var.memory_gb + } + + boot_disk { + initialize_params { + image_id = var.image_id + size = var.boot_disk_gb + } + } + + network_interface { + subnet_id = yandex_vpc_subnet.subnet.id + nat = true + security_group_ids = [yandex_vpc_security_group.sg.id] + } + + metadata = { + ssh-keys = "${var.ssh_user}:${file(var.ssh_public_key_path)}" + } + + labels = { + project = "lab4" + iac = "terraform" + } +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 0000000000..66decbfb9e --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,3 @@ +output "public_ip" { + value = 
yandex_compute_instance.vm.network_interface[0].nat_ip_address +} diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 0000000000..f2560b16f5 --- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,59 @@ +variable "zone" { + type = string + description = "YC availability zone" + default = "ru-central1-a" +} + +variable "subnet_cidr" { + type = string + description = "Subnet CIDR" + default = "10.10.0.0/24" +} + +variable "my_ip_cidr" { + type = string + description = "Your public IP in CIDR /32 for SSH access" +} + +variable "ssh_user" { + type = string + description = "Linux user for SSH" + default = "ubuntu" +} + +variable "ssh_public_key_path" { + type = string + description = "Path to public SSH key" + default = "~/.ssh/yc_lab.pub" +} + +variable "platform_id" { + type = string + description = "YC platform id" + default = "standard-v3" +} + +variable "cores" { + type = number + default = 2 +} + +variable "core_fraction" { + type = number + default = 20 +} + +variable "memory_gb" { + type = number + default = 1 +} + +variable "boot_disk_gb" { + type = number + default = 10 +} + +variable "image_id" { + type = string + description = "Boot disk image id" +}