diff --git a/.github/workflows/terraform-ci.yml b/.github/workflows/terraform-ci.yml
new file mode 100644
index 0000000000..b3fd54c6a8
--- /dev/null
+++ b/.github/workflows/terraform-ci.yml
@@ -0,0 +1,45 @@
+name: Terraform CI
+
+on:
+ pull_request:
+ paths:
+ - 'terraform/**'
+ - '.github/workflows/terraform-ci.yml'
+ push:
+ paths:
+ - 'terraform/**'
+ - '.github/workflows/terraform-ci.yml'
+
+jobs:
+ validate:
+ name: Validate Terraform
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: terraform/
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Terraform
+ uses: hashicorp/setup-terraform@v3
+ with:
+ terraform_version: "1.9.0"
+
+ - name: Terraform Format Check
+ run: terraform fmt -check -recursive
+
+ - name: Terraform Init
+ run: terraform init -backend=false
+
+ - name: Terraform Validate
+ run: terraform validate
+
+ - name: Setup TFLint
+ uses: terraform-linters/setup-tflint@v4
+ with:
+ tflint_version: latest
+
+ - name: Run TFLint
+ run: tflint --format compact
diff --git a/.gitignore b/.gitignore
index 30d74d2584..a770e5c7f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,25 @@
-test
\ No newline at end of file
+# Terraform
+*.tfstate
+*.tfstate.*
+.terraform/
+.terraform.lock.hcl
+terraform.tfvars
+*.tfvars
+crash.log
+
+# Pulumi
+venv/
+pulumi/venv/
+pulumi/Pulumi.*.yaml
+__pycache__/
+*.pyc
+
+# Credentials
+*.pem
+*.key
+yc-key.json
+
+# Ansible
+*.retry
+ansible/.vault_pass
+ansible/inventory/*.pyc
\ No newline at end of file
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000000..13566b81b0
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/DevOps-Core-Course.iml b/.idea/DevOps-Core-Course.iml
new file mode 100644
index 0000000000..5e764c4f0b
--- /dev/null
+++ b/.idea/DevOps-Core-Course.iml
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/copilot.data.migration.ask2agent.xml b/.idea/copilot.data.migration.ask2agent.xml
new file mode 100644
index 0000000000..1f2ea11e7f
--- /dev/null
+++ b/.idea/copilot.data.migration.ask2agent.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/material_theme_project_new.xml b/.idea/material_theme_project_new.xml
new file mode 100644
index 0000000000..bd7edbb134
--- /dev/null
+++ b/.idea/material_theme_project_new.xml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000000..71d30f3527
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000000..35eb1ddfbb
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 0000000000..54b3683b8c
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,12 @@
+[defaults]
+inventory = inventory/hosts.ini
+roles_path = roles
+host_key_checking = False
+remote_user = ubuntu
+retry_files_enabled = False
+vault_password_file = .vault_pass
+
+[privilege_escalation]
+become = True
+become_method = sudo
+become_user = root
diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md
new file mode 100644
index 0000000000..6643887aaa
--- /dev/null
+++ b/ansible/docs/LAB05.md
@@ -0,0 +1,503 @@
+# Lab 05 — Ansible Fundamentals
+
+## 1. Architecture Overview
+
+**Ansible version:** ansible-core 2.20.3
+**Control node OS:** Ubuntu 24.04 LTS (local machine)
+**Target VM OS:** Ubuntu 24.04 LTS (Yandex Cloud, recreated using Lab 4 Terraform code)
+**Cloud provider:** Yandex Cloud, zone ru-central1-a, VM public IP: 89.169.131.155
+
+### Role structure
+
+```
+ansible/
+├── inventory/
+│ ├── hosts.ini # Static inventory
+│ ├── group_vars/
+│ │ └── all.yml # Encrypted with Ansible Vault
+│ ├── yandex.yml # Dynamic inventory notes
+│ └── yandex_inventory.py # Dynamic inventory script (Yandex Cloud API)
+├── roles/
+│ ├── common/ # Basic system setup
+│ │ ├── tasks/main.yml
+│ │ └── defaults/main.yml
+│ ├── docker/ # Docker installation
+│ │ ├── tasks/main.yml
+│ │ ├── handlers/main.yml
+│ │ └── defaults/main.yml
+│ └── app_deploy/ # Run our Python app in Docker
+│ ├── tasks/main.yml
+│ ├── handlers/main.yml
+│ └── defaults/main.yml
+├── playbooks/
+│ ├── site.yml # All roles together
+│ ├── provision.yml # System setup only
+│ └── deploy.yml # App deployment only
+├── ansible.cfg
+└── docs/
+ └── LAB05.md
+```
+
+### Why roles instead of one big playbook?
+
+Roles keep things organized. Each role does one specific job. If I need Docker on another project, I just copy the `docker` role. A monolithic playbook would be one huge file that is hard to read and impossible to reuse. Roles are the professional way to write Ansible.
+
+---
+
+## 2. Roles Documentation
+
+### common role
+
+**Purpose:** Basic server setup that any Ubuntu server needs before anything else.
+
+**Tasks:**
+- Update apt package cache (with `cache_valid_time=3600` so it does not update if it was updated less than an hour ago)
+- Install essential packages: python3-pip, curl, git, vim, htop, wget, unzip
+- Set timezone to Europe/Moscow
+
+**Variables (`defaults/main.yml`):**
+```yaml
+common_packages:
+ - python3-pip
+ - curl
+ - git
+ - vim
+ - htop
+ - wget
+ - unzip
+
+common_timezone: "Europe/Moscow"
+```
+
+**Handlers:** None — apt installs do not require a service restart.
+
+**Dependencies:** None.
+
+---
+
+### docker role
+
+**Purpose:** Install Docker CE on Ubuntu following the official Docker installation steps, translated to Ansible tasks.
+
+**Tasks:**
+1. Install prerequisites (ca-certificates, curl, gnupg)
+2. Create `/etc/apt/keyrings` directory with correct permissions
+3. Download Docker's official GPG key
+4. Add Docker apt repository using `{{ ansible_distribution_release }}` fact (works on Ubuntu 22.04 and 24.04 without changes)
+5. Install docker-ce, docker-ce-cli, containerd.io, docker-buildx-plugin, docker-compose-plugin
+6. Start and enable Docker service
+7. Add `ubuntu` user to docker group
+8. Install python3-docker (required for Ansible's docker modules)
+
+**Variables (`defaults/main.yml`):**
+```yaml
+docker_user: ubuntu
+```
+
+**Handlers (`handlers/main.yml`):**
+```yaml
+- name: restart docker
+ service:
+ name: docker
+ state: restarted
+```
+Triggered when Docker packages are installed (via `notify: restart docker`).
+
+**Dependencies:** common role (apt cache should be updated first).
+
+---
+
+### app_deploy role
+
+**Purpose:** Pull the Python app Docker image from Docker Hub and run it as a container.
+
+**Tasks:**
+1. Log in to Docker Hub using vault credentials (`no_log: true` so password never appears in output)
+2. Pull Docker image
+3. Remove old container if it exists (idempotent cleanup)
+4. Start new container with port mapping `5000:5000` and `restart_policy: unless-stopped`
+5. Wait for port 5000 to open (confirms container started)
+6. Verify `/health` endpoint returns HTTP 200
+
+**Variables (`defaults/main.yml`):**
+```yaml
+app_port: 5000
+app_restart_policy: unless-stopped
+app_env_vars: {}
+```
+
+Variables from vault (`inventory/group_vars/all.yml`):
+```yaml
+dockerhub_username: blxxdclxud
+dockerhub_password:
+app_name: devops-info-service
+docker_image: "{{ dockerhub_username }}/{{ app_name }}"
+docker_image_tag: latest
+app_port: 5000
+app_container_name: "{{ app_name }}"
+```
+
+**Handlers (`handlers/main.yml`):**
+```yaml
+- name: restart app container
+ community.docker.docker_container:
+ name: "{{ app_container_name }}"
+ state: started
+ restart: yes
+```
+
+**Dependencies:** docker role.
+
+---
+
+## 3. Idempotency Demonstration
+
+### First run — `ansible-playbook playbooks/provision.yml`
+
+```
+PLAY [Provision web servers] ***************************************************
+
+TASK [Gathering Facts] *********************************************************
+ok: [lab-vm]
+
+TASK [common : Update apt cache] ***********************************************
+changed: [lab-vm]
+
+TASK [common : Install common packages] ****************************************
+changed: [lab-vm]
+
+TASK [common : Set timezone] ***************************************************
+changed: [lab-vm]
+
+TASK [docker : Install required packages for Docker repo] **********************
+ok: [lab-vm]
+
+TASK [docker : Create keyrings directory] **************************************
+ok: [lab-vm]
+
+TASK [docker : Add Docker GPG key] *********************************************
+changed: [lab-vm]
+
+TASK [docker : Add Docker repository] ******************************************
+changed: [lab-vm]
+
+TASK [docker : Install Docker packages] ****************************************
+changed: [lab-vm]
+
+TASK [docker : Ensure Docker service is started and enabled] *******************
+ok: [lab-vm]
+
+TASK [docker : Add user to docker group] ***************************************
+changed: [lab-vm]
+
+TASK [docker : Install python3-docker for Ansible docker modules] **************
+changed: [lab-vm]
+
+RUNNING HANDLER [docker : restart docker] **************************************
+changed: [lab-vm]
+
+PLAY RECAP *********************************************************************
+lab-vm : ok=13 changed=9 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+```
+
+9 tasks changed on first run because everything was installed fresh. The handler ran once at the end to restart Docker after packages were installed.
+
+### Second run — `ansible-playbook playbooks/provision.yml`
+
+```
+PLAY [Provision web servers] ***************************************************
+
+TASK [Gathering Facts] *********************************************************
+ok: [lab-vm]
+
+TASK [common : Update apt cache] ***********************************************
+ok: [lab-vm]
+
+TASK [common : Install common packages] ****************************************
+ok: [lab-vm]
+
+TASK [common : Set timezone] ***************************************************
+ok: [lab-vm]
+
+TASK [docker : Install required packages for Docker repo] **********************
+ok: [lab-vm]
+
+TASK [docker : Create keyrings directory] **************************************
+ok: [lab-vm]
+
+TASK [docker : Add Docker GPG key] *********************************************
+ok: [lab-vm]
+
+TASK [docker : Add Docker repository] ******************************************
+ok: [lab-vm]
+
+TASK [docker : Install Docker packages] ****************************************
+ok: [lab-vm]
+
+TASK [docker : Ensure Docker service is started and enabled] *******************
+ok: [lab-vm]
+
+TASK [docker : Add user to docker group] ***************************************
+ok: [lab-vm]
+
+TASK [docker : Install python3-docker for Ansible docker modules] **************
+ok: [lab-vm]
+
+PLAY RECAP *********************************************************************
+lab-vm : ok=12 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+```
+
+**changed=0 on second run.** The handler did not even trigger because no packages were reinstalled.
+
+### Analysis
+
+**What changed on first run:** Almost everything, because the VM was a fresh Ubuntu with no Docker or common packages.
+
+**What stayed `ok` on second run:**
+- `apt` module checks if each package is already at `state: present` — if yes, it does nothing
+- `file` module checks if the directory already has the correct permissions
+- `get_url` skips the download when the destination file already exists (it only re-downloads if `force: yes` is set or a `checksum` is supplied and mismatches)
+- `apt_repository` checks if the repo line is already in the sources list
+- `service` checks if Docker is already started and enabled
+- `user` checks if ubuntu is already in the docker group
+
+**What makes our roles idempotent:** We use Ansible's declarative modules (`apt`, `service`, `file`, `user`, `get_url`, `apt_repository`) instead of `shell` or `command`. These modules always check current state before making a change. If the state already matches the desired state, they do nothing.
+
+---
+
+## 4. Ansible Vault Usage
+
+### How credentials are stored
+
+All sensitive data lives in `inventory/group_vars/all.yml`, which is encrypted with Ansible Vault. The file in git looks like this:
+
+```
+$ANSIBLE_VAULT;1.1;AES256
+32386331623939663963666531666434613830323232613238396234643063373738613764303939
+6235346663643761326237373864353263323335336336360a656439343563613939353830393938
+...
+```
+
+It is completely unreadable without the vault password.
+
+### Vault password management
+
+The vault password is stored in `ansible/.vault_pass` (plain text file). This file is in `.gitignore` so it never gets committed. The `ansible.cfg` points to it automatically:
+
+```ini
+vault_password_file = .vault_pass
+```
+
+### Commands used
+
+```bash
+# Encrypt the file after writing plaintext
+ansible-vault encrypt inventory/group_vars/all.yml --vault-password-file .vault_pass --encrypt-vault-id default
+
+# View encrypted file to verify content
+ansible-vault view inventory/group_vars/all.yml --vault-password-file .vault_pass
+
+# Edit encrypted file
+ansible-vault edit inventory/group_vars/all.yml
+```
+
+### Proof of encryption (ansible-vault view output)
+
+```
+---
+# Docker Hub credentials
+dockerhub_username: blxxdclxud
+dockerhub_password: dckr_pat_***************************
+
+# Application configuration
+app_name: devops-info-service
+docker_image: "{{ dockerhub_username }}/{{ app_name }}"
+docker_image_tag: latest
+app_port: 5000
+app_container_name: "{{ app_name }}"
+```
+
+### Why Ansible Vault is necessary
+
+If we committed the Docker Hub password to git, anyone with access to the repo could pull our images without permission. Vault encrypts with AES-256, so the encrypted file is safe to commit. The only secret that must be kept out of git is the vault password file itself (`.vault_pass`), which is in `.gitignore`.
+
+---
+
+## 5. Deployment Verification
+
+### Terminal output from `ansible-playbook playbooks/deploy.yml`
+
+```
+PLAY [Deploy application] ******************************************************
+
+TASK [Gathering Facts] *********************************************************
+ok: [lab-vm]
+
+TASK [app_deploy : Log in to Docker Hub] ***************************************
+ok: [lab-vm]
+
+TASK [app_deploy : Pull Docker image] ******************************************
+changed: [lab-vm]
+
+TASK [app_deploy : Remove old container if exists] *****************************
+ok: [lab-vm]
+
+TASK [app_deploy : Run application container] **********************************
+changed: [lab-vm]
+
+TASK [app_deploy : Wait for application to be ready] ***************************
+ok: [lab-vm]
+
+TASK [app_deploy : Verify health endpoint] *************************************
+ok: [lab-vm]
+
+TASK [app_deploy : Print health check result] **********************************
+ok: [lab-vm] => {
+ "msg": "App is healthy: 200"
+}
+
+PLAY RECAP *********************************************************************
+lab-vm : ok=8 changed=2 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+```
+
+### Container status — `ansible webservers -a "docker ps"`
+
+```
+lab-vm | CHANGED | rc=0 >>
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+7708f3ef9215 blxxdclxud/devops-info-service:latest "python -m uvicorn a…" 22 seconds ago Up 20 seconds (healthy) 0.0.0.0:5000->5000/tcp devops-info-service
+```
+
+### Health check — `curl http://89.169.131.155:5000/health`
+
+```json
+{"status":"healthy","timestamp":"2026-02-26T20:56:39.121836Z","uptime_seconds":19}
+```
+
+### Main endpoint — `curl http://89.169.131.155:5000/`
+
+```json
+{"service":{"name":"devops-info-service","version":"1.0.0","description":"DevOps course info service","framework":"FastAPI"},"system":{"hostname":"7708f3ef9215","platform":"Linux","platform_version":"Linux-6.8.0-100-generic-x86_64-with-glibc2.41","architecture":"x86_64","cpu_count":2,"python_version":"3.13.12"},"runtime":{"uptime_seconds":20,"uptime_human":"0 hours, 0 minutes","current_time":"2026-02-26T20:56:39.536715Z","timezone":"UTC"},"request":{"client_ip":"80.136.142.219","user_agent":"curl/8.5.0","method":"GET","path":"/"},"endpoints":[{"path":"/","method":"GET","description":"Service information"},{"path":"/health","method":"GET","description":"Health check"}]}
+```
+
+---
+
+## 6. Key Decisions
+
+**Why use roles instead of plain playbooks?**
+Roles split code into focused, reusable pieces. If I need to install Docker on a different project, I just copy the `docker` role. A plain playbook would be one huge file where everything is mixed together, making it hard to read and impossible to reuse across projects.
+
+**How do roles improve reusability?**
+Each role is self-contained with its own variables, handlers, and tasks. Other playbooks can include just the roles they need. For example, `provision.yml` uses `common` and `docker`, while `deploy.yml` only uses `app_deploy`. You can share roles via Ansible Galaxy.
+
+**What makes a task idempotent?**
+Using Ansible's built-in modules instead of shell commands. Modules like `apt`, `service`, `file`, and `user` check the current state first. They only make a change if the current state is different from the desired state. Running `apt: name=docker-ce state=present` ten times has the same result as running it once.
+
+**How do handlers improve efficiency?**
+Handlers only run once at the end of a play, even if notified multiple times. If ten tasks all notify `restart docker`, Docker restarts only once. Without handlers, you would either restart too many times or forget to restart at all.
+
+**Why is Ansible Vault necessary?**
+We need Docker Hub credentials to pull the private image. Storing them as plaintext in the repo is a security risk. Vault encrypts with AES-256 so the encrypted file is safe to commit. Only the vault password needs to stay secret, and we keep it out of git via `.gitignore`.
+
+---
+
+## 7. Challenges
+
+- The VM from Lab 4 was already destroyed, so it was recreated with `terraform apply` from existing Lab 4 code
+- `ansible_distribution_release` Ansible fact is needed in the Docker repo string to work on different Ubuntu versions automatically
+- `python3-docker` must be installed on the target VM for Ansible docker modules to work — easy to forget
+- `no_log: true` on the Docker Hub login task is required to prevent the password appearing in Ansible output
+- `group_vars` must live next to the inventory source (`inventory/group_vars/`) or the playbook directory — this is standard Ansible variable-loading behavior (not a 2.20.x regression), but it is easy to miss when playbooks sit in a subdirectory
+- The official `yandex.cloud` Ansible collection is not yet available on Ansible Galaxy for ansible-core 2.20.x, so dynamic inventory was implemented using a custom Python script with the `yandexcloud` SDK
+
+---
+
+## Bonus: Dynamic Inventory with Yandex Cloud
+
+### Why dynamic inventory?
+
+With static inventory, the VM IP must be updated manually every time the VM is recreated. With dynamic inventory, Ansible queries the Yandex Cloud API directly and always gets the current IP. If the VM is destroyed and recreated with a new IP, playbooks still work with no changes.
+
+### Setup
+
+**Install the Yandex Cloud Python SDK:**
+```bash
+pip install --break-system-packages yandexcloud grpcio
+```
+
+**Inventory script:** `inventory/yandex_inventory.py`
+
+The script:
+1. Loads the service account key (same JSON key used in Lab 4 Terraform)
+2. Calls the Yandex Compute API to list all instances in the folder
+3. Filters only RUNNING instances
+4. For each instance, extracts the public NAT IP
+5. Groups VMs with label `project=devops-lab04` into the `webservers` group
+6. Returns JSON in the Ansible dynamic inventory format
+
+### Authentication
+
+Same service account key file used in Lab 4 Terraform (`/home/blxxdclxud/yc-key.json`). The key file is in `.gitignore` on both Terraform and Ansible sides.
+
+### How cloud metadata maps to Ansible variables
+
+| Ansible variable | Yandex Cloud field |
+|---|---|
+| `ansible_host` | `network_interfaces[0].primary_v4_address.one_to_one_nat.address` |
+| `ansible_user` | hardcoded `ubuntu` (all VMs use this user) |
+| host group `webservers` | VMs with label `project=devops-lab04` |
+
+### Test — `ansible-inventory -i inventory/yandex_inventory.py --graph`
+
+```
+@all:
+ |--@ungrouped:
+ |--@webservers:
+ | |--lab-vm
+```
+
+### Test — `ansible all -i inventory/yandex_inventory.py -m ping`
+
+```
+lab-vm | SUCCESS => {
+ "ansible_facts": {
+ "discovered_interpreter_python": "/usr/bin/python3.12"
+ },
+ "changed": false,
+ "ping": "pong"
+}
+```
+
+### Run provision with dynamic inventory
+
+```
+PLAY [Provision web servers] ***************************************************
+
+TASK [Gathering Facts] *********************************************************
+ok: [lab-vm]
+
+TASK [common : Update apt cache] ***********************************************
+ok: [lab-vm]
+
+TASK [common : Install common packages] ****************************************
+ok: [lab-vm]
+
+...
+
+PLAY RECAP *********************************************************************
+lab-vm : ok=12 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+```
+
+### What happens when VM IP changes
+
+With static inventory (`hosts.ini`) I would have to find the new IP and update it manually. With the dynamic inventory script, Ansible queries the API every run and always gets the current IP automatically. Destroy and recreate the VM, playbooks still work with zero changes.
+
+### Benefits vs static inventory
+
+| Feature | Static (hosts.ini) | Dynamic (yandex_inventory.py) |
+|---|---|---|
+| IP management | Manual update | Automatic |
+| New VMs | Must add manually | Auto-discovered |
+| Scaling to 10+ VMs | Very tedious | Works instantly |
+| Deleted VMs | Must remove manually | Disappear automatically |
+| Source of truth | The file itself | The cloud API |
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
new file mode 100644
index 0000000000..7c66f4a036
--- /dev/null
+++ b/ansible/group_vars/all.yml
@@ -0,0 +1,21 @@
+$ANSIBLE_VAULT;1.1;AES256
+32386331623939663963666531666434613830323232613238396234643063373738613764303939
+6235346663643761326237373864353263323335336336360a656439343563613939353830393938
+61646430643262363831646434323437336463353434653263363836656362343630626663663338
+6431396336626134310a656264653638383235616338356233653864303863646162363236626436
+64333537303362616437306433666437393932613337626434646463643562646438373531306637
+62303261373964333532316139363165383961643065646261343066373938393337666630656261
+30356437376234393830616634306163376438313030326162366461663733643733653439366132
+66326637393065636631306261323630663533643535666162373433326230373066633430643737
+63636335623965626630373730623336373234623764383261366464316537363664613837363733
+32663866323638643537323530313732656566313263666561343937383139623166343339353532
+63666238633836316539383461663731656433313133366363396366653063393338333138616235
+30616538646264313935323738313035663762663435333135323733616437613734633164646463
+65356634613135393464623735346264623764376266336230373866623433366464373239303263
+63326137643636633763653239663937326339613632666566643433663436633162356535386330
+30333635636637333836383433663665353234306339343561656164653261383833643138323263
+61383963343537646563623764336265646238653838356230363839613432616231663630353965
+30353862623337313939353531356230353033616130663336313031363034333262333631636564
+30653366316365303161376336663863616134373831313432666165303061333565396438383966
+61303939643933303237636234643065333738343834316461363233333531616330363535643331
+31356231653239373031
diff --git a/ansible/inventory/group_vars/all.yml b/ansible/inventory/group_vars/all.yml
new file mode 100644
index 0000000000..7c66f4a036
--- /dev/null
+++ b/ansible/inventory/group_vars/all.yml
@@ -0,0 +1,21 @@
+$ANSIBLE_VAULT;1.1;AES256
+32386331623939663963666531666434613830323232613238396234643063373738613764303939
+6235346663643761326237373864353263323335336336360a656439343563613939353830393938
+61646430643262363831646434323437336463353434653263363836656362343630626663663338
+6431396336626134310a656264653638383235616338356233653864303863646162363236626436
+64333537303362616437306433666437393932613337626434646463643562646438373531306637
+62303261373964333532316139363165383961643065646261343066373938393337666630656261
+30356437376234393830616634306163376438313030326162366461663733643733653439366132
+66326637393065636631306261323630663533643535666162373433326230373066633430643737
+63636335623965626630373730623336373234623764383261366464316537363664613837363733
+32663866323638643537323530313732656566313263666561343937383139623166343339353532
+63666238633836316539383461663731656433313133366363396366653063393338333138616235
+30616538646264313935323738313035663762663435333135323733616437613734633164646463
+65356634613135393464623735346264623764376266336230373866623433366464373239303263
+63326137643636633763653239663937326339613632666566643433663436633162356535386330
+30333635636637333836383433663665353234306339343561656164653261383833643138323263
+61383963343537646563623764336265646238653838356230363839613432616231663630353965
+30353862623337313939353531356230353033616130663336313031363034333262333631636564
+30653366316365303161376336663863616134373831313432666165303061333565396438383966
+61303939643933303237636234643065333738343834316461363233333531616330363535643331
+31356231653239373031
diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini
new file mode 100644
index 0000000000..75b7ef1a14
--- /dev/null
+++ b/ansible/inventory/hosts.ini
@@ -0,0 +1,5 @@
+[webservers]
+lab-vm ansible_host=89.169.131.155 ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_ed25519
+
+[webservers:vars]
+ansible_python_interpreter=/usr/bin/python3
diff --git a/ansible/inventory/yandex.yml b/ansible/inventory/yandex.yml
new file mode 100644
index 0000000000..9bea87b325
--- /dev/null
+++ b/ansible/inventory/yandex.yml
@@ -0,0 +1,19 @@
+# Yandex Cloud Dynamic Inventory Configuration
+# This file documents the dynamic inventory approach.
+# The actual inventory script is: inventory/yandex_inventory.py
+#
+# Usage:
+# ansible-inventory -i inventory/yandex_inventory.py --graph
+# ansible all -i inventory/yandex_inventory.py -m ping
+# ansible-playbook -i inventory/yandex_inventory.py playbooks/provision.yml
+#
+# Authentication:
+# Uses service account key file: /home/blxxdclxud/yc-key.json
+# Same key used by Terraform in Lab 4.
+#
+# How VMs are discovered:
+# - Queries Yandex Cloud Compute API for folder_id: b1ga4ttr9f92otmhh4cc
+# - Filters only RUNNING instances
+# - Sets ansible_host to public NAT IP
+# - Sets ansible_user to "ubuntu"
+# - Groups VMs with label project=devops-lab04 into "webservers" group
diff --git a/ansible/inventory/yandex_inventory.py b/ansible/inventory/yandex_inventory.py
new file mode 100755
index 0000000000..ca49eec4f5
--- /dev/null
+++ b/ansible/inventory/yandex_inventory.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+"""
+Dynamic inventory script for Yandex Cloud.
+Queries the Yandex Compute API and returns running VMs as Ansible inventory.
+
+Usage:
+ ansible -i inventory/yandex_inventory.py all -m ping
+ ansible-inventory -i inventory/yandex_inventory.py --graph
+"""
+
+import json
+import os
+import sys
+
+import grpc
+import yandexcloud
+from yandex.cloud.compute.v1.instance_service_pb2 import ListInstancesRequest
+from yandex.cloud.compute.v1.instance_service_pb2_grpc import InstanceServiceStub
+
+FOLDER_ID = "b1ga4ttr9f92otmhh4cc"
+SERVICE_ACCOUNT_KEY_FILE = "/home/blxxdclxud/yc-key.json"
+SSH_USER = "ubuntu"
+SSH_KEY = "~/.ssh/id_ed25519"
+
+
+def get_instances():
+ with open(SERVICE_ACCOUNT_KEY_FILE) as f:
+ sa_key = json.load(f)
+
+ sdk = yandexcloud.SDK(service_account_key=sa_key)
+ instance_service = sdk.client(InstanceServiceStub)
+
+ response = instance_service.List(ListInstancesRequest(folder_id=FOLDER_ID))
+ return [i for i in response.instances if i.status == 2] # 2 = RUNNING
+
+
+def build_inventory(instances):
+ hostvars = {}
+ webservers = []
+
+ for instance in instances:
+ name = instance.name
+ public_ip = None
+
+ for iface in instance.network_interfaces:
+ if iface.primary_v4_address.one_to_one_nat.address:
+ public_ip = iface.primary_v4_address.one_to_one_nat.address
+ break
+
+ if not public_ip:
+ continue
+
+ hostvars[name] = {
+ "ansible_host": public_ip,
+ "ansible_user": SSH_USER,
+ "ansible_ssh_private_key_file": SSH_KEY,
+ "yc_labels": dict(instance.labels),
+ }
+
+ if instance.labels.get("project", "") == "devops-lab04":
+ webservers.append(name)
+
+ return {
+ "all": {
+ "hosts": list(hostvars.keys()),
+ },
+ "webservers": {
+ "hosts": webservers,
+ },
+ "_meta": {
+ "hostvars": hostvars,
+ },
+ }
+
+
+if __name__ == "__main__":
+ if "--list" in sys.argv:
+ instances = get_instances()
+ inventory = build_inventory(instances)
+ print(json.dumps(inventory, indent=2))
+ elif "--host" in sys.argv:
+ print(json.dumps({}))
+ else:
+ print(json.dumps({}))
diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml
new file mode 100644
index 0000000000..56850a7585
--- /dev/null
+++ b/ansible/playbooks/deploy.yml
@@ -0,0 +1,7 @@
+---
+- name: Deploy application
+ hosts: webservers
+ become: yes
+
+ roles:
+ - app_deploy
diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml
new file mode 100644
index 0000000000..f53efb0248
--- /dev/null
+++ b/ansible/playbooks/provision.yml
@@ -0,0 +1,8 @@
+---
+- name: Provision web servers
+ hosts: webservers
+ become: yes
+
+ roles:
+ - common
+ - docker
diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml
new file mode 100644
index 0000000000..d42c17fd27
--- /dev/null
+++ b/ansible/playbooks/site.yml
@@ -0,0 +1,9 @@
+---
+- name: Full site setup
+ hosts: webservers
+ become: yes
+
+ roles:
+ - common
+ - docker
+ - app_deploy
diff --git a/ansible/roles/app_deploy/defaults/main.yml b/ansible/roles/app_deploy/defaults/main.yml
new file mode 100644
index 0000000000..9ffffde25e
--- /dev/null
+++ b/ansible/roles/app_deploy/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+app_port: 5000
+app_restart_policy: unless-stopped
+app_env_vars: {}
diff --git a/ansible/roles/app_deploy/handlers/main.yml b/ansible/roles/app_deploy/handlers/main.yml
new file mode 100644
index 0000000000..73deea15ef
--- /dev/null
+++ b/ansible/roles/app_deploy/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: restart app container
+ community.docker.docker_container:
+ name: "{{ app_container_name }}"
+ state: started
+ restart: yes
diff --git a/ansible/roles/app_deploy/tasks/main.yml b/ansible/roles/app_deploy/tasks/main.yml
new file mode 100644
index 0000000000..9671a977a3
--- /dev/null
+++ b/ansible/roles/app_deploy/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+- name: Log in to Docker Hub
+ community.docker.docker_login:
+ username: "{{ dockerhub_username }}"
+ password: "{{ dockerhub_password }}"
+ no_log: true
+
+- name: Pull Docker image
+ community.docker.docker_image:
+ name: "{{ docker_image }}:{{ docker_image_tag }}"
+ source: pull
+
+- name: Remove old container if exists
+ community.docker.docker_container:
+ name: "{{ app_container_name }}"
+ state: absent
+
+- name: Run application container
+ community.docker.docker_container:
+ name: "{{ app_container_name }}"
+ image: "{{ docker_image }}:{{ docker_image_tag }}"
+ state: started
+ restart_policy: "{{ app_restart_policy }}"
+ ports:
+ - "{{ app_port }}:{{ app_port }}"
+ env: "{{ app_env_vars }}"
+
+- name: Wait for application to be ready
+ wait_for:
+ port: "{{ app_port }}"
+ host: localhost
+ delay: 3
+ timeout: 30
+
+- name: Verify health endpoint
+ uri:
+ url: "http://localhost:{{ app_port }}/health"
+ status_code: 200
+ register: health_check
+
+- name: Print health check result
+ debug:
+ msg: "App is healthy: {{ health_check.status }}"
diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml
new file mode 100644
index 0000000000..802083bd51
--- /dev/null
+++ b/ansible/roles/common/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+common_packages:
+ - python3-pip
+ - curl
+ - git
+ - vim
+ - htop
+ - wget
+ - unzip
+
+common_timezone: "Europe/Moscow"
diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml
new file mode 100644
index 0000000000..55926f77e9
--- /dev/null
+++ b/ansible/roles/common/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: Update apt cache
+  ansible.builtin.apt:
+    update_cache: true
+    cache_valid_time: 3600
+
+- name: Install common packages
+  ansible.builtin.apt:
+    name: "{{ common_packages }}"
+    state: present
+
+- name: Set timezone
+  community.general.timezone:
+    name: "{{ common_timezone }}"
diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml
new file mode 100644
index 0000000000..e64d3b7e66
--- /dev/null
+++ b/ansible/roles/docker/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+docker_user: ubuntu
diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml
new file mode 100644
index 0000000000..3627303e6b
--- /dev/null
+++ b/ansible/roles/docker/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart docker
+  ansible.builtin.service:
+    name: docker
+    state: restarted
diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml
new file mode 100644
index 0000000000..ec6be60672
--- /dev/null
+++ b/ansible/roles/docker/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+- name: Install required packages for Docker repo
+  ansible.builtin.apt:
+    name:
+      - ca-certificates
+      - curl
+      - gnupg
+    state: present
+
+- name: Create keyrings directory
+  ansible.builtin.file:
+    path: /etc/apt/keyrings
+    state: directory
+    mode: "0755"
+
+- name: Add Docker GPG key
+  ansible.builtin.get_url:
+    url: https://download.docker.com/linux/ubuntu/gpg
+    dest: /etc/apt/keyrings/docker.asc
+    mode: "0644"
+
+- name: Add Docker repository
+  ansible.builtin.apt_repository:
+    repo: >-
+      deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc]
+      https://download.docker.com/linux/ubuntu
+      {{ ansible_distribution_release }} stable
+    state: present
+    filename: docker
+
+- name: Install Docker packages
+  ansible.builtin.apt:
+    name:
+      - docker-ce
+      - docker-ce-cli
+      - containerd.io
+      - docker-buildx-plugin
+      - docker-compose-plugin
+    state: present
+    update_cache: true
+  notify: restart docker
+
+- name: Ensure Docker service is started and enabled
+  ansible.builtin.service:
+    name: docker
+    state: started
+    enabled: true
+
+- name: Add user to docker group
+  ansible.builtin.user:
+    name: "{{ docker_user }}"
+    groups: docker
+    append: true
+
+- name: Install python3-docker for Ansible docker modules
+  ansible.builtin.apt:
+    name: python3-docker
+    state: present
diff --git a/docs/LAB04.md b/docs/LAB04.md
new file mode 100644
index 0000000000..c8e021d48d
--- /dev/null
+++ b/docs/LAB04.md
@@ -0,0 +1,474 @@
+# Lab 04 — Infrastructure as Code (Terraform & Pulumi)
+
+## 1. Cloud Provider & Infrastructure
+
+**Cloud Provider:** Yandex Cloud
+
+**Why:** Yandex Cloud is recommended by the course for Russia. It has a free trial grant, good Terraform/Pulumi support, and does not require VPN.
+
+**Instance Configuration:**
+- Platform: standard-v2
+- vCPU: 2 (core fraction 20%)
+- RAM: 1 GB
+- Disk: 10 GB HDD
+- OS: Ubuntu 24.04 LTS
+- Zone: ru-central1-a
+
+**Total cost:** 0₽ (free grant)
+
+**Resources created:**
+- VPC Network (`lab-network`)
+- Subnet (`lab-subnet`, 10.0.1.0/24)
+- Security Group (SSH port 22, HTTP port 80, app port 5000)
+- Compute Instance (`lab-vm` with public IP)
+
+---
+
+## 2. Terraform Implementation
+
+**Terraform version:** 1.9+
+
+**Project structure:**
+```
+terraform/
+├── .gitignore # Ignores state, credentials, .terraform/
+├── main.tf # Provider, network, subnet, security group, VM
+├── variables.tf # Input variables (cloud_id, folder_id, zone, etc.)
+├── outputs.tf # VM public IP, VM ID, SSH command
+└── terraform.tfvars # Actual values (gitignored)
+```
+
+**Key decisions:**
+- Used variables for all configurable values so nothing is hardcoded
+- Used outputs to display public IP and SSH command after apply
+- Used `.gitignore` to keep secrets and state out of git
+- Used labels for resource identification
+- Security group allows only required ports (22, 80, 5000)
+
+**Challenges:**
+- Had to find the correct Ubuntu 24.04 image ID for Yandex Cloud
+- Needed to set up service account and authorized key for authentication
+
+### Terminal Output
+```
+Initializing the backend...
+Initializing provider plugins...
+- Finding latest version of yandex-cloud/yandex...
+- Installing yandex-cloud/yandex v0.186.0...
+- Installed yandex-cloud/yandex v0.186.0 (unauthenticated)
+
+Terraform has been successfully initialized!
+
+Terraform used the selected providers to generate the following execution plan. Resource actions
+are indicated with the following symbols:
+ + create
+
+Terraform will perform the following actions:
+
+ # yandex_compute_instance.lab_vm will be created
+ + resource "yandex_compute_instance" "lab_vm" {
+ + created_at = (known after apply)
+ + folder_id = (known after apply)
+ + fqdn = (known after apply)
+ + gpu_cluster_id = (known after apply)
+ + hardware_generation = (known after apply)
+ + hostname = (known after apply)
+ + id = (known after apply)
+ + labels = {
+ + "project" = "devops-lab04"
+ + "task" = "terraform"
+ }
+ + metadata = {
+ + "ssh-keys" = <<-EOT
+ ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKfBnyjaKsKyiGkHXoSmRrJW1zewQEhVJxjqrKrRT11r ramazanatzuf10@gmail.com
+ EOT
+ }
+ + name = "lab-vm"
+ + network_acceleration_type = "standard"
+ + platform_id = "standard-v2"
+ + status = (known after apply)
+ + zone = "ru-central1-a"
+
+ + boot_disk {
+ + auto_delete = true
+ + device_name = (known after apply)
+ + disk_id = (known after apply)
+ + mode = (known after apply)
+
+ + initialize_params {
+ + block_size = (known after apply)
+ + description = (known after apply)
+ + image_id = "fd8p685sjqdraf7mpkuc"
+ + name = (known after apply)
+ + size = 10
+ + snapshot_id = (known after apply)
+ + type = "network-hdd"
+ }
+ }
+
+ + metadata_options (known after apply)
+
+ + network_interface {
+ + index = (known after apply)
+ + ip_address = (known after apply)
+ + ipv4 = true
+ + ipv6 = (known after apply)
+ + ipv6_address = (known after apply)
+ + mac_address = (known after apply)
+ + nat = true
+ + nat_ip_address = (known after apply)
+ + nat_ip_version = (known after apply)
+ + security_group_ids = (known after apply)
+ + subnet_id = (known after apply)
+ }
+
+ + resources {
+ + core_fraction = 20
+ + cores = 2
+ + memory = 1
+ }
+ }
+
+ # yandex_vpc_network.lab_network will be created
+ + resource "yandex_vpc_network" "lab_network" {
+ + created_at = (known after apply)
+ + default_security_group_id = (known after apply)
+ + folder_id = (known after apply)
+ + id = (known after apply)
+ + labels = (known after apply)
+ + name = "lab-network"
+ + subnet_ids = (known after apply)
+ }
+
+ # yandex_vpc_security_group.lab_sg will be created
+ + resource "yandex_vpc_security_group" "lab_sg" {
+ + created_at = (known after apply)
+ + folder_id = (known after apply)
+ + id = (known after apply)
+ + labels = (known after apply)
+ + name = "lab-security-group"
+ + network_id = (known after apply)
+ + status = (known after apply)
+
+ + egress {
+ + description = "Allow all outbound"
+ + from_port = -1
+ + id = (known after apply)
+ + labels = (known after apply)
+ + port = -1
+ + protocol = "ANY"
+ + to_port = -1
+ + v4_cidr_blocks = [
+ + "0.0.0.0/0",
+ ]
+ }
+
+ + ingress {
+ + description = "Allow HTTP"
+ + from_port = -1
+ + id = (known after apply)
+ + labels = (known after apply)
+ + port = 80
+ + protocol = "TCP"
+ + to_port = -1
+ + v4_cidr_blocks = [
+ + "0.0.0.0/0",
+ ]
+ }
+ + ingress {
+ + description = "Allow SSH"
+ + from_port = -1
+ + id = (known after apply)
+ + labels = (known after apply)
+ + port = 22
+ + protocol = "TCP"
+ + to_port = -1
+ + v4_cidr_blocks = [
+ + "0.0.0.0/0",
+ ]
+ }
+ + ingress {
+ + description = "Allow app port 5000"
+ + from_port = -1
+ + id = (known after apply)
+ + labels = (known after apply)
+ + port = 5000
+ + protocol = "TCP"
+ + to_port = -1
+ + v4_cidr_blocks = [
+ + "0.0.0.0/0",
+ ]
+ }
+ }
+
+ # yandex_vpc_subnet.lab_subnet will be created
+ + resource "yandex_vpc_subnet" "lab_subnet" {
+ + created_at = (known after apply)
+ + folder_id = (known after apply)
+ + id = (known after apply)
+ + labels = (known after apply)
+ + name = "lab-subnet"
+ + network_id = (known after apply)
+ + v4_cidr_blocks = [
+ + "10.0.1.0/24",
+ ]
+ + zone = "ru-central1-a"
+ }
+
+Plan: 4 to add, 0 to change, 0 to destroy.
+
+yandex_vpc_network.lab_network: Creating...
+yandex_vpc_network.lab_network: Creation complete after 14s [id=enptvj5mcvlei7hrv83s]
+yandex_vpc_subnet.lab_subnet: Creating...
+yandex_vpc_security_group.lab_sg: Creating...
+yandex_vpc_subnet.lab_subnet: Creation complete after 1s [id=e9b8rovrospd6deptkhc]
+yandex_vpc_security_group.lab_sg: Creation complete after 4s [id=enp9sdve5bv9nuua560j]
+yandex_compute_instance.lab_vm: Creating...
+yandex_compute_instance.lab_vm: Creation complete after 45s [id=fhmkce8lk639oi5g0s9n]
+
+Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
+
+Outputs:
+
+ssh_connection = "ssh -i ~/.ssh/id_ed25519 ubuntu@89.169.135.233"
+vm_id = "fhmkce8lk639oi5g0s9n"
+vm_public_ip = "89.169.135.233"
+```
+
+**SSH connection proof:**
+
+
+---
+
+## 3. Pulumi Implementation
+
+**Pulumi version:** 3.x
+**Language:** Python
+
+**Project structure:**
+```
+pulumi/
+├── .gitignore # Ignores venv/, stack configs, __pycache__/
+├── __main__.py # Main infrastructure code (same as Terraform)
+├── Pulumi.yaml # Project metadata
+└── requirements.txt # Python dependencies
+```
+
+**How code differs from Terraform:**
+- Written in Python instead of HCL
+- Resources are Python objects, not HCL blocks
+- Configuration uses `pulumi.Config()` instead of `variable` blocks
+- Outputs use `pulumi.export()` instead of `output` blocks
+- Can use normal Python features (file reading, string formatting)
+
+**Advantages discovered:**
+- Familiar Python syntax, easier to read
+- Can use regular Python code for logic (reading SSH key file, etc.)
+- Better IDE support with autocomplete and type checking
+- Secrets are encrypted by default
+
+**Challenges:**
+- Pulumi requires a backend for state (used `--local` for simplicity)
+- Python virtual environment setup adds extra steps
+- Smaller community, fewer examples online
+
+### Terminal Output
+
+**terraform destroy (cleanup before Pulumi):**
+```
+Plan: 0 to add, 0 to change, 4 to destroy.
+
+Changes to Outputs:
+ - ssh_connection = "ssh -i ~/.ssh/id_ed25519 ubuntu@89.169.135.233" -> null
+ - vm_id = "fhmkce8lk639oi5g0s9n" -> null
+ - vm_public_ip = "89.169.135.233" -> null
+yandex_compute_instance.lab_vm: Destroying... [id=fhmkce8lk639oi5g0s9n]
+yandex_compute_instance.lab_vm: Still destroying... [id=fhmkce8lk639oi5g0s9n, 00m10s elapsed]
+yandex_compute_instance.lab_vm: Still destroying... [id=fhmkce8lk639oi5g0s9n, 00m20s elapsed]
+yandex_compute_instance.lab_vm: Still destroying... [id=fhmkce8lk639oi5g0s9n, 00m30s elapsed]
+yandex_compute_instance.lab_vm: Destruction complete after 37s
+yandex_vpc_subnet.lab_subnet: Destroying... [id=e9b8rovrospd6deptkhc]
+yandex_vpc_security_group.lab_sg: Destroying... [id=enp9sdve5bv9nuua560j]
+yandex_vpc_security_group.lab_sg: Destruction complete after 1s
+yandex_vpc_subnet.lab_subnet: Destruction complete after 5s
+yandex_vpc_network.lab_network: Destroying... [id=enptvj5mcvlei7hrv83s]
+yandex_vpc_network.lab_network: Destruction complete after 1s
+
+Destroy complete! Resources: 4 destroyed.
+
+```
+
+**pulumi preview:**
+```
+Previewing update (dev):
+ Type Name Plan Info
+ pulumi:pulumi:Stack lab04-pulumi-dev 1 message
+
+Diagnostics:
+ pulumi:pulumi:Stack (lab04-pulumi-dev):
+ DEBUG: Using Python: /home/blxxdclxud/assignments/DevOps-Core-Course/pulumi/venv/bin/python3
+
+Resources:
+ 5 unchanged
+
+
+
+Current stack is dev:
+ Managed by blxxdclxud-BOM-WXX9
+ Last updated: 21 seconds ago (2026-02-15 23:54:56.071594762 +0300 MSK)
+ Pulumi version used: v3.220.0
+
+Current stack resources (6):
+ TYPE NAME
+ pulumi:pulumi:Stack lab04-pulumi-dev
+ ├─ yandex:index/vpcNetwork:VpcNetwork lab-network
+ ├─ yandex:index/vpcSubnet:VpcSubnet lab-subnet
+ ├─ yandex:index/vpcSecurityGroup:VpcSecurityGroup lab-security-group
+ ├─ yandex:index/computeInstance:ComputeInstance lab-vm
+ └─ pulumi:providers:yandex default_0_13_0
+
+Current stack outputs (3):
+ OUTPUT VALUE
+ ssh_connection ssh -i ~/.ssh/id_ed25519 ubuntu@89.169.135.37
+ vm_id fhm49b6u8mk4vvd265sk
+ vm_public_ip 89.169.135.37
+```
+
+**SSH connection proof:**
+
+
+---
+
+## 4. Terraform vs Pulumi Comparison
+
+**Ease of Learning:** Terraform was easier to learn because HCL syntax is simple and there are many examples online. Pulumi requires you to know a programming language, but if you already know Python, it feels more natural.
+
+**Code Readability:** I find Pulumi more readable because it is regular Python code. Terraform HCL is also readable but has its own special syntax that you need to learn. For simple infrastructure both look clean.
+
+**Debugging:** Terraform was easier to debug because error messages are clear and `terraform plan` shows exactly what will happen. Pulumi errors sometimes mix Python errors with infrastructure errors which can be confusing.
+
+**Documentation:** Terraform has better documentation because it has a bigger community. The Terraform Registry has detailed docs for every provider. Pulumi docs are good but have fewer examples.
+
+**Use Case:** I would use Terraform for simple, standard infrastructure where I don't need complex logic. I would use Pulumi for projects where I need loops, conditions, or want to reuse code with functions and classes.
+
+---
+
+## 5. Lab 5 Preparation & Cleanup
+
+**VM for Lab 5:** I will recreate the VM using Terraform code when needed for Lab 5. The code is ready in the repository.
+
+**Cleanup Status:** All resources were destroyed after completing the lab.
+
+```
+Previewing destroy (dev):
+ Type Name Plan
+ - pulumi:pulumi:Stack lab04-pulumi-dev delete
+ - ├─ yandex:index:VpcSubnet lab-subnet delete
+ - ├─ yandex:index:VpcSecurityGroup lab-security-group delete
+ - ├─ yandex:index:VpcNetwork lab-network delete
+ - └─ yandex:index:ComputeInstance lab-vm delete
+
+Outputs:
+ - ssh_connection: "ssh -i ~/.ssh/id_ed25519 ubuntu@89.169.135.37"
+ - vm_id : "fhm49b6u8mk4vvd265sk"
+ - vm_public_ip : "89.169.135.37"
+
+Resources:
+ - 5 to delete
+
+Destroying (dev):
+ Type Name Status
+ - pulumi:pulumi:Stack lab04-pulumi-dev deleted (0.00s)
+ - ├─ yandex:index:ComputeInstance lab-vm deleted (38s)
+ - ├─ yandex:index:VpcSubnet lab-subnet deleted (5s)
+ - ├─ yandex:index:VpcSecurityGroup lab-security-group deleted (1s)
+ - └─ yandex:index:VpcNetwork lab-network deleted (0.56s)
+
+Outputs:
+ - ssh_connection: "ssh -i ~/.ssh/id_ed25519 ubuntu@89.169.135.37"
+ - vm_id : "fhm49b6u8mk4vvd265sk"
+ - vm_public_ip : "89.169.135.37"
+
+Resources:
+ - 5 deleted
+
+Duration: 46s
+
+```
+
+---
+
+## Bonus: IaC CI/CD
+
+Created `.github/workflows/terraform-ci.yml` that automatically validates Terraform code on pull requests.
+
+**Path filters:** The workflow only triggers on changes to `terraform/**` files and the workflow file itself. This prevents unnecessary CI runs when other files change.
+
+**Steps:**
+1. `terraform fmt -check` — checks code formatting
+2. `terraform init -backend=false` — initializes without backend (no credentials needed)
+3. `terraform validate` — checks syntax and configuration
+4. `tflint` — lints for best practices and common errors
+
+**Workflow run proof:**
+
+---
+
+## Bonus: GitHub Repository Import
+
+### What is `terraform import`?
+
+`terraform import` lets you bring existing infrastructure under Terraform management. This is useful when you have resources that were created manually (through web console or CLI) and you want to manage them with code now.
+
+### Import Process
+
+1. Created `terraform/github/main.tf` with GitHub provider and `github_repository` resource
+2. Ran `terraform init` to install the GitHub provider
+3. Ran `terraform import github_repository.course_repo DevOps-Core-Course`
+4. Ran `terraform plan` to verify state matches reality
+
+### Terminal Output
+
+**terraform import:**
+```
+$ terraform import github_repository.course_repo DevOps-Core-Course
+
+github_repository.course_repo: Importing from ID "DevOps-Core-Course"...
+github_repository.course_repo: Import prepared!
+ Prepared github_repository for import
+github_repository.course_repo: Refreshing state... [id=DevOps-Core-Course]
+
+Import successful!
+
+The resources that were imported are shown above. These resources are now in
+your Terraform state and will henceforth be managed by Terraform.
+```
+
+**terraform plan (after import):**
+```
+Terraform will perform the following actions:
+
+ # github_repository.course_repo will be updated in-place
+ ~ resource "github_repository" "course_repo" {
+ ~ description = "🚀Production-grade DevOps course..." -> "DevOps Core Course lab assignments"
+ - has_downloads = true -> null
+ ~ has_issues = false -> true
+ ~ has_projects = true -> false
+ ~ has_wiki = true -> false
+ id = "DevOps-Core-Course"
+ name = "DevOps-Core-Course"
+ }
+
+Plan: 0 to add, 1 to change, 0 to destroy.
+```
+
+**Drift Analysis:** The plan shows that the actual repository state (with wiki/projects enabled) differs from our minimal Terraform configuration. To sync them, we would either update our `main.tf` to match reality or `apply` to enforce the new configuration. This demonstrates how Terraform detects configuration drift.
+
+### Why Importing Matters
+
+- **Version control:** Track all changes to infrastructure in Git
+- **Consistency:** Prevent configuration drift — everyone sees the same config
+- **Automation:** Changes go through code review before applying
+- **Documentation:** Code is living documentation of your infrastructure
+- **Disaster recovery:** Can recreate everything from code if something breaks
+- **Team collaboration:** Multiple people can work on infrastructure without conflicts
diff --git a/docs/image-1.png b/docs/image-1.png
new file mode 100644
index 0000000000..f9e45b6df0
Binary files /dev/null and b/docs/image-1.png differ
diff --git a/docs/image-2.png b/docs/image-2.png
new file mode 100644
index 0000000000..ff142a2652
Binary files /dev/null and b/docs/image-2.png differ
diff --git a/docs/image.png b/docs/image.png
new file mode 100644
index 0000000000..52c3b35b0d
Binary files /dev/null and b/docs/image.png differ
diff --git a/pulumi/.gitignore b/pulumi/.gitignore
new file mode 100644
index 0000000000..274d25ec36
--- /dev/null
+++ b/pulumi/.gitignore
@@ -0,0 +1,4 @@
+venv/
+__pycache__/
+*.pyc
+Pulumi.*.yaml
diff --git a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml
new file mode 100644
index 0000000000..e79f304ef7
--- /dev/null
+++ b/pulumi/Pulumi.yaml
@@ -0,0 +1,4 @@
+name: lab04-pulumi
+runtime:
+ name: python
+description: Lab 04 - Yandex Cloud VM with Pulumi
diff --git a/pulumi/__main__.py b/pulumi/__main__.py
new file mode 100644
index 0000000000..34157f721f
--- /dev/null
+++ b/pulumi/__main__.py
@@ -0,0 +1,92 @@
+import sys
+print(f"DEBUG: Using Python: {sys.executable}")
+# print(f"DEBUG: Sys Path: {sys.path}")
+
+import pulumi
+import pulumi_yandex as yandex
+
+config = pulumi.Config()
+folder_id = config.require("folder_id")
+zone = config.get("zone") or "ru-central1-a"
+image_id = config.get("image_id") or "fd8p685sjqdraf7mpkuc"
+ssh_public_key_path = config.get("ssh_public_key_path") or "~/.ssh/id_ed25519.pub"
+
+import os
+ssh_key_path = os.path.expanduser(ssh_public_key_path)
+with open(ssh_key_path, "r") as f:
+ ssh_public_key = f.read().strip()
+
+# Network
+network = yandex.VpcNetwork("lab-network")
+
+# Subnet
+subnet = yandex.VpcSubnet("lab-subnet",
+ zone=zone,
+ network_id=network.id,
+ v4_cidr_blocks=["10.0.1.0/24"])
+
+# Security group
+security_group = yandex.VpcSecurityGroup("lab-security-group",
+ network_id=network.id,
+ ingresses=[
+ yandex.VpcSecurityGroupIngressArgs(
+ protocol="TCP",
+ port=22,
+ v4_cidr_blocks=["0.0.0.0/0"],
+ description="Allow SSH",
+ ),
+ yandex.VpcSecurityGroupIngressArgs(
+ protocol="TCP",
+ port=80,
+ v4_cidr_blocks=["0.0.0.0/0"],
+ description="Allow HTTP",
+ ),
+ yandex.VpcSecurityGroupIngressArgs(
+ protocol="TCP",
+ port=5000,
+ v4_cidr_blocks=["0.0.0.0/0"],
+ description="Allow app port 5000",
+ ),
+ ],
+ egresses=[
+ yandex.VpcSecurityGroupEgressArgs(
+ protocol="ANY",
+ v4_cidr_blocks=["0.0.0.0/0"],
+ description="Allow all outbound",
+ ),
+ ])
+
+# VM (free tier: 2 vCPU 20%, 1GB RAM, 10GB HDD)
+instance = yandex.ComputeInstance("lab-vm",
+ platform_id="standard-v2",
+ zone=zone,
+ resources=yandex.ComputeInstanceResourcesArgs(
+ cores=2,
+ memory=1,
+ core_fraction=20,
+ ),
+ boot_disk=yandex.ComputeInstanceBootDiskArgs(
+ initialize_params=yandex.ComputeInstanceBootDiskInitializeParamsArgs(
+ image_id=image_id,
+ size=10,
+ type="network-hdd",
+ ),
+ ),
+ network_interfaces=[yandex.ComputeInstanceNetworkInterfaceArgs(
+ subnet_id=subnet.id,
+ nat=True,
+ security_group_ids=[security_group.id],
+ )],
+ metadata={
+ "ssh-keys": f"ubuntu:{ssh_public_key}",
+ },
+ labels={
+ "project": "devops-lab04",
+ "task": "pulumi",
+ })
+
+pulumi.export("vm_public_ip", instance.network_interfaces[0].nat_ip_address)
+pulumi.export("vm_id", instance.id)
+pulumi.export("ssh_connection",
+ instance.network_interfaces[0].nat_ip_address.apply(
+ lambda ip: f"ssh -i ~/.ssh/id_ed25519 ubuntu@{ip}"))
diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt
new file mode 100644
index 0000000000..6e47e4b09a
--- /dev/null
+++ b/pulumi/requirements.txt
@@ -0,0 +1,3 @@
+pulumi>=3.0.0
+pulumi-yandex>=0.13.0
+setuptools==69.5.1
diff --git a/pulumi/up.log b/pulumi/up.log
new file mode 100644
index 0000000000..c90db7258f
--- /dev/null
+++ b/pulumi/up.log
@@ -0,0 +1,53 @@
+Previewing update (dev):
+
+@ previewing update....
+ + pulumi:pulumi:Stack lab04-pulumi-dev create
+ + yandex:index:VpcNetwork lab-network create
+ + yandex:index:VpcSubnet lab-subnet create
+ + yandex:index:VpcSecurityGroup lab-security-group create
+ + yandex:index:ComputeInstance lab-vm create
+ + pulumi:pulumi:Stack lab04-pulumi-dev create DEBUG: Using Python: /home/blxxdclxud/assignments/DevOps-Core-Course/pulumi/venv/bin/python3
+ + pulumi:pulumi:Stack lab04-pulumi-dev create 1 message
+Diagnostics:
+ pulumi:pulumi:Stack (lab04-pulumi-dev):
+ DEBUG: Using Python: /home/blxxdclxud/assignments/DevOps-Core-Course/pulumi/venv/bin/python3
+
+Outputs:
+ ssh_connection: [unknown]
+ vm_id : [unknown]
+ vm_public_ip : [unknown]
+
+Resources:
+ + 5 to create
+
+Updating (dev):
+
+@ updating....
+ + pulumi:pulumi:Stack lab04-pulumi-dev creating (0s)
+ + yandex:index:VpcNetwork lab-network creating (0s)
+@ updating........
+ + yandex:index:VpcNetwork lab-network created (5s)
+ + yandex:index:VpcSubnet lab-subnet creating (0s)
+ + yandex:index:VpcSecurityGroup lab-security-group creating (0s)
+@ updating....
+ + yandex:index:VpcSubnet lab-subnet created (0.69s)
+@ updating.....
+ + yandex:index:VpcSecurityGroup lab-security-group created (2s)
+ + yandex:index:ComputeInstance lab-vm creating (0s)
+^C received; cancelling. If you would like to terminate immediately, press ^C again.
+Note that terminating immediately may lead to orphaned resources and other inconsistencies.
+
+@ updating...........................................
+ + yandex:index:ComputeInstance lab-vm created (40s)
+ + pulumi:pulumi:Stack lab04-pulumi-dev creating (48s) error: update canceled
+ + pulumi:pulumi:Stack lab04-pulumi-dev **creating failed** 1 error
+Diagnostics:
+ pulumi:pulumi:Stack (lab04-pulumi-dev):
+ error: update canceled
+
+Resources:
+ + 5 created
+ 1 errored
+
+Duration: 50s
+
diff --git a/terraform/.gitignore b/terraform/.gitignore
new file mode 100644
index 0000000000..9a7b5a76e8
--- /dev/null
+++ b/terraform/.gitignore
@@ -0,0 +1,10 @@
+.terraform/
+.terraform.lock.hcl
+*.tfstate
+*.tfstate.*
+terraform.tfvars
+*.tfvars
+crash.log
+*.pem
+*.key
+*.json
diff --git a/terraform/github/.gitignore b/terraform/github/.gitignore
new file mode 100644
index 0000000000..491a4e6e8b
--- /dev/null
+++ b/terraform/github/.gitignore
@@ -0,0 +1,6 @@
+.terraform/
+.terraform.lock.hcl
+*.tfstate
+*.tfstate.*
+terraform.tfvars
+*.tfvars
diff --git a/terraform/github/main.tf b/terraform/github/main.tf
new file mode 100644
index 0000000000..49aae1b204
--- /dev/null
+++ b/terraform/github/main.tf
@@ -0,0 +1,26 @@
+terraform {
+ required_providers {
+ github = {
+ source = "integrations/github"
+ version = "~> 5.0"
+ }
+ }
+}
+
+provider "github" {
+ token = var.github_token
+}
+
+resource "github_repository" "course_repo" {
+ name = "DevOps-Core-Course"
+ description = "DevOps Core Course lab assignments"
+ visibility = "public"
+
+ has_issues = true
+ has_wiki = false
+ has_projects = false
+
+ allow_merge_commit = true
+ allow_squash_merge = true
+ allow_rebase_merge = true
+}
diff --git a/terraform/github/variables.tf b/terraform/github/variables.tf
new file mode 100644
index 0000000000..ac03f764b0
--- /dev/null
+++ b/terraform/github/variables.tf
@@ -0,0 +1,5 @@
+variable "github_token" {
+ description = "GitHub Personal Access Token"
+ type = string
+ sensitive = true
+}
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 0000000000..31c7769ff2
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,98 @@
+terraform {
+ required_providers {
+ yandex = {
+ source = "yandex-cloud/yandex"
+ version = ">= 0.87.0"
+ }
+ }
+ required_version = ">= 1.0"
+}
+
+provider "yandex" {
+ service_account_key_file = var.service_account_key_file
+ cloud_id = var.cloud_id
+ folder_id = var.folder_id
+ zone = var.zone
+}
+
+# Network
+resource "yandex_vpc_network" "lab_network" {
+ name = "lab-network"
+}
+
+# Subnet
+resource "yandex_vpc_subnet" "lab_subnet" {
+ name = "lab-subnet"
+ zone = var.zone
+ network_id = yandex_vpc_network.lab_network.id
+ v4_cidr_blocks = ["10.0.1.0/24"]
+}
+
+# Security group
+resource "yandex_vpc_security_group" "lab_sg" {
+ name = "lab-security-group"
+ network_id = yandex_vpc_network.lab_network.id
+
+ ingress {
+ protocol = "TCP"
+ port = 22
+ v4_cidr_blocks = ["0.0.0.0/0"]
+ description = "Allow SSH"
+ }
+
+ ingress {
+ protocol = "TCP"
+ port = 80
+ v4_cidr_blocks = ["0.0.0.0/0"]
+ description = "Allow HTTP"
+ }
+
+ ingress {
+ protocol = "TCP"
+ port = 5000
+ v4_cidr_blocks = ["0.0.0.0/0"]
+ description = "Allow app port 5000"
+ }
+
+ egress {
+ protocol = "ANY"
+ v4_cidr_blocks = ["0.0.0.0/0"]
+ description = "Allow all outbound"
+ }
+}
+
+# Compute instance (free tier: 2 vCPU 20%, 1GB RAM, 10GB HDD)
+resource "yandex_compute_instance" "lab_vm" {
+ name = "lab-vm"
+ platform_id = "standard-v2"
+ zone = var.zone
+
+ resources {
+ cores = 2
+ memory = 1
+ core_fraction = 20
+ }
+
+ boot_disk {
+ initialize_params {
+ image_id = var.image_id
+ size = 10
+ type = "network-hdd"
+ }
+ }
+
+ network_interface {
+ subnet_id = yandex_vpc_subnet.lab_subnet.id
+ nat = true
+ security_group_ids = [yandex_vpc_security_group.lab_sg.id]
+ }
+
+ metadata = {
+ ssh-keys = "ubuntu:${file(var.ssh_public_key_path)}"
+ }
+
+ labels = {
+ project = "devops-lab04"
+ task = "terraform"
+ }
+}
diff --git a/terraform/outputs.tf b/terraform/outputs.tf
new file mode 100644
index 0000000000..9668d66fac
--- /dev/null
+++ b/terraform/outputs.tf
@@ -0,0 +1,14 @@
+output "vm_public_ip" {
+ description = "Public IP address of the VM"
+ value = yandex_compute_instance.lab_vm.network_interface[0].nat_ip_address
+}
+
+output "vm_id" {
+ description = "ID of the created VM"
+ value = yandex_compute_instance.lab_vm.id
+}
+
+output "ssh_connection" {
+ description = "SSH connection command"
+ value = "ssh -i ~/.ssh/id_ed25519 ubuntu@${yandex_compute_instance.lab_vm.network_interface[0].nat_ip_address}"
+}
diff --git a/terraform/variables.tf b/terraform/variables.tf
new file mode 100644
index 0000000000..c319aed35c
--- /dev/null
+++ b/terraform/variables.tf
@@ -0,0 +1,33 @@
+variable "cloud_id" {
+ description = "Yandex Cloud ID"
+ type = string
+}
+
+variable "folder_id" {
+ description = "Yandex Cloud Folder ID"
+ type = string
+}
+
+variable "zone" {
+ description = "Yandex Cloud zone"
+ type = string
+ default = "ru-central1-a"
+}
+
+variable "service_account_key_file" {
+ description = "Path to service account key JSON file"
+ type = string
+ default = "~/yc-key.json"
+}
+
+variable "ssh_public_key_path" {
+ description = "Path to SSH public key"
+ type = string
+ default = "~/.ssh/id_ed25519.pub"
+}
+
+variable "image_id" {
+ description = "Boot disk image ID (Ubuntu 24.04 LTS)"
+ type = string
+ default = "fd8p685sjqdraf7mpkuc"
+}