diff --git a/.github/markdown-link-check-config.json b/.github/markdown-link-check-config.json index 726dd5c..7068688 100644 --- a/.github/markdown-link-check-config.json +++ b/.github/markdown-link-check-config.json @@ -2,6 +2,12 @@ "ignorePatterns": [ { "pattern": "^https://analytics.episkopos.community" + }, + { + "pattern": "^https://dev.episkopos.community" + }, + { + "pattern": "^http://api-server" } ], "timeout": "20s", diff --git a/.github/workflows/check-external-links.yml b/.github/workflows/check-external-links.yml index 45ea2af..674fdd5 100644 --- a/.github/workflows/check-external-links.yml +++ b/.github/workflows/check-external-links.yml @@ -40,12 +40,15 @@ jobs: --timeout 30000 \ --retry \ --concurrency 5 \ - --skip 'analytics.episkopos.community' \ + --skip 'analytics.episkopos.community|dev.episkopos.community' \ --format json > linkinator-report.json || true - # Parse results for summary + # Parse results — only count external broken links (skip internal + # relative paths that fail because linkinator can't resolve + # extensionless Docusaurus URLs from the filesystem; Docusaurus + # validates internal links at build time) if [ -f linkinator-report.json ]; then - BROKEN=$(jq '[.links[] | select(.status >= 400 or .status == 0)] | length' linkinator-report.json) + BROKEN=$(jq '[.links[] | select((.status >= 400 or .status == 0) and (.url | startswith("http")))] | length' linkinator-report.json) TOTAL=$(jq '.links | length' linkinator-report.json) echo "broken_count=$BROKEN" >> $GITHUB_OUTPUT echo "total_count=$TOTAL" >> $GITHUB_OUTPUT @@ -58,7 +61,7 @@ jobs: echo "" >> $GITHUB_STEP_SUMMARY if [ -f linkinator-report.json ]; then - BROKEN=$(jq '[.links[] | select(.status >= 400 or .status == 0)]' linkinator-report.json) + BROKEN=$(jq '[.links[] | select((.status >= 400 or .status == 0) and (.url | startswith("http")))]' linkinator-report.json) BROKEN_COUNT=$(echo "$BROKEN" | jq 'length') if [ "$BROKEN_COUNT" -gt 0 ]; then diff --git 
a/.github/workflows/check-readme-links.yml b/.github/workflows/check-readme-links.yml index a56fdda..262df9b 100644 --- a/.github/workflows/check-readme-links.yml +++ b/.github/workflows/check-readme-links.yml @@ -29,4 +29,4 @@ jobs: use-verbose-mode: 'yes' config-file: '.github/markdown-link-check-config.json' folder-path: 'docs,prompts,curriculum' - file-path: './README.md,./CONTRIBUTING.md' + file-path: './README.md' diff --git a/curriculum/README.md b/curriculum/README.md index ebccced..6e9e0a3 100644 --- a/curriculum/README.md +++ b/curriculum/README.md @@ -55,4 +55,14 @@ A structured learning path for AI-assisted software development. --- +## Part V: Infrastructure & Operations + +| Module | Topic | Status | +|--------|-------|--------| +| 19 | [Containerization & Orchestration](https://dev.episkopos.community/docs/curriculum/part-5-infrastructure/containerization-and-orchestration) | ✓ | +| 20 | Observability & Reliability | Planned | +| 21 | Infrastructure as Code | Planned | + +--- + *See the [full documentation](https://dev.episkopos.community/docs/curriculum) for complete module content, exercises, and examples.* diff --git a/curriculum/part-3-building-applications/15-deployed-app.md b/curriculum/part-3-building-applications/15-deployed-app.md index 9e0de69..ea18d6e 100644 --- a/curriculum/part-3-building-applications/15-deployed-app.md +++ b/curriculum/part-3-building-applications/15-deployed-app.md @@ -27,6 +27,10 @@ A production-deployed chat app with: 4 exercises covering deployment setup, environment configuration, CI/CD pipelines, and health monitoring. +## What's Next + +Want more control over your deployment? [Module 19: Containerization & Orchestration](../part-5-infrastructure-and-operations/19-containerization-and-orchestration.md) covers Docker and Kubernetes — the same tools used by teams at every scale. 
+ --- *See the full module for step-by-step deployment guides.* diff --git a/curriculum/part-5-infrastructure-and-operations/19-containerization-and-orchestration.md b/curriculum/part-5-infrastructure-and-operations/19-containerization-and-orchestration.md new file mode 100644 index 0000000..12eff90 --- /dev/null +++ b/curriculum/part-5-infrastructure-and-operations/19-containerization-and-orchestration.md @@ -0,0 +1,1186 @@ +# Module 19: Containerization & Orchestration + +**From "works on my machine" to "runs anywhere" — Docker, Kubernetes, and the infrastructure mental model for modern software.** + +--- + +## Learning Objectives + +By the end of this module, you will: + +- Understand why containers exist and what problems they solve +- Write Dockerfiles and build container images +- Run multi-container applications with Docker Compose +- Understand Kubernetes primitives and what role each plays +- Evaluate the K8s-first development stack (kind, Tilt, k3s) +- Articulate when containerization is the right choice + +**Time**: 4-5 hours (reading + exercises) + +--- + +## Introduction + +In Part III, you deployed your chat app to managed platforms like Vercel and Railway. You pushed code, the platform built it, and gave you a URL. That works — until it doesn't. + +What happens when: +- Your app needs a specific version of Node, plus a Redis cache, plus a PostgreSQL database? +- A new teammate joins and spends a full day getting the project running locally? +- Your staging environment behaves differently from production? +- You need to run 10 copies of your backend behind a load balancer? + +These are **environment problems** — and containers solve them. + +A container packages your application with everything it needs to run: code, runtime, libraries, system tools. If it runs in the container, it runs the same way everywhere — your laptop, your teammate's laptop, staging, production. + +This module covers two layers: + +1. 
**Docker** — How to build and run containers (the practical foundation) +2. **Kubernetes** — How to orchestrate containers at scale (the awareness layer) + +We'll spend most of our time on Docker because it's the skill you'll use daily. Kubernetes gets enough coverage to build intuition — so when the topic comes up in your team, you can ask the right questions and follow the conversation. + +--- + +## Architecture: Containers vs. Traditional Deployment + +### Traditional Deployment (Module 15) + +``` +Your Laptop Production Server +├── Node v20 ├── Node v18 (different!) +├── npm packages ├── npm packages (maybe different) +├── OS: macOS ├── OS: Ubuntu Linux +└── Your app code └── Your app code + └── "works here" └── "crashes there" +``` + +### Container-Based Deployment + +``` +Your Laptop Production Server +├── Docker Engine ├── Docker Engine (or K8s) +└── Container └── Container (identical image) + ├── Node v20 ├── Node v20 + ├── npm packages ├── npm packages + ├── Alpine Linux ├── Alpine Linux + └── Your app code └── Your app code + └── "works here" └── "works here too" +``` + +**Key insight**: The container IS the environment. Ship the container, ship the guarantee. + +--- + +## Part 1: The Problem Containers Solve + +### "Works On My Machine" + +Every developer has heard (or said) this. The root cause is always the same: **the environment is different**. + +Differences that break things: +- Operating system (macOS vs. Linux vs. Windows) +- Language runtime version (Node 18 vs. 20) +- System libraries (OpenSSL 1.1 vs. 3.0) +- Environment variables (missing or different values) +- File paths and permissions +- Installed tools and their versions + +### Before Containers: Virtual Machines + +Virtual machines (VMs) solved this by running an entire operating system inside another operating system. You could ship a VM image and guarantee identical environments. + +The problem: VMs are heavy. 
Each one runs a full OS kernel, needs gigabytes of RAM, and takes minutes to start. + +``` +VM Approach: +┌──────────────────────────────────┐ +│ Host OS (your laptop) │ +│ ┌────────────────────────────┐ │ +│ │ VM (full guest OS) │ │ +│ │ ┌──────────────────────┐ │ │ +│ │ │ Your App │ │ │ +│ │ │ + Runtime │ │ │ +│ │ │ + Libraries │ │ │ +│ │ └──────────────────────┘ │ │ +│ │ Full Linux Kernel │ │ +│ │ Full OS Utilities │ │ +│ │ ~1-2 GB overhead │ │ +│ └────────────────────────────┘ │ +└──────────────────────────────────┘ +``` + +### Containers: Lightweight Isolation + +Containers share the host OS kernel but isolate everything above it. They package just your application and its dependencies — no redundant OS. + +``` +Container Approach: +┌──────────────────────────────────┐ +│ Host OS + Container Runtime │ +│ ┌────────────┐ ┌────────────┐ │ +│ │ Container A │ │ Container B │ │ +│ │ Your App │ │ Database │ │ +│ │ + Runtime │ │ + Config │ │ +│ │ ~100 MB │ │ ~200 MB │ │ +│ └────────────┘ └────────────┘ │ +│ Shared OS Kernel │ +└──────────────────────────────────┘ +``` + +**Result**: Start in seconds, use megabytes instead of gigabytes, run dozens on a single laptop. + +### Containers Are Not VMs + +This distinction matters: + +| | Virtual Machine | Container | +|---|---|---| +| Isolation | Full OS | Process-level | +| Size | Gigabytes | Megabytes | +| Startup | Minutes | Seconds | +| Overhead | High (full kernel) | Minimal (shared kernel) | +| Use case | Different OS needs | Application packaging | + +Containers use Linux kernel features (namespaces and cgroups) to isolate processes without the overhead of running a separate kernel. This is why Docker containers are Linux-native — on macOS and Windows, Docker runs a lightweight Linux VM under the hood to provide that kernel. + +--- + +## Part 2: Docker Fundamentals + +### Key Concepts + +**Image**: A read-only template containing your application and everything it needs to run. 
Think of it as a snapshot — a frozen, portable environment. + +**Container**: A running instance of an image. You can run multiple containers from the same image. Each gets its own isolated filesystem, network, and process space. + +**Dockerfile**: A text file with instructions for building an image. It's the recipe. + +**Registry**: A storage service for images. Docker Hub is the default public registry. Your team might use a private one (GitHub Container Registry, AWS ECR, etc.). + +``` +Dockerfile → docker build → Image → docker run → Container +(recipe) (cook) (dish) (serve) (running) +``` + +### Writing a Dockerfile + +Here's a Dockerfile for the chat app's Express backend: + +```dockerfile +# Start from a base image with Node.js installed +FROM node:20-alpine + +# Set the working directory inside the container +WORKDIR /app + +# Copy package files first (for better caching) +COPY package.json package-lock.json ./ + +# Install dependencies +RUN npm ci --only=production + +# Copy the rest of the application code +COPY . . + +# Tell Docker which port the app listens on +EXPOSE 3001 + +# The command to run when the container starts +CMD ["node", "src/index.js"] +``` + +Let's break down each instruction: + +| Instruction | What it does | +|---|---| +| `FROM` | Base image to build on. `node:20-alpine` = Node.js 20 on Alpine Linux (tiny) | +| `WORKDIR` | Sets the working directory for subsequent instructions | +| `COPY` | Copies files from your machine into the image | +| `RUN` | Executes a command during the build (here: install dependencies) | +| `EXPOSE` | Documents which port the app uses (doesn't actually publish it) | +| `CMD` | The default command when a container starts from this image | + +### Building an Image + +```bash +docker build -t chat-backend:latest . 
+``` + +- `-t chat-backend:latest` — Tags the image with a name and version +- `.` — Build context (current directory, where the Dockerfile is) + +### Running a Container + +```bash +docker run -p 3001:3001 -e NODE_ENV=production chat-backend:latest +``` + +- `-p 3001:3001` — Map host port 3001 to container port 3001 +- `-e NODE_ENV=production` — Set an environment variable +- `chat-backend:latest` — The image to run + +Your app is now running inside a container, accessible at `localhost:3001`. + +### Image Layers and Caching + +Each Dockerfile instruction creates a **layer**. Docker caches layers and only rebuilds from the point where something changed. This is why we copy `package.json` before the application code: + +```dockerfile +# These layers are cached if package.json hasn't changed +COPY package.json package-lock.json ./ +RUN npm ci --only=production + +# Only this layer rebuilds when you change application code +COPY . . +``` + +If you only changed a source file, Docker reuses the cached `npm ci` layer — saving minutes on every build. + +### The .dockerignore File + +Like `.gitignore`, but for Docker builds. Keeps unnecessary files out of your image: + +``` +node_modules +.git +.env +*.md +.DS_Store +``` + +This matters because `COPY . .` copies everything in the build context. Without `.dockerignore`, you'd copy `node_modules` (then install them again), `.git` history, and other waste. + +### Multi-Stage Builds + +For the React frontend, you need to build static files but don't need the build tools in production: + +```dockerfile +# Stage 1: Build +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci +COPY . . +RUN npm run build + +# Stage 2: Serve +FROM caddy:2-alpine +COPY Caddyfile /etc/caddy/Caddyfile +COPY --from=build /app/dist /srv +EXPOSE 80 +``` + +**Stage 1** installs all dependencies (including dev) and builds the app. **Stage 2** copies only the built files into a lightweight Caddy image. 
The final image is tiny — just Caddy and your static files, no Node.js, no source code, no `node_modules`. + +Caddy needs a minimal config file (`Caddyfile`) for SPA routing: + +``` +:80 { + root * /srv + try_files {path} /index.html + file_server +} +``` + +`try_files` ensures that client-side routes (like `/chat/room/1`) serve `index.html` instead of returning 404. + +--- + +## Part 3: Docker Compose — Multi-Container Applications + +Real applications aren't a single container. Your chat app needs at least a frontend and a backend. Add a database, a cache, a message queue — suddenly you're managing many containers that need to talk to each other. + +### The Problem + +Running containers individually: + +```bash +# Create a network so containers can communicate +docker network create chat-app + +# Start the database +docker run -d --name postgres --network chat-app \ + -e POSTGRES_PASSWORD=secret \ + postgres:16-alpine + +# Start the backend +docker run -d --name backend --network chat-app \ + -e DATABASE_URL=postgresql://postgres:secret@postgres:5432/chat \ + -p 3001:3001 \ + chat-backend:latest + +# Start the frontend +docker run -d --name frontend --network chat-app \ + -p 8080:80 \ + chat-frontend:latest +``` + +This is tedious, error-prone, and hard to reproduce. Docker Compose solves it. 
+ +### docker-compose.yml + +```yaml +services: + traefik: + image: traefik:v3.2 + command: + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--entrypoints.web.address=:80" + ports: + - "8080:80" + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + + frontend: + build: + context: ./client + labels: + - "traefik.enable=true" + - "traefik.http.routers.frontend.rule=PathPrefix(`/`)" + - "traefik.http.routers.frontend.priority=1" + - "traefik.http.services.frontend.loadbalancer.server.port=80" + depends_on: + - backend + + backend: + build: + context: ./server + environment: + NODE_ENV: production + DATABASE_URL: postgresql://postgres:secret@db:5432/chat + labels: + - "traefik.enable=true" + - "traefik.http.routers.backend.rule=PathPrefix(`/api`)" + - "traefik.http.routers.backend.priority=2" + - "traefik.http.services.backend.loadbalancer.server.port=3001" + depends_on: + - db + + db: + image: postgres:16-alpine + environment: + POSTGRES_DB: chat + POSTGRES_PASSWORD: secret + volumes: + - pgdata:/var/lib/postgresql/data + ports: + - "5432:5432" + +volumes: + pgdata: +``` + +Traefik acts as a reverse proxy, using Docker labels to discover services and route traffic. Requests to `/api/*` go to the backend (priority 2), everything else goes to the frontend (priority 1). The frontend uses `/api` as its API base URL — same origin, no CORS needed. + +### Running the Full Stack + +```bash +# Start everything +docker compose up + +# Start in background +docker compose up -d + +# View logs +docker compose logs -f backend + +# Stop everything +docker compose down + +# Stop and remove data volumes +docker compose down -v +``` + +One command. Every service starts, connects to the same network, and can find each other by service name (`backend` can reach `db` at hostname `db`). + +### Key Compose Concepts + +**Services**: Each entry under `services:` becomes a container. 
The name (`frontend`, `backend`, `db`) becomes the hostname on the internal network. + +**build vs. image**: Use `build` to build from a local Dockerfile, `image` to pull a pre-built image from a registry. + +**depends_on**: Controls startup order. `backend` waits for `db` to start (but not necessarily to be ready — that's an important distinction). + +**volumes**: Persist data outside the container. Without `pgdata`, your database would lose all data when the container stops. + +**ports**: Map `host:container` ports. `"5432:5432"` makes the database accessible from your host machine at `localhost:5432`. + +**environment**: Set environment variables. For sensitive values, use a `.env` file: + +```yaml +backend: + env_file: + - .env +``` + +### Compose for Development + +Compose is particularly valuable for local development. You can override settings for dev: + +```yaml +# docker-compose.override.yml (automatically loaded) +services: + backend: + volumes: + - ./server/src:/app/src # Mount source code for live changes + environment: + NODE_ENV: development + command: ["npx", "nodemon", "src/index.js"] +``` + +Now changes to your source code are reflected immediately inside the container — no rebuild needed. + +--- + +## Part 4: Volumes, Networks, and State + +### Volumes: Persistent Data + +Containers are **ephemeral** — when they stop, any data written inside them disappears. Volumes solve this. 
+ +```yaml +volumes: + pgdata: # Named volume — Docker manages the storage location + +services: + db: + image: postgres:16-alpine + volumes: + - pgdata:/var/lib/postgresql/data # Persist database files +``` + +Three types of mounts: + +| Type | Syntax | Use Case | +|---|---|---| +| Named volume | `pgdata:/data` | Database storage, persistent data | +| Bind mount | `./src:/app/src` | Development (live code changes) | +| tmpfs | `tmpfs: /tmp` | Temporary data (never written to disk) | + +### Networks: Container Communication + +Docker Compose creates a default network for all services. Containers reach each other by service name: + +```javascript +// Inside the backend container, "db" resolves to the database container +const pool = new Pool({ + connectionString: 'postgresql://postgres:secret@db:5432/chat' + // ^^ service name +}); +``` + +You don't need to know IP addresses. Docker's internal DNS handles it. + +### Managing State Across Restarts + +```bash +# Data survives container restarts +docker compose down # Containers removed, volumes kept +docker compose up -d # New containers, same data + +# Nuclear option — remove everything including data +docker compose down -v # -v removes volumes too +``` + +--- + +## Part 5: Why Orchestration? + +Docker Compose works well for development and simple deployments. But production systems have requirements that Compose alone can't meet. + +### The Scaling Problem + +``` +docker compose up --scale backend=3 +``` + +This starts 3 backend containers, but: +- How does traffic get distributed between them? +- What if one crashes? Who restarts it? +- How do you update without downtime? +- What if you need containers spread across multiple servers? + +### What Orchestration Provides + +An orchestrator manages containers across a cluster of machines. You declare what you want ("run 3 copies of my backend, always"), and the orchestrator makes it happen. 
+ +| Concern | Docker Compose | Orchestrator (K8s) | +|---|---|---| +| Scaling | Manual (`--scale`) | Automatic (CPU/memory rules) | +| Self-healing | None (container dies, stays dead) | Restarts automatically | +| Load balancing | Not built in | Built in | +| Rolling updates | Stop all, start all | Zero-downtime updates | +| Multi-server | Single host only | Cluster of machines | +| Service discovery | DNS by container name | DNS + advanced routing | + +### Kubernetes: The Industry Standard + +Kubernetes (often written K8s — K, 8 middle letters, s) is the dominant container orchestrator. Originally designed at Google, now open source and maintained by the Cloud Native Computing Foundation (CNCF). + +You don't need to master Kubernetes right now. But as a developer working on containerized applications, you need to understand its vocabulary and mental model so you can: + +- Read and modify deployment manifests +- Understand what your platform team is talking about +- Debug issues in staging and production environments +- Make informed architectural decisions + +--- + +## Part 6: Kubernetes Primitives + +Kubernetes has a lot of concepts, but the core ones you'll encounter daily are fewer than you think. Each solves a specific problem. + +### The Mental Model + +Kubernetes works on **declarative state**: you describe what you want, and Kubernetes continuously works to make reality match your description. If a container crashes, Kubernetes notices the mismatch and creates a new one. + +``` +You declare: "I want 3 copies of my backend running" +Kubernetes sees: 2 running (one crashed) +Kubernetes acts: Starts a new one +Result: 3 running again +``` + +### Pod + +The smallest deployable unit. A Pod wraps one or more containers that share storage and network. In practice, most Pods contain a single container. 
```yaml
# You rarely write Pod manifests directly — Deployments manage them
apiVersion: v1
kind: Pod
metadata:
  name: chat-backend
spec:
  containers:
    - name: backend
      image: chat-backend:latest
      ports:
        - containerPort: 3001
```

**Why it matters**: When someone says "the pod is crashing," they mean your container is failing to start or run. `kubectl logs <pod-name>` is how you see what went wrong.

### Deployment

Manages a set of identical Pods. Handles scaling, updates, and self-healing.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: chat-backend
spec:
  replicas: 3 # Run 3 copies
  selector:
    matchLabels:
      app: chat-backend
  template: # Pod template
    metadata:
      labels:
        app: chat-backend
    spec:
      containers:
        - name: backend
          image: chat-backend:latest
          ports:
            - containerPort: 3001
          envFrom:
            - configMapRef:
                name: backend-config
```

**Key behavior**: If you update the image tag and apply the manifest, Kubernetes performs a **rolling update** — starting new Pods before stopping old ones, ensuring zero downtime.

### StatefulSet

Like a Deployment, but for workloads that need stable identity and persistent storage — primarily databases.

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
spec:
  serviceName: postgres
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
        - name: postgres
          image: postgres:16-alpine
          volumeMounts:
            - name: pgdata
              mountPath: /var/lib/postgresql/data
  volumeClaimTemplates:
    - metadata:
        name: pgdata
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
```

**Deployment vs. StatefulSet**: Use Deployments for stateless services (your API, frontend). Use StatefulSets for stateful workloads (databases, caches) that need stable network identities and persistent volumes.

### Service

Provides a stable network endpoint for a set of Pods.
Since Pods are ephemeral (they come and go), you need something permanent to point to. + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: chat-backend +spec: + selector: + app: chat-backend # Routes to all Pods with this label + ports: + - port: 80 # Service port + targetPort: 3001 # Container port + type: ClusterIP # Internal only (default) +``` + +Other Pods in the cluster can now reach the backend at `http://chat-backend:80`. The Service load-balances across all matching Pods automatically. + +**Service types**: +- `ClusterIP` — Internal only (default, most common) +- `NodePort` — Exposes on each node's IP at a static port +- `LoadBalancer` — Provisions an external load balancer (cloud providers) + +### Ingress + +Routes external HTTP traffic to internal Services. This is how the outside world reaches your app. + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: chat-ingress +spec: + rules: + - host: chat.example.com + http: + paths: + - path: /api + pathType: Prefix + backend: + service: + name: chat-backend + port: + number: 80 + - path: / + pathType: Prefix + backend: + service: + name: chat-frontend + port: + number: 80 +``` + +**How it works**: An Ingress Controller (like Traefik, which is built into k3s) reads these rules and configures routing. `chat.example.com/api/*` goes to your backend Service, everything else goes to your frontend Service. + +### ConfigMap + +Stores non-sensitive configuration as key-value pairs, decoupled from your container image. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: backend-config +data: + NODE_ENV: "production" + CORS_ORIGIN: "https://chat.example.com" + LOG_LEVEL: "info" +``` + +Referenced by Pods via `envFrom` (load all keys as env vars) or `env` (load specific keys). Change the ConfigMap and restart the Pod — no image rebuild needed. + +For **sensitive** values (passwords, API keys), use a `Secret` instead of a ConfigMap. 
Secrets are base64-encoded and can be encrypted at rest. + +### Kustomize + +A tool (built into `kubectl`) for managing Kubernetes manifests across environments without templating. Instead of one massive YAML file with `if/else` logic, you write a clean base and overlay environment-specific changes. + +``` +k8s/ +├── base/ +│ ├── kustomization.yaml +│ ├── deployment.yaml +│ ├── service.yaml +│ └── configmap.yaml +└── overlays/ + ├── development/ + │ └── kustomization.yaml # Overrides for dev + ├── staging/ + │ └── kustomization.yaml # Overrides for staging + └── production/ + └── kustomization.yaml # Overrides for prod +``` + +Base `kustomization.yaml`: +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - deployment.yaml + - service.yaml + - configmap.yaml +``` + +Production overlay `kustomization.yaml`: +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../base +patches: + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: chat-backend + spec: + replicas: 3 +``` + +Apply to a specific environment: +```bash +kubectl apply -k k8s/overlays/production +``` + +**Why Kustomize over Helm?** Helm uses templates (Go templating in YAML — hard to read, hard to debug). Kustomize uses plain YAML with strategic merge patches. For most applications, Kustomize is simpler and sufficient. Helm is better suited for packaging software you distribute to others. 
+ +### Putting It All Together + +Here's how the primitives compose for the chat app: + +``` +Internet + │ + ▼ +Ingress (routes /api → backend, / → frontend) + │ + ├─▶ Service: chat-frontend + │ └─▶ Deployment (2 replicas) + │ ├─▶ Pod: frontend-abc12 + │ └─▶ Pod: frontend-def34 + │ + ├─▶ Service: chat-backend + │ └─▶ Deployment (3 replicas) + │ ├─▶ Pod: backend-ghi56 + │ ├─▶ Pod: backend-jkl78 + │ └─▶ Pod: backend-mno90 + │ + └─▶ Service: postgres + └─▶ StatefulSet (1 replica) + └─▶ Pod: postgres-0 + └─▶ PersistentVolume (1Gi) + +ConfigMaps: backend-config, frontend-config +Secrets: db-credentials +``` + +--- + +## Part 7: The K8s-First Development Stack + +Here's an opinionated take: for web-based software products, start with Kubernetes from day one. Not because you need the scale, but because the development workflow is better than you'd expect — and the production story is dramatically simpler when you get there. + +### The Stack + +| Tool | Role | Where | +|---|---|---| +| **kind** | Runs a K8s cluster inside Docker containers | Local development | +| **Tilt** | Watches your code, rebuilds and deploys to the cluster automatically | Local development | +| **k3s** | Lightweight, certified Kubernetes distribution | Production | +| **Kustomize** | Manages environment-specific configuration | Everywhere | + +### kind: Kubernetes in Docker + +[kind](https://kind.sigs.k8s.io/) runs a full Kubernetes cluster inside Docker containers. It's designed for testing Kubernetes itself, but it's an excellent local development tool. + +```bash +# Create a cluster +kind create cluster --name chat-dev + +# Your kubectl now points to the local cluster +kubectl cluster-info + +# Delete when done +kind delete cluster --name chat-dev +``` + +**Why kind over minikube?** kind is faster to start, uses fewer resources, and creates clusters identically to CI environments. It runs inside Docker, which you already have installed. 
+ +### Tilt: The Developer Experience Layer + +[Tilt](https://tilt.dev/) is the tool that makes K8s development feel like local development. Without Tilt, deploying to a local K8s cluster means manually rebuilding images and reapplying manifests on every code change. Tilt automates all of it. + +You define a `Tiltfile` (written in Starlark, a Python-like language): + +```python +# Tiltfile + +# Build the backend image and deploy to K8s +docker_build('chat-backend', './server') +k8s_yaml('k8s/base/backend-deployment.yaml') + +# Build the frontend image and deploy to K8s +docker_build('chat-frontend', './client') +k8s_yaml('k8s/base/frontend-deployment.yaml') + +# Database — use the image directly, no build needed +k8s_yaml('k8s/base/postgres-statefulset.yaml') + +# Services and ingress +k8s_yaml('k8s/base/services.yaml') +k8s_yaml('k8s/base/ingress.yaml') + +# Port forwards for local access +k8s_resource('chat-backend', port_forwards='3001:3001') +k8s_resource('chat-frontend', port_forwards='8080:80') +``` + +Run `tilt up` and Tilt: +1. Builds your Docker images +2. Deploys everything to your kind cluster +3. Watches your source code for changes +4. Rebuilds and redeploys automatically on save +5. Streams logs from all containers +6. Provides a web dashboard showing the status of all services + +**This is the key insight**: with Tilt, the development experience is comparable to `docker compose up` with live reload — but you're running real Kubernetes. Same manifests, same networking model, same configuration. The gap between dev and prod shrinks to nearly zero. + +### k3s: Production Kubernetes Without the Complexity + +[k3s](https://k3s.io/) is a lightweight Kubernetes distribution built for production. It's fully certified K8s packaged as a single binary under 100MB. 
+ +What makes k3s practical for smaller teams: +- **Single binary install**: `curl -sfL https://get.k3s.io | sh -` +- **Batteries included**: Built-in ingress controller (Traefik), load balancer, and storage +- **Low resource usage**: Runs on machines with 512MB RAM +- **Same API**: Anything that works on "full" Kubernetes works on k3s + +k3s runs in production for thousands of organizations, from edge deployments to multi-node clusters. It's not a toy — it's Kubernetes without the operational overhead of managing etcd clusters and control plane components separately. + +### The Argument for K8s-First + +**"Isn't Kubernetes overkill for a small project?"** + +The traditional thinking is: start simple (Heroku/Railway), outgrow it, then migrate to Kubernetes. This migration is expensive — you're rewriting deployment infrastructure at the same time your app is growing and your team is busy. + +The K8s-first alternative: + +1. **Local-production parity from day one.** Your `docker compose up` becomes `tilt up`. Same containers, same networking. But now your manifests *are* your production configuration. + +2. **No migration tax.** You never have to rewrite deployment. The same K8s manifests that run on kind locally run on k3s in production. Add Kustomize overlays for environment differences. + +3. **Tilt makes it developer-friendly.** The "Kubernetes is too complicated for development" argument assumed you were running `kubectl apply` manually. Tilt eliminates that friction. + +4. **k3s makes it operations-friendly.** You don't need a dedicated platform team to run k3s. A single $10/month VPS can run your entire stack. + +5. **Scales without architecture changes.** When you need 3 replicas instead of 1, change a number in a YAML file. When you need a second node, join it to the cluster. No re-platforming. 
+ +**When this approach is NOT the right call:** +- **Static sites and JAMstack** — Vercel/Netlify are purpose-built and better +- **Serverless workloads** — Functions that run infrequently don't need always-on containers +- **You're the only developer and want maximum simplicity** — Railway/Render have lower initial learning investment +- **Your team has zero container experience** — Get comfortable with Docker first, then consider K8s + +The goal isn't dogma. It's recognizing that for **web applications with a backend, database, and foreseeable scaling needs**, the K8s-first stack (kind + Tilt + k3s + Kustomize) offers a better long-term trajectory than starting on a managed platform and migrating later. + +--- + +## Exercise 1: Containerize the Chat App Backend + +Write a Dockerfile for the chat app's Express backend. + +**Requirements:** +1. Use `node:20-alpine` as the base image +2. Set the working directory to `/app` +3. Copy and install dependencies first (layer caching) +4. Copy application code +5. Expose port 3001 +6. Set the default command + +**Test it:** +```bash +docker build -t chat-backend:latest ./server +docker run -p 3001:3001 -e NODE_ENV=production chat-backend:latest +curl http://localhost:3001/api/health +``` + +
+Solution + +```dockerfile +FROM node:20-alpine +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci --only=production +COPY . . +EXPOSE 3001 +CMD ["node", "src/index.js"] +``` + +And `.dockerignore`: +``` +node_modules +.git +.env +*.md +.DS_Store +``` + +
+ +--- + +## Exercise 2: Multi-Stage Frontend Build + +Write a multi-stage Dockerfile for the React frontend. + +**Requirements:** +1. Stage 1 (`build`): Install dependencies and run `npm run build` +2. Stage 2: Copy built files into a `caddy:2-alpine` image with a `Caddyfile` for SPA routing +3. The final image should contain only Caddy and the static files + +**Test it:** +```bash +docker build -t chat-frontend:latest ./client +docker run -p 8080:80 chat-frontend:latest +# Visit http://localhost:8080 +``` + +
+Solution + +```dockerfile +# Stage 1: Build +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci +COPY . . +RUN npm run build + +# Stage 2: Serve +FROM caddy:2-alpine +COPY Caddyfile /etc/caddy/Caddyfile +COPY --from=build /app/dist /srv +EXPOSE 80 +``` + +And `Caddyfile`: +``` +:80 { + root * /srv + try_files {path} /index.html + file_server +} +``` + +
+ +--- + +## Exercise 3: Docker Compose + +Write a `docker-compose.yml` that runs the frontend, backend, and a PostgreSQL database together. + +**Requirements:** +1. Frontend served on port 8080 +2. Backend on port 3001 with environment variables for the database +3. PostgreSQL with a named volume for persistence +4. Backend depends on the database; frontend depends on the backend + +**Test it:** +```bash +docker compose up +# Frontend at http://localhost:8080 +# Backend at http://localhost:3001/api/health +# Database at localhost:5432 +``` + +
+Solution + +See the [19-chat-docker-compose example](../../examples/19-chat-docker-compose/) for the complete working setup. + +
+ +--- + +## Exercise 4: Read a Kubernetes Manifest + +Given this manifest, answer the questions below: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: api-server +spec: + replicas: 2 + selector: + matchLabels: + app: api + template: + metadata: + labels: + app: api + spec: + containers: + - name: api + image: myapp/api:v1.2.3 + ports: + - containerPort: 8080 + envFrom: + - configMapRef: + name: api-config +--- +apiVersion: v1 +kind: Service +metadata: + name: api-server +spec: + selector: + app: api + ports: + - port: 80 + targetPort: 8080 +``` + +**Questions:** +1. How many copies of the API will be running? +2. What Docker image is being used? +3. How would another Pod in the cluster reach this service? +4. If a Pod crashes, what happens? +5. Where does the Pod get its environment variables? + +
+Answers + +1. **2 replicas** — specified by `replicas: 2` +2. **myapp/api:v1.2.3** — specified in the container spec +3. **http://api-server:80** — the Service name becomes a DNS entry, port 80 maps to container port 8080 +4. **Kubernetes creates a replacement** — the Deployment controller notices the actual state (1 Pod) doesn't match desired state (2 Pods) and creates a new one +5. **From the ConfigMap named `api-config`** — `envFrom` with `configMapRef` loads all keys from the ConfigMap as environment variables + +
+ +--- + +## Exercise 5: Design a Kustomize Overlay + +Your chat app runs in development (1 replica, debug logging) and production (3 replicas, info logging). Using the Kustomize structure from Part 6, write the production overlay that changes the replica count. + +
+Solution + +`k8s/overlays/production/kustomization.yaml`: +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../base +patches: + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: chat-backend + spec: + replicas: 3 + - patch: |- + apiVersion: v1 + kind: ConfigMap + metadata: + name: backend-config + data: + LOG_LEVEL: "info" +``` + +Apply with: `kubectl apply -k k8s/overlays/production` + +
+
+---
+
+## Common Issues
+
+### "Cannot connect to the Docker daemon"
+
+```
+Cannot connect to the Docker daemon at unix:///var/run/docker.sock
+```
+
+**Fix**: Docker Desktop (or Docker Engine) isn't running. Start it, then try again.
+
+### Port Already in Use
+
+```
+Error: port is already allocated
+```
+
+**Fix**: Another process (or container) is using that port. Stop it, or map to a different host port: `-p 3002:3001`.
+
+### Image Build Fails at npm ci
+
+```
+npm ERR! could not determine executable to run
+```
+
+**Fix**: Make sure `package-lock.json` is included in the `COPY` and isn't in `.dockerignore`.
+
+### Container Starts Then Immediately Exits
+
+**Fix**: Check logs with `docker logs <container-id>`. Common causes:
+- Missing environment variables
+- Database not reachable (if using `depends_on`, the database may not be *ready* yet — just started)
+- Application crash on startup
+
+### "Connection Refused" Between Containers
+
+**Fix**: Use the **service name** (not `localhost`) as the hostname. Inside a Docker network, containers reach each other by name. `localhost` inside a container means that container itself.
+
+---
+
+## Key Takeaways
+
+1. **Containers solve environment problems** — Package your app with its dependencies, and it runs the same everywhere.
+
+2. **Images are recipes, containers are instances** — Build once, run many times. Each container is isolated.
+
+3. **Layer caching is your friend** — Copy dependency files before source code. Structure your Dockerfile for fast rebuilds.
+
+4. **Docker Compose is essential for local development** — One `docker compose up` replaces a page of setup instructions.
+
+5. **Kubernetes manages containers at scale** — Declarative state, self-healing, rolling updates, service discovery. You describe what you want; K8s makes it happen.
+
+6. **You don't need to master K8s to benefit from it** — Learn the primitives (Pod, Deployment, Service, Ingress, ConfigMap, Kustomize). Understand the vocabulary. 
Ask good questions. + +7. **The K8s-first stack is worth evaluating** — kind + Tilt (dev) and k3s (prod) with Kustomize (config) offers local-prod parity without the traditional Kubernetes complexity tax. + +--- + +## What's Next + +This module gave you the foundation for containerized development and an awareness of Kubernetes. From here: + +- **Practice**: Work through the [Docker Compose example](../../examples/19-chat-docker-compose/) and the [Kubernetes example](../../examples/20-chat-kubernetes/) to get hands-on experience +- **Go deeper on Docker**: Learn about health checks, resource limits, and security scanning +- **Explore K8s further**: Set up a kind cluster and deploy the chat app with Tilt +- **Module 20 (planned)**: Observability & Reliability — monitoring what your containers are doing in production +- **Module 21 (planned)**: Infrastructure as Code — managing the infrastructure itself declaratively diff --git a/curriculum/part-5-infrastructure-and-operations/README.md b/curriculum/part-5-infrastructure-and-operations/README.md new file mode 100644 index 0000000..0d0a33a --- /dev/null +++ b/curriculum/part-5-infrastructure-and-operations/README.md @@ -0,0 +1,61 @@ +# Part V: Infrastructure & Operations + +From running on localhost to running in production — containers, orchestration, and the infrastructure that powers modern software. + +**Full content**: [dev.episkopos.community/docs/curriculum/part-5-infrastructure](https://dev.episkopos.community/docs/curriculum/part-5-infrastructure) + +--- + +## Modules + +| Module | Topic | Status | +|--------|-------|--------| +| 19 | [Containerization & Orchestration](./19-containerization-and-orchestration.md) | ✓ | +| 20 | Observability & Reliability | Planned | +| 21 | Infrastructure as Code | Planned | + +--- + +## The Journey + +Part V picks up where Part III left off. 
You deployed your app to managed platforms — now you'll learn the infrastructure patterns that professional teams use to ship and operate software at any scale. + +``` +Module 19 Module 20 Module 21 +Containerization → Observability → Infrastructure +& Orchestration & Reliability as Code +Docker, K8s Logs, Metrics, Terraform, GitOps, + Alerting Automated Provisioning +``` + +--- + +## Roadmap + +### Module 20: Observability & Reliability (Planned) + +Understanding what your application is doing in production: +- Structured logging and log aggregation +- Metrics collection and dashboards +- Distributed tracing +- Alerting strategies and on-call practices +- SLOs, SLIs, and error budgets + +### Module 21: Infrastructure as Code (Planned) + +Managing infrastructure declaratively: +- Why infrastructure should be versioned like application code +- Terraform fundamentals +- GitOps workflows +- Secrets management +- Cloud provider patterns (AWS, GCP, Azure) + +--- + +## Prerequisites + +Complete [Part III: Building Applications](../part-3-building-applications/) before starting Part V. Part IV (Historical Context) is recommended but not required. 
+ +--- + +*See the [full documentation](https://dev.episkopos.community/docs/curriculum) for complete module content, exercises, and examples.* diff --git a/examples/19-chat-docker-compose/.env.example b/examples/19-chat-docker-compose/.env.example new file mode 100644 index 0000000..eef76f6 --- /dev/null +++ b/examples/19-chat-docker-compose/.env.example @@ -0,0 +1,5 @@ +POSTGRES_DB=chat +POSTGRES_PASSWORD=secret +DATABASE_URL=postgresql://postgres:secret@db:5432/chat +CORS_ORIGIN=http://localhost:8080 +NODE_ENV=production diff --git a/examples/19-chat-docker-compose/README.md b/examples/19-chat-docker-compose/README.md new file mode 100644 index 0000000..ef8dad1 --- /dev/null +++ b/examples/19-chat-docker-compose/README.md @@ -0,0 +1,237 @@ +# Chat Docker Compose (Module 19) + +The chat application containerized with Docker and orchestrated with Docker Compose — frontend, backend, and database running as isolated containers with Traefik as the reverse proxy. + +## What You'll Learn + +- **Dockerfile Authoring**: Writing production-ready Dockerfiles for Node.js and React applications +- **Multi-Stage Builds**: Separating build-time and runtime dependencies for smaller images +- **Docker Compose**: Defining and running multi-container applications with a single command +- **Reverse Proxy**: Using Traefik with Docker labels for path-based routing +- **Container Networking**: How services discover and communicate with each other +- **Volume Persistence**: Keeping database data across container restarts +- **Development Overrides**: Using `docker-compose.override.yml` for live reload during development + +## Structure + +``` +19-chat-docker-compose/ +├── client/ +│ ├── Dockerfile # Multi-stage build: Node (build) → Caddy (serve) +│ ├── Caddyfile # Caddy config for SPA routing +│ ├── .dockerignore +│ └── src/ # React application source +├── server/ +│ ├── Dockerfile # Node.js production image +│ ├── .dockerignore +│ └── src/ # Express API source (PostgreSQL) +├── 
docker-compose.yml # Production-like configuration (Traefik + services) +├── docker-compose.override.yml # Development overrides (live reload) +└── .env.example # Environment variable template +``` + +## Prerequisites + +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) installed and running +- No other local dependencies needed — everything runs in containers + +## How to Run + +### Production-like mode + +```bash +# Remove the dev override to run production config only +docker compose -f docker-compose.yml up --build +``` + +- App: http://localhost:8080 +- Traefik dashboard: http://localhost:8081 +- Database: localhost:5432 + +### Development mode (with live reload) + +The `docker-compose.override.yml` is loaded automatically and mounts your source code into the containers: + +```bash +docker compose up --build +``` + +Edit files in `server/src/` or `client/src/` — changes are reflected without rebuilding. + +### Useful commands + +```bash +# View logs for a specific service +docker compose logs -f backend + +# Rebuild a single service +docker compose up --build backend + +# Stop everything +docker compose down + +# Stop and remove all data (including database) +docker compose down -v + +# Open a shell inside a running container +docker compose exec backend sh +``` + +## Architecture + +``` +Browser → :8080 + │ + ┌───▼───┐ + │Traefik│ (reverse proxy) + └───┬───┘ + │ + ┌───────┴────────┐ + │ │ +/api/* /* + │ │ +┌───▼───┐ ┌────▼────┐ +│Backend│ │Frontend │ +│:3001 │ │(Caddy) │ +└───┬───┘ │:80 │ + │ └─────────┘ +┌───▼───┐ +│ DB │ +│:5432 │ +└───────┘ +``` + +Traefik routes requests by path: +- `/api/*` → backend (Express on port 3001) — priority 2 +- `/*` → frontend (Caddy serving static files) — priority 1 + +The frontend uses `/api` as its API base URL (same origin via Traefik), so no CORS is needed in production. 
+ +## Key Files + +### server/Dockerfile + +```dockerfile +FROM node:20-alpine +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci --only=production +COPY . . +EXPOSE 3001 +CMD ["node", "src/index.js"] +``` + +### client/Dockerfile + +```dockerfile +# Build stage +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci +COPY . . +RUN npm run build + +# Production stage +FROM caddy:2-alpine +COPY Caddyfile /etc/caddy/Caddyfile +COPY --from=build /app/dist /srv +EXPOSE 80 +``` + +### client/Caddyfile + +``` +:80 { + root * /srv + try_files {path} /index.html + file_server +} +``` + +### docker-compose.yml + +```yaml +services: + traefik: + image: traefik:v3.2 + command: + - "--api.dashboard=true" + - "--api.insecure=true" + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--entrypoints.web.address=:80" + ports: + - "8080:80" + - "8081:8080" + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + + frontend: + build: + context: ./client + labels: + - "traefik.enable=true" + - "traefik.http.routers.frontend.rule=PathPrefix(`/`)" + - "traefik.http.routers.frontend.priority=1" + - "traefik.http.services.frontend.loadbalancer.server.port=80" + depends_on: + - backend + + backend: + build: + context: ./server + environment: + NODE_ENV: production + DATABASE_URL: postgresql://postgres:secret@db:5432/chat + CORS_ORIGIN: http://localhost:8080 + labels: + - "traefik.enable=true" + - "traefik.http.routers.backend.rule=PathPrefix(`/api`)" + - "traefik.http.routers.backend.priority=2" + - "traefik.http.services.backend.loadbalancer.server.port=3001" + depends_on: + db: + condition: service_healthy + + db: + image: postgres:16-alpine + environment: + POSTGRES_DB: chat + POSTGRES_PASSWORD: secret + volumes: + - pgdata:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 5s + timeout: 3s + retries: 5 + +volumes: + pgdata: +``` + +## Exercises + 
+### 1. Add a Health Check + +Add a Docker health check to the backend service so Docker can detect when the API is ready: + +```dockerfile +HEALTHCHECK --interval=30s --timeout=3s --retries=3 \ + CMD wget --no-verbose --tries=1 --spider http://localhost:3001/api/health || exit 1 +``` + +Then update `docker-compose.yml` so the frontend `depends_on` the backend being healthy, not just started. + +### 2. Add Redis + +Extend `docker-compose.yml` to include a Redis container for session storage or caching. Connect it to the backend via environment variables and add Traefik labels if needed. + +### 3. Production Build Optimization + +Compare the image sizes of your frontend before and after multi-stage builds. Use `docker images` to check. How much space did the multi-stage build save? + +--- + +**Corresponds to**: [Module 19: Containerization & Orchestration](https://dev.episkopos.community/docs/curriculum/part-5-infrastructure/containerization-and-orchestration) diff --git a/examples/19-chat-docker-compose/client/.dockerignore b/examples/19-chat-docker-compose/client/.dockerignore new file mode 100644 index 0000000..79e40da --- /dev/null +++ b/examples/19-chat-docker-compose/client/.dockerignore @@ -0,0 +1,6 @@ +node_modules +dist +.git +.env +*.md +.DS_Store diff --git a/examples/19-chat-docker-compose/client/Caddyfile b/examples/19-chat-docker-compose/client/Caddyfile new file mode 100644 index 0000000..01dd023 --- /dev/null +++ b/examples/19-chat-docker-compose/client/Caddyfile @@ -0,0 +1,5 @@ +:80 { + root * /srv + try_files {path} /index.html + file_server +} diff --git a/examples/19-chat-docker-compose/client/Dockerfile b/examples/19-chat-docker-compose/client/Dockerfile new file mode 100644 index 0000000..a646ed1 --- /dev/null +++ b/examples/19-chat-docker-compose/client/Dockerfile @@ -0,0 +1,13 @@ +# Stage 1: Build +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci +COPY . . 
+RUN npm run build + +# Stage 2: Serve +FROM caddy:2-alpine +COPY Caddyfile /etc/caddy/Caddyfile +COPY --from=build /app/dist /srv +EXPOSE 80 diff --git a/examples/19-chat-docker-compose/client/index.html b/examples/19-chat-docker-compose/client/index.html new file mode 100644 index 0000000..933eba2 --- /dev/null +++ b/examples/19-chat-docker-compose/client/index.html @@ -0,0 +1,12 @@ + + + + + + DevFoundry Chat - Docker Compose + + +
+ + + diff --git a/examples/19-chat-docker-compose/client/package.json b/examples/19-chat-docker-compose/client/package.json new file mode 100644 index 0000000..e554c89 --- /dev/null +++ b/examples/19-chat-docker-compose/client/package.json @@ -0,0 +1,22 @@ +{ + "name": "chat-client", + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + }, + "dependencies": { + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@tailwindcss/vite": "^4.1.18", + "@types/react": "^18.2.66", + "@types/react-dom": "^18.2.22", + "@vitejs/plugin-react": "^4.2.1", + "tailwindcss": "^4.1.18", + "vite": "^5.2.0" + } +} diff --git a/examples/19-chat-docker-compose/client/src/App.jsx b/examples/19-chat-docker-compose/client/src/App.jsx new file mode 100644 index 0000000..98f5616 --- /dev/null +++ b/examples/19-chat-docker-compose/client/src/App.jsx @@ -0,0 +1,72 @@ +import React, { useState, useEffect } from 'react'; +import MessageList from './components/MessageList'; +import MessageInput from './components/MessageInput'; +import { api } from './services/api'; +import { createMessage } from './utils/messages'; + +function App() { + const [messages, setMessages] = useState([]); + const [currentUser] = useState('Fullstack User'); + const [error, setError] = useState(null); + + // Load messages on mount + useEffect(() => { + loadMessages(); + + // Poll for new messages every 3 seconds (simple replacement for real-time) + const interval = setInterval(loadMessages, 3000); + return () => clearInterval(interval); + }, []); + + async function loadMessages() { + try { + const data = await api.getMessages(); + setMessages(data); + setError(null); + } catch (err) { + setError('Could not connect to server. 
Is the backend running?'); + } + } + + const handleSendMessage = async (text) => { + const newMessage = createMessage(text, currentUser); + + // Optimistic update + setMessages((prev) => [...prev, newMessage]); + + try { + await api.sendMessage(newMessage); + } catch (err) { + setError('Failed to send message.'); + // Refresh to sync state + loadMessages(); + } + }; + + return ( +
+
+

Fullstack Chat

+
+ {currentUser} +
+
+ + {error && ( +
+ {error} +
+ )} + +
+ +
+ +
+ +
+
+ ); +} + +export default App; diff --git a/examples/19-chat-docker-compose/client/src/components/MessageInput.jsx b/examples/19-chat-docker-compose/client/src/components/MessageInput.jsx new file mode 100644 index 0000000..613105c --- /dev/null +++ b/examples/19-chat-docker-compose/client/src/components/MessageInput.jsx @@ -0,0 +1,35 @@ +import React, { useState } from 'react'; + +function MessageInput({ onSendMessage }) { + const [text, setText] = useState(''); + + const handleSubmit = (e) => { + e.preventDefault(); + if (text.trim()) { + onSendMessage(text); + setText(''); + } + }; + + return ( +
+ setText(e.target.value)} + placeholder="Type a message..." + autoComplete="off" + className="flex-1 px-3 py-2 border border-gray-300 rounded-md outline-none focus:border-blue-500" + /> + +
+ ); +} + +export default MessageInput; diff --git a/examples/19-chat-docker-compose/client/src/components/MessageItem.jsx b/examples/19-chat-docker-compose/client/src/components/MessageItem.jsx new file mode 100644 index 0000000..e3d2ac6 --- /dev/null +++ b/examples/19-chat-docker-compose/client/src/components/MessageItem.jsx @@ -0,0 +1,20 @@ +import React from 'react'; +import { formatTime } from '../utils/messages'; + +function MessageItem({ message, isOwn }) { + return ( +
+
+ {message.sender} + {formatTime(message.timestamp)} +
+
{message.content}
+
+ ); +} + +export default MessageItem; diff --git a/examples/19-chat-docker-compose/client/src/components/MessageList.jsx b/examples/19-chat-docker-compose/client/src/components/MessageList.jsx new file mode 100644 index 0000000..e38e142 --- /dev/null +++ b/examples/19-chat-docker-compose/client/src/components/MessageList.jsx @@ -0,0 +1,30 @@ +import React, { useEffect, useRef } from 'react'; +import MessageItem from './MessageItem'; + +function MessageList({ messages, currentUser }) { + const endOfMessagesRef = useRef(null); + + // Auto-scroll to bottom when messages change + useEffect(() => { + endOfMessagesRef.current?.scrollIntoView({ behavior: 'smooth' }); + }, [messages]); + + return ( +
+ {messages.length === 0 ? ( +
No messages yet.
+ ) : ( + messages.map((msg) => ( + + )) + )} +
+
+ ); +} + +export default MessageList; diff --git a/examples/19-chat-docker-compose/client/src/index.css b/examples/19-chat-docker-compose/client/src/index.css new file mode 100644 index 0000000..5793f2c --- /dev/null +++ b/examples/19-chat-docker-compose/client/src/index.css @@ -0,0 +1,5 @@ +@import "tailwindcss"; + +#root { + @apply h-screen flex justify-center items-center bg-gray-100; +} diff --git a/examples/19-chat-docker-compose/client/src/main.jsx b/examples/19-chat-docker-compose/client/src/main.jsx new file mode 100644 index 0000000..54b39dd --- /dev/null +++ b/examples/19-chat-docker-compose/client/src/main.jsx @@ -0,0 +1,10 @@ +import React from 'react' +import ReactDOM from 'react-dom/client' +import App from './App.jsx' +import './index.css' + +ReactDOM.createRoot(document.getElementById('root')).render( + + + , +) diff --git a/examples/19-chat-docker-compose/client/src/services/api.js b/examples/19-chat-docker-compose/client/src/services/api.js new file mode 100644 index 0000000..1d6aab5 --- /dev/null +++ b/examples/19-chat-docker-compose/client/src/services/api.js @@ -0,0 +1,21 @@ +const API_URL = '/api'; + +export const api = { + getMessages: async () => { + const response = await fetch(`${API_URL}/messages`); + if (!response.ok) throw new Error('Failed to fetch messages'); + return response.json(); + }, + + sendMessage: async (message) => { + const response = await fetch(`${API_URL}/messages`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(message), + }); + if (!response.ok) throw new Error('Failed to send message'); + return response.json(); + } +}; diff --git a/examples/19-chat-docker-compose/client/src/utils/messages.js b/examples/19-chat-docker-compose/client/src/utils/messages.js new file mode 100644 index 0000000..22d1e3b --- /dev/null +++ b/examples/19-chat-docker-compose/client/src/utils/messages.js @@ -0,0 +1,21 @@ +/** + * Creates a new message object. 
+ */ +export function createMessage(text, sender) { + return { + id: crypto.randomUUID(), + sender: sender, + content: text, + timestamp: new Date().toISOString() + }; +} + +/** + * Formats a timestamp for display. + */ +export function formatTime(isoString) { + return new Date(isoString).toLocaleTimeString([], { + hour: '2-digit', + minute: '2-digit' + }); +} diff --git a/examples/19-chat-docker-compose/client/vite.config.js b/examples/19-chat-docker-compose/client/vite.config.js new file mode 100644 index 0000000..c909975 --- /dev/null +++ b/examples/19-chat-docker-compose/client/vite.config.js @@ -0,0 +1,8 @@ +import { defineConfig } from 'vite' +import react from '@vitejs/plugin-react' +import tailwindcss from '@tailwindcss/vite' + +// https://vitejs.dev/config/ +export default defineConfig({ + plugins: [react(), tailwindcss()], +}) diff --git a/examples/19-chat-docker-compose/docker-compose.override.yml b/examples/19-chat-docker-compose/docker-compose.override.yml new file mode 100644 index 0000000..459ba14 --- /dev/null +++ b/examples/19-chat-docker-compose/docker-compose.override.yml @@ -0,0 +1,20 @@ +# Development overrides — loaded automatically by `docker compose up` +services: + frontend: + build: + context: ./client + dockerfile: Dockerfile + # In dev, run the Vite dev server instead of Caddy + command: ["sh", "-c", "npm install && npm run dev -- --host"] + volumes: + - ./client:/app + - /app/node_modules + labels: + - "traefik.http.services.frontend.loadbalancer.server.port=5173" + + backend: + volumes: + - ./server/src:/app/src + environment: + NODE_ENV: development + command: ["node", "--watch", "src/index.js"] diff --git a/examples/19-chat-docker-compose/docker-compose.yml b/examples/19-chat-docker-compose/docker-compose.yml new file mode 100644 index 0000000..2eab452 --- /dev/null +++ b/examples/19-chat-docker-compose/docker-compose.yml @@ -0,0 +1,61 @@ +services: + traefik: + image: traefik:v3.2 + command: + - "--api.dashboard=true" + - 
"--api.insecure=true" + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--entrypoints.web.address=:80" + ports: + - "8080:80" + - "8081:8080" + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + + frontend: + build: + context: ./client + labels: + - "traefik.enable=true" + - "traefik.http.routers.frontend.rule=PathPrefix(`/`)" + - "traefik.http.routers.frontend.entrypoints=web" + - "traefik.http.routers.frontend.priority=1" + - "traefik.http.services.frontend.loadbalancer.server.port=80" + depends_on: + - backend + + backend: + build: + context: ./server + environment: + NODE_ENV: production + DATABASE_URL: postgresql://postgres:secret@db:5432/chat + CORS_ORIGIN: http://localhost:8080 + labels: + - "traefik.enable=true" + - "traefik.http.routers.backend.rule=PathPrefix(`/api`)" + - "traefik.http.routers.backend.entrypoints=web" + - "traefik.http.routers.backend.priority=2" + - "traefik.http.services.backend.loadbalancer.server.port=3001" + depends_on: + db: + condition: service_healthy + + db: + image: postgres:16-alpine + environment: + POSTGRES_DB: chat + POSTGRES_PASSWORD: secret + volumes: + - pgdata:/var/lib/postgresql/data + ports: + - "5432:5432" + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 5s + timeout: 3s + retries: 5 + +volumes: + pgdata: diff --git a/examples/19-chat-docker-compose/server/.dockerignore b/examples/19-chat-docker-compose/server/.dockerignore new file mode 100644 index 0000000..bd6f0a1 --- /dev/null +++ b/examples/19-chat-docker-compose/server/.dockerignore @@ -0,0 +1,5 @@ +node_modules +.git +.env +*.md +.DS_Store diff --git a/examples/19-chat-docker-compose/server/Dockerfile b/examples/19-chat-docker-compose/server/Dockerfile new file mode 100644 index 0000000..102546f --- /dev/null +++ b/examples/19-chat-docker-compose/server/Dockerfile @@ -0,0 +1,7 @@ +FROM node:20-alpine +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci --only=production +COPY . . 
+EXPOSE 3001 +CMD ["node", "src/index.js"] diff --git a/examples/19-chat-docker-compose/server/package.json b/examples/19-chat-docker-compose/server/package.json new file mode 100644 index 0000000..f6a2e67 --- /dev/null +++ b/examples/19-chat-docker-compose/server/package.json @@ -0,0 +1,14 @@ +{ + "name": "chat-server", + "version": "1.0.0", + "type": "module", + "scripts": { + "start": "node src/index.js", + "dev": "node --watch src/index.js" + }, + "dependencies": { + "cors": "^2.8.5", + "express": "^4.18.3", + "pg": "^8.13.1" + } +} diff --git a/examples/19-chat-docker-compose/server/src/db/schema.sql b/examples/19-chat-docker-compose/server/src/db/schema.sql new file mode 100644 index 0000000..889fe81 --- /dev/null +++ b/examples/19-chat-docker-compose/server/src/db/schema.sql @@ -0,0 +1,6 @@ +CREATE TABLE IF NOT EXISTS messages ( + id TEXT PRIMARY KEY, + sender TEXT NOT NULL, + content TEXT NOT NULL, + timestamp TEXT NOT NULL +); diff --git a/examples/19-chat-docker-compose/server/src/index.js b/examples/19-chat-docker-compose/server/src/index.js new file mode 100644 index 0000000..8cd4072 --- /dev/null +++ b/examples/19-chat-docker-compose/server/src/index.js @@ -0,0 +1,32 @@ +import express from 'express'; +import cors from 'cors'; +import messageRoutes from './routes/messages.js'; +import { initialize } from './services/database.js'; + +const app = express(); +const PORT = process.env.PORT || 3001; + +app.use(cors({ + origin: process.env.CORS_ORIGIN || 'http://localhost:5173' +})); +app.use(express.json()); + +// Routes +app.use('/api/messages', messageRoutes); + +app.get('/api/health', (req, res) => { + res.json({ status: 'ok' }); +}); + +// Initialize database then start server +async function start() { + await initialize(); + app.listen(PORT, () => { + console.log(`Server running on http://localhost:${PORT}`); + }); +} + +start().catch((err) => { + console.error('Failed to start server:', err); + process.exit(1); +}); diff --git 
a/examples/19-chat-docker-compose/server/src/routes/messages.js b/examples/19-chat-docker-compose/server/src/routes/messages.js new file mode 100644 index 0000000..ecea423 --- /dev/null +++ b/examples/19-chat-docker-compose/server/src/routes/messages.js @@ -0,0 +1,28 @@ +import express from 'express'; +import { pool } from '../services/database.js'; + +const router = express.Router(); + +// Get all messages +router.get('/', async (req, res) => { + const result = await pool.query('SELECT * FROM messages ORDER BY timestamp ASC'); + res.json(result.rows); +}); + +// Create a message +router.post('/', async (req, res) => { + const { id, sender, content, timestamp } = req.body; + + if (!sender || !content) { + return res.status(400).json({ error: 'Sender and content are required' }); + } + + await pool.query( + 'INSERT INTO messages (id, sender, content, timestamp) VALUES ($1, $2, $3, $4)', + [id || crypto.randomUUID(), sender, content, timestamp || new Date().toISOString()] + ); + + res.status(201).json({ success: true }); +}); + +export default router; diff --git a/examples/19-chat-docker-compose/server/src/services/database.js b/examples/19-chat-docker-compose/server/src/services/database.js new file mode 100644 index 0000000..472c739 --- /dev/null +++ b/examples/19-chat-docker-compose/server/src/services/database.js @@ -0,0 +1,30 @@ +import pg from 'pg'; +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const schemaPath = path.join(__dirname, '../db/schema.sql'); + +export const pool = new pg.Pool({ + connectionString: process.env.DATABASE_URL || 'postgresql://postgres:secret@localhost:5432/chat', +}); + +export async function initialize(retries = 10, delay = 2000) { + const schema = fs.readFileSync(schemaPath, 'utf8'); + + for (let i = 0; i < retries; i++) { + try { + await pool.query(schema); + console.log('Database initialized'); + return; + } catch (err) { + 
if (i < retries - 1) { + console.log(`Database not ready, retrying in ${delay}ms... (${i + 1}/${retries})`); + await new Promise((r) => setTimeout(r, delay)); + } else { + throw err; + } + } + } +} diff --git a/examples/20-chat-kubernetes/README.md b/examples/20-chat-kubernetes/README.md new file mode 100644 index 0000000..34d8d7f --- /dev/null +++ b/examples/20-chat-kubernetes/README.md @@ -0,0 +1,248 @@ +# Chat Kubernetes (Module 19) + +The chat application deployed to a local Kubernetes cluster using kind and Tilt — the same manifests that would run in production on k3s. + +## What You'll Learn + +- **Kubernetes Manifests**: Writing Deployments, Services, ConfigMaps, and Ingress rules +- **kind**: Running a local Kubernetes cluster inside Docker +- **Tilt**: Automated build-deploy-reload workflow for Kubernetes development +- **Kustomize**: Managing environment-specific configuration with overlays +- **Traefik Ingress**: Path-based routing with the Traefik ingress controller +- **K8s Debugging**: Using `kubectl` to inspect pods, logs, and services + +## Structure + +``` +20-chat-kubernetes/ +├── client/ +│ ├── Dockerfile # Multi-stage build: Node (build) → Caddy (serve) +│ ├── Caddyfile # Caddy config for SPA routing +│ └── src/ +├── server/ +│ ├── Dockerfile # Node.js production image +│ └── src/ # Express API source (PostgreSQL) +├── k8s/ +│ ├── base/ +│ │ ├── kustomization.yaml +│ │ ├── backend-deployment.yaml +│ │ ├── frontend-deployment.yaml +│ │ ├── postgres-statefulset.yaml +│ │ ├── services.yaml +│ │ ├── configmap.yaml +│ │ └── ingress.yaml # Traefik ingress (ingressClassName: traefik) +│ └── overlays/ +│ ├── development/ +│ │ └── kustomization.yaml +│ └── production/ +│ └── kustomization.yaml +├── Tiltfile +├── kind-config.yaml +└── traefik-values.yaml # Helm values for Traefik on kind +``` + +## Prerequisites + +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) installed and running +- 
[kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) installed +- [kubectl](https://kubernetes.io/docs/tasks/tools/) installed +- [Tilt](https://docs.tilt.dev/install.html) installed +- [Helm](https://helm.sh/docs/intro/install/) installed (for Traefik ingress controller) + +## How to Run + +### 1. Create a kind cluster + +```bash +kind create cluster --name chat-dev --config kind-config.yaml +``` + +### 2. Install Traefik ingress controller + +```bash +helm repo add traefik https://traefik.github.io/charts +helm repo update +helm install traefik traefik/traefik -f traefik-values.yaml +``` + +### 3. Start Tilt + +```bash +tilt up +``` + +Tilt will: +- Build Docker images for the frontend and backend +- Deploy all Kubernetes resources (pods, services, configmaps) +- Set up port forwards so you can access the app locally +- Watch for code changes and automatically rebuild/redeploy + +### 4. Access the app + +- **Tilt dashboard**: http://localhost:10350 +- **Frontend**: http://localhost:8080 +- **Backend API**: http://localhost:3001/api/health + +### 5. 
Clean up + +```bash +tilt down +kind delete cluster --name chat-dev +``` + +## Key Files + +### Tiltfile + +```python +# Build images +docker_build('chat-backend', './server') +docker_build('chat-frontend', './client') + +# Apply K8s manifests +k8s_yaml(kustomize('k8s/overlays/development')) + +# Port forwards for local access +k8s_resource('chat-backend', port_forwards='3001:3001') +k8s_resource('chat-frontend', port_forwards='8080:80') +k8s_resource('postgres', port_forwards='5432:5432') +``` + +### k8s/base/ingress.yaml + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: chat-ingress +spec: + ingressClassName: traefik + rules: + - http: + paths: + - path: /api + pathType: Prefix + backend: + service: + name: chat-backend + port: + number: 80 + - path: / + pathType: Prefix + backend: + service: + name: chat-frontend + port: + number: 80 +``` + +### k8s/base/backend-deployment.yaml + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: chat-backend +spec: + replicas: 1 + selector: + matchLabels: + app: chat-backend + template: + metadata: + labels: + app: chat-backend + spec: + containers: + - name: backend + image: chat-backend + ports: + - containerPort: 3001 + envFrom: + - configMapRef: + name: backend-config +``` + +### k8s/base/services.yaml + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: chat-backend +spec: + selector: + app: chat-backend + ports: + - port: 80 + targetPort: 3001 +--- +apiVersion: v1 +kind: Service +metadata: + name: chat-frontend +spec: + selector: + app: chat-frontend + ports: + - port: 80 + targetPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - port: 5432 + targetPort: 5432 +``` + +## Useful kubectl Commands + +```bash +# See all running pods +kubectl get pods + +# See all services +kubectl get services + +# View logs for a pod +kubectl logs -f deployment/chat-backend + +# Describe a pod (useful for debugging 
startup failures) +kubectl describe pod + +# Open a shell inside a pod +kubectl exec -it deployment/chat-backend -- sh + +# Apply manifests manually (without Tilt) +kubectl apply -k k8s/overlays/development + +# Delete all resources +kubectl delete -k k8s/overlays/development +``` + +## Exercises + +### 1. Scale the Backend + +Edit `k8s/overlays/development/kustomization.yaml` to run 2 replicas of the backend. Watch Tilt create the second pod. Verify with `kubectl get pods`. + +### 2. Update a ConfigMap + +Change a value in `k8s/base/configmap.yaml` and apply it. What happens to the running pods? (Hint: ConfigMap changes don't automatically restart pods — you need to trigger a rollout.) + +### 3. Production Overlay + +Apply the production overlay that sets 3 backend replicas and changes `LOG_LEVEL` to `"warn"`: `kubectl apply -k k8s/overlays/production`. + +### 4. Inspect the Network + +From inside the backend pod (`kubectl exec`), try to reach the postgres service by name: `wget -qO- postgres:5432`. Then try `wget -qO- chat-frontend:80`. Observe how Kubernetes DNS resolves service names. 
+ +--- + +**Corresponds to**: [Module 19: Containerization & Orchestration](https://dev.episkopos.community/docs/curriculum/part-5-infrastructure/containerization-and-orchestration) diff --git a/examples/20-chat-kubernetes/Tiltfile b/examples/20-chat-kubernetes/Tiltfile new file mode 100644 index 0000000..649967d --- /dev/null +++ b/examples/20-chat-kubernetes/Tiltfile @@ -0,0 +1,11 @@ +# Build images +docker_build('chat-backend', './server') +docker_build('chat-frontend', './client') + +# Apply K8s manifests via Kustomize +k8s_yaml(kustomize('k8s/overlays/development')) + +# Port forwards for local access +k8s_resource('chat-backend', port_forwards='3001:3001') +k8s_resource('chat-frontend', port_forwards='8080:80') +k8s_resource('postgres', port_forwards='5432:5432') diff --git a/examples/20-chat-kubernetes/client/.dockerignore b/examples/20-chat-kubernetes/client/.dockerignore new file mode 100644 index 0000000..79e40da --- /dev/null +++ b/examples/20-chat-kubernetes/client/.dockerignore @@ -0,0 +1,6 @@ +node_modules +dist +.git +.env +*.md +.DS_Store diff --git a/examples/20-chat-kubernetes/client/Caddyfile b/examples/20-chat-kubernetes/client/Caddyfile new file mode 100644 index 0000000..01dd023 --- /dev/null +++ b/examples/20-chat-kubernetes/client/Caddyfile @@ -0,0 +1,5 @@ +:80 { + root * /srv + try_files {path} /index.html + file_server +} diff --git a/examples/20-chat-kubernetes/client/Dockerfile b/examples/20-chat-kubernetes/client/Dockerfile new file mode 100644 index 0000000..a646ed1 --- /dev/null +++ b/examples/20-chat-kubernetes/client/Dockerfile @@ -0,0 +1,13 @@ +# Stage 1: Build +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci +COPY . . 
+RUN npm run build + +# Stage 2: Serve +FROM caddy:2-alpine +COPY Caddyfile /etc/caddy/Caddyfile +COPY --from=build /app/dist /srv +EXPOSE 80 diff --git a/examples/20-chat-kubernetes/client/index.html b/examples/20-chat-kubernetes/client/index.html new file mode 100644 index 0000000..933eba2 --- /dev/null +++ b/examples/20-chat-kubernetes/client/index.html @@ -0,0 +1,12 @@ + + + + + + DevFoundry Chat - Kubernetes + + +
+ + + diff --git a/examples/20-chat-kubernetes/client/package.json b/examples/20-chat-kubernetes/client/package.json new file mode 100644 index 0000000..e554c89 --- /dev/null +++ b/examples/20-chat-kubernetes/client/package.json @@ -0,0 +1,22 @@ +{ + "name": "chat-client", + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview" + }, + "dependencies": { + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@tailwindcss/vite": "^4.1.18", + "@types/react": "^18.2.66", + "@types/react-dom": "^18.2.22", + "@vitejs/plugin-react": "^4.2.1", + "tailwindcss": "^4.1.18", + "vite": "^5.2.0" + } +} diff --git a/examples/20-chat-kubernetes/client/src/App.jsx b/examples/20-chat-kubernetes/client/src/App.jsx new file mode 100644 index 0000000..98f5616 --- /dev/null +++ b/examples/20-chat-kubernetes/client/src/App.jsx @@ -0,0 +1,72 @@ +import React, { useState, useEffect } from 'react'; +import MessageList from './components/MessageList'; +import MessageInput from './components/MessageInput'; +import { api } from './services/api'; +import { createMessage } from './utils/messages'; + +function App() { + const [messages, setMessages] = useState([]); + const [currentUser] = useState('Fullstack User'); + const [error, setError] = useState(null); + + // Load messages on mount + useEffect(() => { + loadMessages(); + + // Poll for new messages every 3 seconds (simple replacement for real-time) + const interval = setInterval(loadMessages, 3000); + return () => clearInterval(interval); + }, []); + + async function loadMessages() { + try { + const data = await api.getMessages(); + setMessages(data); + setError(null); + } catch (err) { + setError('Could not connect to server. 
Is the backend running?'); + } + } + + const handleSendMessage = async (text) => { + const newMessage = createMessage(text, currentUser); + + // Optimistic update + setMessages((prev) => [...prev, newMessage]); + + try { + await api.sendMessage(newMessage); + } catch (err) { + setError('Failed to send message.'); + // Refresh to sync state + loadMessages(); + } + }; + + return ( +
+
+

Fullstack Chat

+
+ {currentUser} +
+
+ + {error && ( +
+ {error} +
+ )} + +
+ +
+ +
+ +
+
+ ); +} + +export default App; diff --git a/examples/20-chat-kubernetes/client/src/components/MessageInput.jsx b/examples/20-chat-kubernetes/client/src/components/MessageInput.jsx new file mode 100644 index 0000000..613105c --- /dev/null +++ b/examples/20-chat-kubernetes/client/src/components/MessageInput.jsx @@ -0,0 +1,35 @@ +import React, { useState } from 'react'; + +function MessageInput({ onSendMessage }) { + const [text, setText] = useState(''); + + const handleSubmit = (e) => { + e.preventDefault(); + if (text.trim()) { + onSendMessage(text); + setText(''); + } + }; + + return ( +
+ setText(e.target.value)} + placeholder="Type a message..." + autoComplete="off" + className="flex-1 px-3 py-2 border border-gray-300 rounded-md outline-none focus:border-blue-500" + /> + +
+ ); +} + +export default MessageInput; diff --git a/examples/20-chat-kubernetes/client/src/components/MessageItem.jsx b/examples/20-chat-kubernetes/client/src/components/MessageItem.jsx new file mode 100644 index 0000000..e3d2ac6 --- /dev/null +++ b/examples/20-chat-kubernetes/client/src/components/MessageItem.jsx @@ -0,0 +1,20 @@ +import React from 'react'; +import { formatTime } from '../utils/messages'; + +function MessageItem({ message, isOwn }) { + return ( +
+
+ {message.sender} + {formatTime(message.timestamp)} +
+
{message.content}
+
+ ); +} + +export default MessageItem; diff --git a/examples/20-chat-kubernetes/client/src/components/MessageList.jsx b/examples/20-chat-kubernetes/client/src/components/MessageList.jsx new file mode 100644 index 0000000..e38e142 --- /dev/null +++ b/examples/20-chat-kubernetes/client/src/components/MessageList.jsx @@ -0,0 +1,30 @@ +import React, { useEffect, useRef } from 'react'; +import MessageItem from './MessageItem'; + +function MessageList({ messages, currentUser }) { + const endOfMessagesRef = useRef(null); + + // Auto-scroll to bottom when messages change + useEffect(() => { + endOfMessagesRef.current?.scrollIntoView({ behavior: 'smooth' }); + }, [messages]); + + return ( +
+ {messages.length === 0 ? ( +
No messages yet.
+ ) : ( + messages.map((msg) => ( + + )) + )} +
+
+ ); +} + +export default MessageList; diff --git a/examples/20-chat-kubernetes/client/src/index.css b/examples/20-chat-kubernetes/client/src/index.css new file mode 100644 index 0000000..5793f2c --- /dev/null +++ b/examples/20-chat-kubernetes/client/src/index.css @@ -0,0 +1,5 @@ +@import "tailwindcss"; + +#root { + @apply h-screen flex justify-center items-center bg-gray-100; +} diff --git a/examples/20-chat-kubernetes/client/src/main.jsx b/examples/20-chat-kubernetes/client/src/main.jsx new file mode 100644 index 0000000..54b39dd --- /dev/null +++ b/examples/20-chat-kubernetes/client/src/main.jsx @@ -0,0 +1,10 @@ +import React from 'react' +import ReactDOM from 'react-dom/client' +import App from './App.jsx' +import './index.css' + +ReactDOM.createRoot(document.getElementById('root')).render( + + + , +) diff --git a/examples/20-chat-kubernetes/client/src/services/api.js b/examples/20-chat-kubernetes/client/src/services/api.js new file mode 100644 index 0000000..1d6aab5 --- /dev/null +++ b/examples/20-chat-kubernetes/client/src/services/api.js @@ -0,0 +1,21 @@ +const API_URL = '/api'; + +export const api = { + getMessages: async () => { + const response = await fetch(`${API_URL}/messages`); + if (!response.ok) throw new Error('Failed to fetch messages'); + return response.json(); + }, + + sendMessage: async (message) => { + const response = await fetch(`${API_URL}/messages`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(message), + }); + if (!response.ok) throw new Error('Failed to send message'); + return response.json(); + } +}; diff --git a/examples/20-chat-kubernetes/client/src/utils/messages.js b/examples/20-chat-kubernetes/client/src/utils/messages.js new file mode 100644 index 0000000..22d1e3b --- /dev/null +++ b/examples/20-chat-kubernetes/client/src/utils/messages.js @@ -0,0 +1,21 @@ +/** + * Creates a new message object. 
+ */ +export function createMessage(text, sender) { + return { + id: crypto.randomUUID(), + sender: sender, + content: text, + timestamp: new Date().toISOString() + }; +} + +/** + * Formats a timestamp for display. + */ +export function formatTime(isoString) { + return new Date(isoString).toLocaleTimeString([], { + hour: '2-digit', + minute: '2-digit' + }); +} diff --git a/examples/20-chat-kubernetes/client/vite.config.js b/examples/20-chat-kubernetes/client/vite.config.js new file mode 100644 index 0000000..c909975 --- /dev/null +++ b/examples/20-chat-kubernetes/client/vite.config.js @@ -0,0 +1,8 @@ +import { defineConfig } from 'vite' +import react from '@vitejs/plugin-react' +import tailwindcss from '@tailwindcss/vite' + +// https://vitejs.dev/config/ +export default defineConfig({ + plugins: [react(), tailwindcss()], +}) diff --git a/examples/20-chat-kubernetes/k8s/base/backend-deployment.yaml b/examples/20-chat-kubernetes/k8s/base/backend-deployment.yaml new file mode 100644 index 0000000..18fe5a8 --- /dev/null +++ b/examples/20-chat-kubernetes/k8s/base/backend-deployment.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: chat-backend +spec: + replicas: 1 + selector: + matchLabels: + app: chat-backend + template: + metadata: + labels: + app: chat-backend + spec: + containers: + - name: backend + image: chat-backend + ports: + - containerPort: 3001 + envFrom: + - configMapRef: + name: backend-config diff --git a/examples/20-chat-kubernetes/k8s/base/configmap.yaml b/examples/20-chat-kubernetes/k8s/base/configmap.yaml new file mode 100644 index 0000000..a5754ce --- /dev/null +++ b/examples/20-chat-kubernetes/k8s/base/configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: backend-config +data: + DATABASE_URL: "postgresql://postgres:secret@postgres:5432/chat" + NODE_ENV: "production" + CORS_ORIGIN: "http://localhost:8080" + LOG_LEVEL: "info" diff --git 
a/examples/20-chat-kubernetes/k8s/base/frontend-deployment.yaml b/examples/20-chat-kubernetes/k8s/base/frontend-deployment.yaml new file mode 100644 index 0000000..3a3077e --- /dev/null +++ b/examples/20-chat-kubernetes/k8s/base/frontend-deployment.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: chat-frontend +spec: + replicas: 1 + selector: + matchLabels: + app: chat-frontend + template: + metadata: + labels: + app: chat-frontend + spec: + containers: + - name: frontend + image: chat-frontend + ports: + - containerPort: 80 diff --git a/examples/20-chat-kubernetes/k8s/base/ingress.yaml b/examples/20-chat-kubernetes/k8s/base/ingress.yaml new file mode 100644 index 0000000..5c10a9c --- /dev/null +++ b/examples/20-chat-kubernetes/k8s/base/ingress.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: chat-ingress +spec: + ingressClassName: traefik + rules: + - http: + paths: + - path: /api + pathType: Prefix + backend: + service: + name: chat-backend + port: + number: 80 + - path: / + pathType: Prefix + backend: + service: + name: chat-frontend + port: + number: 80 diff --git a/examples/20-chat-kubernetes/k8s/base/kustomization.yaml b/examples/20-chat-kubernetes/k8s/base/kustomization.yaml new file mode 100644 index 0000000..2235bdf --- /dev/null +++ b/examples/20-chat-kubernetes/k8s/base/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - backend-deployment.yaml + - frontend-deployment.yaml + - postgres-statefulset.yaml + - services.yaml + - configmap.yaml + - ingress.yaml diff --git a/examples/20-chat-kubernetes/k8s/base/postgres-statefulset.yaml b/examples/20-chat-kubernetes/k8s/base/postgres-statefulset.yaml new file mode 100644 index 0000000..961be8e --- /dev/null +++ b/examples/20-chat-kubernetes/k8s/base/postgres-statefulset.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres +spec: + 
serviceName: postgres + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:16-alpine + ports: + - containerPort: 5432 + env: + - name: POSTGRES_DB + value: chat + - name: POSTGRES_PASSWORD + value: secret + volumeMounts: + - name: pgdata + mountPath: /var/lib/postgresql/data + volumeClaimTemplates: + - metadata: + name: pgdata + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi diff --git a/examples/20-chat-kubernetes/k8s/base/services.yaml b/examples/20-chat-kubernetes/k8s/base/services.yaml new file mode 100644 index 0000000..eb4268e --- /dev/null +++ b/examples/20-chat-kubernetes/k8s/base/services.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: chat-backend +spec: + selector: + app: chat-backend + ports: + - port: 80 + targetPort: 3001 +--- +apiVersion: v1 +kind: Service +metadata: + name: chat-frontend +spec: + selector: + app: chat-frontend + ports: + - port: 80 + targetPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - port: 5432 + targetPort: 5432 diff --git a/examples/20-chat-kubernetes/k8s/overlays/development/kustomization.yaml b/examples/20-chat-kubernetes/k8s/overlays/development/kustomization.yaml new file mode 100644 index 0000000..28c6761 --- /dev/null +++ b/examples/20-chat-kubernetes/k8s/overlays/development/kustomization.yaml @@ -0,0 +1,13 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../base +patches: + - patch: |- + apiVersion: v1 + kind: ConfigMap + metadata: + name: backend-config + data: + NODE_ENV: "development" + LOG_LEVEL: "debug" diff --git a/examples/20-chat-kubernetes/k8s/overlays/production/kustomization.yaml b/examples/20-chat-kubernetes/k8s/overlays/production/kustomization.yaml new file mode 100644 index 0000000..4942520 --- /dev/null +++ 
b/examples/20-chat-kubernetes/k8s/overlays/production/kustomization.yaml @@ -0,0 +1,19 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../base +patches: + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: chat-backend + spec: + replicas: 3 + - patch: |- + apiVersion: v1 + kind: ConfigMap + metadata: + name: backend-config + data: + LOG_LEVEL: "warn" diff --git a/examples/20-chat-kubernetes/kind-config.yaml b/examples/20-chat-kubernetes/kind-config.yaml new file mode 100644 index 0000000..0155422 --- /dev/null +++ b/examples/20-chat-kubernetes/kind-config.yaml @@ -0,0 +1,17 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 8080 + protocol: TCP + - containerPort: 443 + hostPort: 8443 + protocol: TCP diff --git a/examples/20-chat-kubernetes/server/.dockerignore b/examples/20-chat-kubernetes/server/.dockerignore new file mode 100644 index 0000000..bd6f0a1 --- /dev/null +++ b/examples/20-chat-kubernetes/server/.dockerignore @@ -0,0 +1,5 @@ +node_modules +.git +.env +*.md +.DS_Store diff --git a/examples/20-chat-kubernetes/server/Dockerfile b/examples/20-chat-kubernetes/server/Dockerfile new file mode 100644 index 0000000..102546f --- /dev/null +++ b/examples/20-chat-kubernetes/server/Dockerfile @@ -0,0 +1,7 @@ +FROM node:20-alpine +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci --only=production +COPY . . 
+EXPOSE 3001 +CMD ["node", "src/index.js"] diff --git a/examples/20-chat-kubernetes/server/package.json b/examples/20-chat-kubernetes/server/package.json new file mode 100644 index 0000000..f6a2e67 --- /dev/null +++ b/examples/20-chat-kubernetes/server/package.json @@ -0,0 +1,14 @@ +{ + "name": "chat-server", + "version": "1.0.0", + "type": "module", + "scripts": { + "start": "node src/index.js", + "dev": "node --watch src/index.js" + }, + "dependencies": { + "cors": "^2.8.5", + "express": "^4.18.3", + "pg": "^8.13.1" + } +} diff --git a/examples/20-chat-kubernetes/server/src/db/schema.sql b/examples/20-chat-kubernetes/server/src/db/schema.sql new file mode 100644 index 0000000..889fe81 --- /dev/null +++ b/examples/20-chat-kubernetes/server/src/db/schema.sql @@ -0,0 +1,6 @@ +CREATE TABLE IF NOT EXISTS messages ( + id TEXT PRIMARY KEY, + sender TEXT NOT NULL, + content TEXT NOT NULL, + timestamp TEXT NOT NULL +); diff --git a/examples/20-chat-kubernetes/server/src/index.js b/examples/20-chat-kubernetes/server/src/index.js new file mode 100644 index 0000000..8cd4072 --- /dev/null +++ b/examples/20-chat-kubernetes/server/src/index.js @@ -0,0 +1,32 @@ +import express from 'express'; +import cors from 'cors'; +import messageRoutes from './routes/messages.js'; +import { initialize } from './services/database.js'; + +const app = express(); +const PORT = process.env.PORT || 3001; + +app.use(cors({ + origin: process.env.CORS_ORIGIN || 'http://localhost:5173' +})); +app.use(express.json()); + +// Routes +app.use('/api/messages', messageRoutes); + +app.get('/api/health', (req, res) => { + res.json({ status: 'ok' }); +}); + +// Initialize database then start server +async function start() { + await initialize(); + app.listen(PORT, () => { + console.log(`Server running on http://localhost:${PORT}`); + }); +} + +start().catch((err) => { + console.error('Failed to start server:', err); + process.exit(1); +}); diff --git a/examples/20-chat-kubernetes/server/src/routes/messages.js 
b/examples/20-chat-kubernetes/server/src/routes/messages.js new file mode 100644 index 0000000..ecea423 --- /dev/null +++ b/examples/20-chat-kubernetes/server/src/routes/messages.js @@ -0,0 +1,28 @@ +import express from 'express'; +import { pool } from '../services/database.js'; + +const router = express.Router(); + +// Get all messages +router.get('/', async (req, res) => { + const result = await pool.query('SELECT * FROM messages ORDER BY timestamp ASC'); + res.json(result.rows); +}); + +// Create a message +router.post('/', async (req, res) => { + const { id, sender, content, timestamp } = req.body; + + if (!sender || !content) { + return res.status(400).json({ error: 'Sender and content are required' }); + } + + await pool.query( + 'INSERT INTO messages (id, sender, content, timestamp) VALUES ($1, $2, $3, $4)', + [id || crypto.randomUUID(), sender, content, timestamp || new Date().toISOString()] + ); + + res.status(201).json({ success: true }); +}); + +export default router; diff --git a/examples/20-chat-kubernetes/server/src/services/database.js b/examples/20-chat-kubernetes/server/src/services/database.js new file mode 100644 index 0000000..472c739 --- /dev/null +++ b/examples/20-chat-kubernetes/server/src/services/database.js @@ -0,0 +1,30 @@ +import pg from 'pg'; +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); +const schemaPath = path.join(__dirname, '../db/schema.sql'); + +export const pool = new pg.Pool({ + connectionString: process.env.DATABASE_URL || 'postgresql://postgres:secret@localhost:5432/chat', +}); + +export async function initialize(retries = 10, delay = 2000) { + const schema = fs.readFileSync(schemaPath, 'utf8'); + + for (let i = 0; i < retries; i++) { + try { + await pool.query(schema); + console.log('Database initialized'); + return; + } catch (err) { + if (i < retries - 1) { + console.log(`Database not ready, retrying in ${delay}ms... 
(${i + 1}/${retries})`); + await new Promise((r) => setTimeout(r, delay)); + } else { + throw err; + } + } + } +} diff --git a/examples/20-chat-kubernetes/traefik-values.yaml b/examples/20-chat-kubernetes/traefik-values.yaml new file mode 100644 index 0000000..03e6fdc --- /dev/null +++ b/examples/20-chat-kubernetes/traefik-values.yaml @@ -0,0 +1,13 @@ +# Helm values for Traefik ingress controller on kind clusters +# Install with: helm install traefik traefik/traefik -f traefik-values.yaml +service: + type: NodePort +ports: + web: + hostPort: 80 + websecure: + hostPort: 443 +tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Equal" + effect: "NoSchedule" diff --git a/website/docs/curriculum/part-3-building-applications/deployed-app.md b/website/docs/curriculum/part-3-building-applications/deployed-app.md index 3ce9062..e8f6bf7 100644 --- a/website/docs/curriculum/part-3-building-applications/deployed-app.md +++ b/website/docs/curriculum/part-3-building-applications/deployed-app.md @@ -476,11 +476,14 @@ Is it just frontend (static)? │ └── Railway, Render (free tiers) └── Production/scale needed? ├── Managed services → Heroku, Railway Pro + ├── Container-based → Docker + Kubernetes (Module 19) └── More control → AWS, GCP, DigitalOcean ``` For learning: **Railway + Vercel** is a great free combination. +For production applications with a backend and database: consider the **container-based approach** covered in [Module 19: Containerization & Orchestration](../part-5-infrastructure/containerization-and-orchestration) — Docker for packaging, Kubernetes for running at scale. 
+ --- ## Exercise 1: Deploy Frontend @@ -592,6 +595,7 @@ You've built: - Add features to your deployed app - Build your own project using these patterns - **[Explore Part IV: Historical Context](../part-4-historical-context/server-side-era)** — Understand why everything exists +- **[Explore Part V: Infrastructure & Operations](../part-5-infrastructure/containerization-and-orchestration)** — Containerize your app with Docker and learn Kubernetes fundamentals - Join the DevFoundry community and build in public --- diff --git a/website/docs/curriculum/part-5-infrastructure/containerization-and-orchestration.md b/website/docs/curriculum/part-5-infrastructure/containerization-and-orchestration.md new file mode 100644 index 0000000..401afaa --- /dev/null +++ b/website/docs/curriculum/part-5-infrastructure/containerization-and-orchestration.md @@ -0,0 +1,1192 @@ +--- +sidebar_position: 1 +title: "Module 19: Containerization & Orchestration" +description: "From 'works on my machine' to 'runs anywhere' — Docker, Kubernetes, and the infrastructure mental model for modern software" +--- + +# Module 19: Containerization & Orchestration + +**From "works on my machine" to "runs anywhere" — Docker, Kubernetes, and the infrastructure mental model for modern software.** + +--- + +## Learning Objectives + +By the end of this module, you will: + +- Understand why containers exist and what problems they solve +- Write Dockerfiles and build container images +- Run multi-container applications with Docker Compose +- Understand Kubernetes primitives and what role each plays +- Evaluate the K8s-first development stack (kind, Tilt, k3s) +- Articulate when containerization is the right choice + +**Time**: 4-5 hours (reading + exercises) + +--- + +## Introduction + +In Part III, you deployed your chat app to managed platforms like Vercel and Railway. You pushed code, the platform built it, and gave you a URL. That works — until it doesn't. 
+ +What happens when: +- Your app needs a specific version of Node, plus a Redis cache, plus a PostgreSQL database? +- A new teammate joins and spends a full day getting the project running locally? +- Your staging environment behaves differently from production? +- You need to run 10 copies of your backend behind a load balancer? + +These are **environment problems** — and containers solve them. + +A container packages your application with everything it needs to run: code, runtime, libraries, system tools. If it runs in the container, it runs the same way everywhere — your laptop, your teammate's laptop, staging, production. + +This module covers two layers: + +1. **Docker** — How to build and run containers (the practical foundation) +2. **Kubernetes** — How to orchestrate containers at scale (the awareness layer) + +We'll spend most of our time on Docker because it's the skill you'll use daily. Kubernetes gets enough coverage to build intuition — so when the topic comes up in your team, you can ask the right questions and follow the conversation. + +--- + +## Architecture: Containers vs. Traditional Deployment + +### Traditional Deployment (Stage 5) + +``` +Your Laptop Production Server +├── Node v20 ├── Node v18 (different!) +├── npm packages ├── npm packages (maybe different) +├── OS: macOS ├── OS: Ubuntu Linux +└── Your app code └── Your app code + └── "works here" └── "crashes there" +``` + +### Container-Based Deployment + +``` +Your Laptop Production Server +├── Docker Engine ├── Docker Engine (or K8s) +└── Container └── Container (identical image) + ├── Node v20 ├── Node v20 + ├── npm packages ├── npm packages + ├── Alpine Linux ├── Alpine Linux + └── Your app code └── Your app code + └── "works here" └── "works here too" +``` + +**Key insight**: The container IS the environment. Ship the container, ship the guarantee. + +--- + +## Part 1: The Problem Containers Solve + +### "Works On My Machine" + +Every developer has heard (or said) this. 
The root cause is always the same: **the environment is different**. + +Differences that break things: +- Operating system (macOS vs. Linux vs. Windows) +- Language runtime version (Node 18 vs. 20) +- System libraries (OpenSSL 1.1 vs. 3.0) +- Environment variables (missing or different values) +- File paths and permissions +- Installed tools and their versions + +### Before Containers: Virtual Machines + +Virtual machines (VMs) solved this by running an entire operating system inside another operating system. You could ship a VM image and guarantee identical environments. + +The problem: VMs are heavy. Each one runs a full OS kernel, needs gigabytes of RAM, and takes minutes to start. + +``` +VM Approach: +┌──────────────────────────────────┐ +│ Host OS (your laptop) │ +│ ┌────────────────────────────┐ │ +│ │ VM (full guest OS) │ │ +│ │ ┌──────────────────────┐ │ │ +│ │ │ Your App │ │ │ +│ │ │ + Runtime │ │ │ +│ │ │ + Libraries │ │ │ +│ │ └──────────────────────┘ │ │ +│ │ Full Linux Kernel │ │ +│ │ Full OS Utilities │ │ +│ │ ~1-2 GB overhead │ │ +│ └────────────────────────────┘ │ +└──────────────────────────────────┘ +``` + +### Containers: Lightweight Isolation + +Containers share the host OS kernel but isolate everything above it. They package just your application and its dependencies — no redundant OS. + +``` +Container Approach: +┌──────────────────────────────────┐ +│ Host OS + Container Runtime │ +│ ┌────────────┐ ┌────────────┐ │ +│ │ Container A │ │ Container B │ │ +│ │ Your App │ │ Database │ │ +│ │ + Runtime │ │ + Config │ │ +│ │ ~100 MB │ │ ~200 MB │ │ +│ └────────────┘ └────────────┘ │ +│ Shared OS Kernel │ +└──────────────────────────────────┘ +``` + +**Result**: Start in seconds, use megabytes instead of gigabytes, run dozens on a single laptop. 
+ +### Containers Are Not VMs + +This distinction matters: + +| | Virtual Machine | Container | +|---|---|---| +| Isolation | Full OS | Process-level | +| Size | Gigabytes | Megabytes | +| Startup | Minutes | Seconds | +| Overhead | High (full kernel) | Minimal (shared kernel) | +| Use case | Different OS needs | Application packaging | + +Containers use Linux kernel features (namespaces and cgroups) to isolate processes without the overhead of running a separate kernel. This is why Docker containers are Linux-native — on macOS and Windows, Docker runs a lightweight Linux VM under the hood to provide that kernel. + +--- + +## Part 2: Docker Fundamentals + +### Key Concepts + +**Image**: A read-only template containing your application and everything it needs to run. Think of it as a snapshot — a frozen, portable environment. + +**Container**: A running instance of an image. You can run multiple containers from the same image. Each gets its own isolated filesystem, network, and process space. + +**Dockerfile**: A text file with instructions for building an image. It's the recipe. + +**Registry**: A storage service for images. Docker Hub is the default public registry. Your team might use a private one (GitHub Container Registry, AWS ECR, etc.). + +``` +Dockerfile → docker build → Image → docker run → Container +(recipe) (cook) (dish) (serve) (running) +``` + +### Writing a Dockerfile + +Here's a Dockerfile for the chat app's Express backend: + +```dockerfile +# Start from a base image with Node.js installed +FROM node:20-alpine + +# Set the working directory inside the container +WORKDIR /app + +# Copy package files first (for better caching) +COPY package.json package-lock.json ./ + +# Install dependencies +RUN npm ci --only=production + +# Copy the rest of the application code +COPY . . 
+ +# Tell Docker which port the app listens on +EXPOSE 3001 + +# The command to run when the container starts +CMD ["node", "src/index.js"] +``` + +Let's break down each instruction: + +| Instruction | What it does | +|---|---| +| `FROM` | Base image to build on. `node:20-alpine` = Node.js 20 on Alpine Linux (tiny) | +| `WORKDIR` | Sets the working directory for subsequent instructions | +| `COPY` | Copies files from your machine into the image | +| `RUN` | Executes a command during the build (here: install dependencies) | +| `EXPOSE` | Documents which port the app uses (doesn't actually publish it) | +| `CMD` | The default command when a container starts from this image | + +### Building an Image + +```bash +docker build -t chat-backend:latest . +``` + +- `-t chat-backend:latest` — Tags the image with a name and version +- `.` — Build context (current directory, where the Dockerfile is) + +### Running a Container + +```bash +docker run -p 3001:3001 -e NODE_ENV=production chat-backend:latest +``` + +- `-p 3001:3001` — Map host port 3001 to container port 3001 +- `-e NODE_ENV=production` — Set an environment variable +- `chat-backend:latest` — The image to run + +Your app is now running inside a container, accessible at `localhost:3001`. + +### Image Layers and Caching + +Each Dockerfile instruction creates a **layer**. Docker caches layers and only rebuilds from the point where something changed. This is why we copy `package.json` before the application code: + +```dockerfile +# These layers are cached if package.json hasn't changed +COPY package.json package-lock.json ./ +RUN npm ci --only=production + +# Only this layer rebuilds when you change application code +COPY . . +``` + +If you only changed a source file, Docker reuses the cached `npm ci` layer — saving minutes on every build. + +### The .dockerignore File + +Like `.gitignore`, but for Docker builds. 
Keeps unnecessary files out of your image: + +``` +node_modules +.git +.env +*.md +.DS_Store +``` + +This matters because `COPY . .` copies everything in the build context. Without `.dockerignore`, you'd copy `node_modules` (then install them again), `.git` history, and other waste. + +### Multi-Stage Builds + +For the React frontend, you need to build static files but don't need the build tools in production: + +```dockerfile +# Stage 1: Build +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci +COPY . . +RUN npm run build + +# Stage 2: Serve +FROM caddy:2-alpine +COPY Caddyfile /etc/caddy/Caddyfile +COPY --from=build /app/dist /srv +EXPOSE 80 +``` + +**Stage 1** installs all dependencies (including dev) and builds the app. **Stage 2** copies only the built files into a lightweight Caddy image. The final image is tiny — just Caddy and your static files, no Node.js, no source code, no `node_modules`. + +Caddy needs a minimal config file (`Caddyfile`) for SPA routing: + +``` +:80 { + root * /srv + try_files {path} /index.html + file_server +} +``` + +`try_files` ensures that client-side routes (like `/chat/room/1`) serve `index.html` instead of returning 404. + +--- + +## Part 3: Docker Compose — Multi-Container Applications + +Real applications aren't a single container. Your chat app needs at least a frontend and a backend. Add a database, a cache, a message queue — suddenly you're managing many containers that need to talk to each other. 
+ +### The Problem + +Running containers individually: + +```bash +# Create a network so containers can communicate +docker network create chat-app + +# Start the database +docker run -d --name postgres --network chat-app \ + -e POSTGRES_PASSWORD=secret \ + postgres:16-alpine + +# Start the backend +docker run -d --name backend --network chat-app \ + -e DATABASE_URL=postgresql://postgres:secret@postgres:5432/chat \ + -p 3001:3001 \ + chat-backend:latest + +# Start the frontend +docker run -d --name frontend --network chat-app \ + -p 8080:80 \ + chat-frontend:latest +``` + +This is tedious, error-prone, and hard to reproduce. Docker Compose solves it. + +### docker-compose.yml + +```yaml +services: + traefik: + image: traefik:v3.2 + command: + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--entrypoints.web.address=:80" + ports: + - "8080:80" + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + + frontend: + build: + context: ./client + labels: + - "traefik.enable=true" + - "traefik.http.routers.frontend.rule=PathPrefix(`/`)" + - "traefik.http.routers.frontend.priority=1" + - "traefik.http.services.frontend.loadbalancer.server.port=80" + depends_on: + - backend + + backend: + build: + context: ./server + environment: + NODE_ENV: production + DATABASE_URL: postgresql://postgres:secret@db:5432/chat + labels: + - "traefik.enable=true" + - "traefik.http.routers.backend.rule=PathPrefix(`/api`)" + - "traefik.http.routers.backend.priority=2" + - "traefik.http.services.backend.loadbalancer.server.port=3001" + depends_on: + - db + + db: + image: postgres:16-alpine + environment: + POSTGRES_DB: chat + POSTGRES_PASSWORD: secret + volumes: + - pgdata:/var/lib/postgresql/data + ports: + - "5432:5432" + +volumes: + pgdata: +``` + +Traefik acts as a reverse proxy, using Docker labels to discover services and route traffic. Requests to `/api/*` go to the backend (priority 2), everything else goes to the frontend (priority 1). 
The frontend uses `/api` as its API base URL — same origin, no CORS needed. + +### Running the Full Stack + +```bash +# Start everything +docker compose up + +# Start in background +docker compose up -d + +# View logs +docker compose logs -f backend + +# Stop everything +docker compose down + +# Stop and remove data volumes +docker compose down -v +``` + +One command. Every service starts, connects to the same network, and can find each other by service name (`backend` can reach `db` at hostname `db`). + +### Key Compose Concepts + +**Services**: Each entry under `services:` becomes a container. The name (`frontend`, `backend`, `db`) becomes the hostname on the internal network. + +**build vs. image**: Use `build` to build from a local Dockerfile, `image` to pull a pre-built image from a registry. + +**depends_on**: Controls startup order. `backend` waits for `db` to start (but not necessarily to be ready — that's an important distinction). + +**volumes**: Persist data outside the container. Without `pgdata`, your database would lose all data when the container stops. + +**ports**: Map `host:container` ports. `"5432:5432"` makes the database accessible from your host machine at `localhost:5432`. + +**environment**: Set environment variables. For sensitive values, use a `.env` file: + +```yaml +backend: + env_file: + - .env +``` + +### Compose for Development + +Compose is particularly valuable for local development. You can override settings for dev: + +```yaml +# docker-compose.override.yml (automatically loaded) +services: + backend: + volumes: + - ./server/src:/app/src # Mount source code for live changes + environment: + NODE_ENV: development + command: ["npx", "nodemon", "src/index.js"] +``` + +Now changes to your source code are reflected immediately inside the container — no rebuild needed. 
+ +--- + +## Part 4: Volumes, Networks, and State + +### Volumes: Persistent Data + +Containers are **ephemeral** — when they stop, any data written inside them disappears. Volumes solve this. + +```yaml +volumes: + pgdata: # Named volume — Docker manages the storage location + +services: + db: + image: postgres:16-alpine + volumes: + - pgdata:/var/lib/postgresql/data # Persist database files +``` + +Three types of mounts: + +| Type | Syntax | Use Case | +|---|---|---| +| Named volume | `pgdata:/data` | Database storage, persistent data | +| Bind mount | `./src:/app/src` | Development (live code changes) | +| tmpfs | `tmpfs: /tmp` | Temporary data (never written to disk) | + +### Networks: Container Communication + +Docker Compose creates a default network for all services. Containers reach each other by service name: + +```javascript +// Inside the backend container, "db" resolves to the database container +const pool = new Pool({ + connectionString: 'postgresql://postgres:secret@db:5432/chat' + // ^^ service name +}); +``` + +You don't need to know IP addresses. Docker's internal DNS handles it. + +### Managing State Across Restarts + +```bash +# Data survives container restarts +docker compose down # Containers removed, volumes kept +docker compose up -d # New containers, same data + +# Nuclear option — remove everything including data +docker compose down -v # -v removes volumes too +``` + +--- + +## Part 5: Why Orchestration? + +Docker Compose works well for development and simple deployments. But production systems have requirements that Compose alone can't meet. + +### The Scaling Problem + +``` +docker compose up --scale backend=3 +``` + +This starts 3 backend containers, but: +- How does traffic get distributed between them? +- What if one crashes? Who restarts it? +- How do you update without downtime? +- What if you need containers spread across multiple servers? 
+ +### What Orchestration Provides + +An orchestrator manages containers across a cluster of machines. You declare what you want ("run 3 copies of my backend, always"), and the orchestrator makes it happen. + +| Concern | Docker Compose | Orchestrator (K8s) | +|---|---|---| +| Scaling | Manual (`--scale`) | Automatic (CPU/memory rules) | +| Self-healing | None (container dies, stays dead) | Restarts automatically | +| Load balancing | Not built in | Built in | +| Rolling updates | Stop all, start all | Zero-downtime updates | +| Multi-server | Single host only | Cluster of machines | +| Service discovery | DNS by container name | DNS + advanced routing | + +### Kubernetes: The Industry Standard + +Kubernetes (often written K8s — K, 8 middle letters, s) is the dominant container orchestrator. Originally designed at Google, now open source and maintained by the Cloud Native Computing Foundation (CNCF). + +You don't need to master Kubernetes right now. But as a developer working on containerized applications, you need to understand its vocabulary and mental model so you can: + +- Read and modify deployment manifests +- Understand what your platform team is talking about +- Debug issues in staging and production environments +- Make informed architectural decisions + +--- + +## Part 6: Kubernetes Primitives + +Kubernetes has a lot of concepts, but the core ones you'll encounter daily are fewer than you think. Each solves a specific problem. + +### The Mental Model + +Kubernetes works on **declarative state**: you describe what you want, and Kubernetes continuously works to make reality match your description. If a container crashes, Kubernetes notices the mismatch and creates a new one. + +``` +You declare: "I want 3 copies of my backend running" +Kubernetes sees: 2 running (one crashed) +Kubernetes acts: Starts a new one +Result: 3 running again +``` + +### Pod + +The smallest deployable unit. A Pod wraps one or more containers that share storage and network. 
In practice, most Pods contain a single container. + +```yaml +# You rarely write Pod manifests directly — Deployments manage them +apiVersion: v1 +kind: Pod +metadata: + name: chat-backend +spec: + containers: + - name: backend + image: chat-backend:latest + ports: + - containerPort: 3001 +``` + +**Why it matters**: When someone says "the pod is crashing," they mean your container is failing to start or run. `kubectl logs ` is how you see what went wrong. + +### Deployment + +Manages a set of identical Pods. Handles scaling, updates, and self-healing. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: chat-backend +spec: + replicas: 3 # Run 3 copies + selector: + matchLabels: + app: chat-backend + template: # Pod template + metadata: + labels: + app: chat-backend + spec: + containers: + - name: backend + image: chat-backend:latest + ports: + - containerPort: 3001 + envFrom: + - configMapRef: + name: backend-config +``` + +**Key behavior**: If you update the image tag and apply the manifest, Kubernetes performs a **rolling update** — starting new Pods before stopping old ones, ensuring zero downtime. + +### StatefulSet + +Like a Deployment, but for workloads that need stable identity and persistent storage — primarily databases. + +```yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres +spec: + serviceName: postgres + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:16-alpine + volumeMounts: + - name: pgdata + mountPath: /var/lib/postgresql/data + volumeClaimTemplates: + - metadata: + name: pgdata + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi +``` + +**Deployment vs. StatefulSet**: Use Deployments for stateless services (your API, frontend). Use StatefulSets for stateful workloads (databases, caches) that need stable network identities and persistent volumes. 
+ +### Service + +Provides a stable network endpoint for a set of Pods. Since Pods are ephemeral (they come and go), you need something permanent to point to. + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: chat-backend +spec: + selector: + app: chat-backend # Routes to all Pods with this label + ports: + - port: 80 # Service port + targetPort: 3001 # Container port + type: ClusterIP # Internal only (default) +``` + +Other Pods in the cluster can now reach the backend at `http://chat-backend:80`. The Service load-balances across all matching Pods automatically. + +**Service types**: +- `ClusterIP` — Internal only (default, most common) +- `NodePort` — Exposes on each node's IP at a static port +- `LoadBalancer` — Provisions an external load balancer (cloud providers) + +### Ingress + +Routes external HTTP traffic to internal Services. This is how the outside world reaches your app. + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: chat-ingress +spec: + rules: + - host: chat.example.com + http: + paths: + - path: /api + pathType: Prefix + backend: + service: + name: chat-backend + port: + number: 80 + - path: / + pathType: Prefix + backend: + service: + name: chat-frontend + port: + number: 80 +``` + +**How it works**: An Ingress Controller (like Traefik, which is built into k3s) reads these rules and configures routing. `chat.example.com/api/*` goes to your backend Service, everything else goes to your frontend Service. + +### ConfigMap + +Stores non-sensitive configuration as key-value pairs, decoupled from your container image. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: backend-config +data: + NODE_ENV: "production" + CORS_ORIGIN: "https://chat.example.com" + LOG_LEVEL: "info" +``` + +Referenced by Pods via `envFrom` (load all keys as env vars) or `env` (load specific keys). Change the ConfigMap and restart the Pod — no image rebuild needed. 
+ +For **sensitive** values (passwords, API keys), use a `Secret` instead of a ConfigMap. Secrets are base64-encoded and can be encrypted at rest. + +### Kustomize + +A tool (built into `kubectl`) for managing Kubernetes manifests across environments without templating. Instead of one massive YAML file with `if/else` logic, you write a clean base and overlay environment-specific changes. + +``` +k8s/ +├── base/ +│ ├── kustomization.yaml +│ ├── deployment.yaml +│ ├── service.yaml +│ └── configmap.yaml +└── overlays/ + ├── development/ + │ └── kustomization.yaml # Overrides for dev + ├── staging/ + │ └── kustomization.yaml # Overrides for staging + └── production/ + └── kustomization.yaml # Overrides for prod +``` + +Base `kustomization.yaml`: +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - deployment.yaml + - service.yaml + - configmap.yaml +``` + +Production overlay `kustomization.yaml`: +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../base +patches: + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: chat-backend + spec: + replicas: 3 +``` + +Apply to a specific environment: +```bash +kubectl apply -k k8s/overlays/production +``` + +**Why Kustomize over Helm?** Helm uses templates (Go templating in YAML — hard to read, hard to debug). Kustomize uses plain YAML with strategic merge patches. For most applications, Kustomize is simpler and sufficient. Helm is better suited for packaging software you distribute to others. 
+ +### Putting It All Together + +Here's how the primitives compose for the chat app: + +``` +Internet + │ + ▼ +Ingress (routes /api → backend, / → frontend) + │ + ├─▶ Service: chat-frontend + │ └─▶ Deployment (2 replicas) + │ ├─▶ Pod: frontend-abc12 + │ └─▶ Pod: frontend-def34 + │ + ├─▶ Service: chat-backend + │ └─▶ Deployment (3 replicas) + │ ├─▶ Pod: backend-ghi56 + │ ├─▶ Pod: backend-jkl78 + │ └─▶ Pod: backend-mno90 + │ + └─▶ Service: postgres + └─▶ StatefulSet (1 replica) + └─▶ Pod: postgres-0 + └─▶ PersistentVolume (1Gi) + +ConfigMaps: backend-config, frontend-config +Secrets: db-credentials +``` + +--- + +## Part 7: The K8s-First Development Stack + +Here's an opinionated take: for web-based software products, start with Kubernetes from day one. Not because you need the scale, but because the development workflow is better than you'd expect — and the production story is dramatically simpler when you get there. + +### The Stack + +| Tool | Role | Where | +|---|---|---| +| **kind** | Runs a K8s cluster inside Docker containers | Local development | +| **Tilt** | Watches your code, rebuilds and deploys to the cluster automatically | Local development | +| **k3s** | Lightweight, certified Kubernetes distribution | Production | +| **Kustomize** | Manages environment-specific configuration | Everywhere | + +### kind: Kubernetes in Docker + +[kind](https://kind.sigs.k8s.io/) runs a full Kubernetes cluster inside Docker containers. It's designed for testing Kubernetes itself, but it's an excellent local development tool. + +```bash +# Create a cluster +kind create cluster --name chat-dev + +# Your kubectl now points to the local cluster +kubectl cluster-info + +# Delete when done +kind delete cluster --name chat-dev +``` + +**Why kind over minikube?** kind is faster to start, uses fewer resources, and creates clusters identically to CI environments. It runs inside Docker, which you already have installed. 
+ +### Tilt: The Developer Experience Layer + +[Tilt](https://tilt.dev/) is the tool that makes K8s development feel like local development. Without Tilt, deploying to a local K8s cluster means manually rebuilding images and reapplying manifests on every code change. Tilt automates all of it. + +You define a `Tiltfile` (written in Starlark, a Python-like language): + +```python +# Tiltfile + +# Build the backend image and deploy to K8s +docker_build('chat-backend', './server') +k8s_yaml('k8s/base/backend-deployment.yaml') + +# Build the frontend image and deploy to K8s +docker_build('chat-frontend', './client') +k8s_yaml('k8s/base/frontend-deployment.yaml') + +# Database — use the image directly, no build needed +k8s_yaml('k8s/base/postgres-statefulset.yaml') + +# Services and ingress +k8s_yaml('k8s/base/services.yaml') +k8s_yaml('k8s/base/ingress.yaml') + +# Port forwards for local access +k8s_resource('chat-backend', port_forwards='3001:3001') +k8s_resource('chat-frontend', port_forwards='8080:80') +``` + +Run `tilt up` and Tilt: +1. Builds your Docker images +2. Deploys everything to your kind cluster +3. Watches your source code for changes +4. Rebuilds and redeploys automatically on save +5. Streams logs from all containers +6. Provides a web dashboard showing the status of all services + +**This is the key insight**: with Tilt, the development experience is comparable to `docker compose up` with live reload — but you're running real Kubernetes. Same manifests, same networking model, same configuration. The gap between dev and prod shrinks to nearly zero. + +### k3s: Production Kubernetes Without the Complexity + +[k3s](https://k3s.io/) is a lightweight Kubernetes distribution built for production. It's fully certified K8s packaged as a single binary under 100MB. 
+ +What makes k3s practical for smaller teams: +- **Single binary install**: `curl -sfL https://get.k3s.io | sh -` +- **Batteries included**: Built-in ingress controller (Traefik), load balancer, and storage +- **Low resource usage**: Runs on machines with 512MB RAM +- **Same API**: Anything that works on "full" Kubernetes works on k3s + +k3s runs in production for thousands of organizations, from edge deployments to multi-node clusters. It's not a toy — it's Kubernetes without the operational overhead of managing etcd clusters and control plane components separately. + +### The Argument for K8s-First + +**"Isn't Kubernetes overkill for a small project?"** + +The traditional thinking is: start simple (Heroku/Railway), outgrow it, then migrate to Kubernetes. This migration is expensive — you're rewriting deployment infrastructure at the same time your app is growing and your team is busy. + +The K8s-first alternative: + +1. **Local-production parity from day one.** Your `docker compose up` becomes `tilt up`. Same containers, same networking. But now your manifests *are* your production configuration. + +2. **No migration tax.** You never have to rewrite deployment. The same K8s manifests that run on kind locally run on k3s in production. Add Kustomize overlays for environment differences. + +3. **Tilt makes it developer-friendly.** The "Kubernetes is too complicated for development" argument assumed you were running `kubectl apply` manually. Tilt eliminates that friction. + +4. **k3s makes it operations-friendly.** You don't need a dedicated platform team to run k3s. A single $10/month VPS can run your entire stack. + +5. **Scales without architecture changes.** When you need 3 replicas instead of 1, change a number in a YAML file. When you need a second node, join it to the cluster. No re-platforming. 
+ +**When this approach is NOT the right call:** +- **Static sites and JAMstack** — Vercel/Netlify are purpose-built and better +- **Serverless workloads** — Functions that run infrequently don't need always-on containers +- **You're the only developer and want maximum simplicity** — Railway/Render have lower initial learning investment +- **Your team has zero container experience** — Get comfortable with Docker first, then consider K8s + +The goal isn't dogma. It's recognizing that for **web applications with a backend, database, and foreseeable scaling needs**, the K8s-first stack (kind + Tilt + k3s + Kustomize) offers a better long-term trajectory than starting on a managed platform and migrating later. + +--- + +## Exercise 1: Containerize the Chat App Backend + +Write a Dockerfile for the chat app's Express backend. + +**Requirements:** +1. Use `node:20-alpine` as the base image +2. Set the working directory to `/app` +3. Copy and install dependencies first (layer caching) +4. Copy application code +5. Expose port 3001 +6. Set the default command + +**Test it:** +```bash +docker build -t chat-backend:latest ./server +docker run -p 3001:3001 -e NODE_ENV=production chat-backend:latest +curl http://localhost:3001/api/health +``` + +
+Solution + +```dockerfile +FROM node:20-alpine +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci --only=production +COPY . . +EXPOSE 3001 +CMD ["node", "src/index.js"] +``` + +And `.dockerignore`: +``` +node_modules +.git +.env +*.md +.DS_Store +``` + +
+ +--- + +## Exercise 2: Multi-Stage Frontend Build + +Write a multi-stage Dockerfile for the React frontend. + +**Requirements:** +1. Stage 1 (`build`): Install dependencies and run `npm run build` +2. Stage 2: Copy built files into a `caddy:2-alpine` image with a `Caddyfile` for SPA routing +3. The final image should contain only Caddy and the static files + +**Test it:** +```bash +docker build -t chat-frontend:latest ./client +docker run -p 8080:80 chat-frontend:latest +# Visit http://localhost:8080 +``` + +
+Solution + +```dockerfile +# Stage 1: Build +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci +COPY . . +RUN npm run build + +# Stage 2: Serve +FROM caddy:2-alpine +COPY Caddyfile /etc/caddy/Caddyfile +COPY --from=build /app/dist /srv +EXPOSE 80 +``` + +And `Caddyfile`: +``` +:80 { + root * /srv + try_files {path} /index.html + file_server +} +``` + +
+ +--- + +## Exercise 3: Docker Compose + +Write a `docker-compose.yml` that runs the frontend, backend, and a PostgreSQL database together. + +**Requirements:** +1. Frontend served on port 8080 +2. Backend on port 3001 with environment variables for the database +3. PostgreSQL with a named volume for persistence +4. Backend depends on the database; frontend depends on the backend + +**Test it:** +```bash +docker compose up +# Frontend at http://localhost:8080 +# Backend at http://localhost:3001/api/health +# Database at localhost:5432 +``` + +
+Solution + +See the [19-chat-docker-compose example](/docs/examples/chat-docker-compose) for the complete working setup. + +
+ +--- + +## Exercise 4: Read a Kubernetes Manifest + +Given this manifest, answer the questions below: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: api-server +spec: + replicas: 2 + selector: + matchLabels: + app: api + template: + metadata: + labels: + app: api + spec: + containers: + - name: api + image: myapp/api:v1.2.3 + ports: + - containerPort: 8080 + envFrom: + - configMapRef: + name: api-config +--- +apiVersion: v1 +kind: Service +metadata: + name: api-server +spec: + selector: + app: api + ports: + - port: 80 + targetPort: 8080 +``` + +**Questions:** +1. How many copies of the API will be running? +2. What Docker image is being used? +3. How would another Pod in the cluster reach this service? +4. If a Pod crashes, what happens? +5. Where does the Pod get its environment variables? + +
+Answers + +1. **2 replicas** — specified by `replicas: 2` +2. **myapp/api:v1.2.3** — specified in the container spec +3. **http://api-server:80** — the Service name becomes a DNS entry, port 80 maps to container port 8080 +4. **Kubernetes creates a replacement** — the Deployment controller notices the actual state (1 Pod) doesn't match desired state (2 Pods) and creates a new one +5. **From the ConfigMap named `api-config`** — `envFrom` with `configMapRef` loads all keys from the ConfigMap as environment variables + +
+ +--- + +## Exercise 5: Design a Kustomize Overlay + +Your chat app runs in development (1 replica, debug logging) and production (3 replicas, info logging). Using the Kustomize structure from Part 6, write the production overlay that changes the replica count. + +
+Solution + +`k8s/overlays/production/kustomization.yaml`: +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../../base +patches: + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: chat-backend + spec: + replicas: 3 + - patch: |- + apiVersion: v1 + kind: ConfigMap + metadata: + name: backend-config + data: + LOG_LEVEL: "info" +``` + +Apply with: `kubectl apply -k k8s/overlays/production` + +
+ +--- + +## Common Issues + +### "Cannot connect to the Docker daemon" + +``` +Cannot connect to the Docker daemon at unix:///var/run/docker.sock +``` + +**Fix**: Docker Desktop (or Docker Engine) isn't running. Start it, then try again. + +### Port Already in Use + +``` +Error: port is already allocated +``` + +**Fix**: Another process (or container) is using that port. Stop it, or map to a different host port: `-p 3002:3001`. + +### Image Build Fails at npm ci + +``` +npm ERR! could not determine executable to run +``` + +**Fix**: Make sure `package-lock.json` is included in the `COPY` and isn't in `.dockerignore`. + +### Container Starts Then Immediately Exits + +**Fix**: Check logs with `docker logs `. Common causes: +- Missing environment variables +- Database not reachable (if using `depends_on`, the database may not be *ready* yet — just started) +- Application crash on startup + +### "Connection Refused" Between Containers + +**Fix**: Use the **service name** (not `localhost`) as the hostname. Inside a Docker network, containers reach each other by name. `localhost` inside a container means that container itself. + +--- + +## Key Takeaways + +1. **Containers solve environment problems** — Package your app with its dependencies, and it runs the same everywhere. + +2. **Images are recipes, containers are instances** — Build once, run many times. Each container is isolated. + +3. **Layer caching is your friend** — Copy dependency files before source code. Structure your Dockerfile for fast rebuilds. + +4. **Docker Compose is essential for local development** — One `docker compose up` replaces a page of setup instructions. + +5. **Kubernetes manages containers at scale** — Declarative state, self-healing, rolling updates, service discovery. You describe what you want; K8s makes it happen. + +6. **You don't need to master K8s to benefit from it** — Learn the primitives (Pod, Deployment, Service, Ingress, ConfigMap, Kustomize). Understand the vocabulary. 
Ask good questions. + +7. **The K8s-first stack is worth evaluating** — kind + Tilt (dev) and k3s (prod) with Kustomize (config) offers local-prod parity without the traditional Kubernetes complexity tax. + +--- + +## What's Next + +This module gave you the foundation for containerized development and an awareness of Kubernetes. From here: + +- **Practice**: Work through the [Docker Compose example](/docs/examples/chat-docker-compose) and the [Kubernetes example](/docs/examples/chat-kubernetes) to get hands-on experience +- **Go deeper on Docker**: Learn about health checks, resource limits, and security scanning +- **Explore K8s further**: Set up a kind cluster and deploy the chat app with Tilt +- **Module 20 (planned)**: Observability & Reliability — monitoring what your containers are doing in production +- **Module 21 (planned)**: Infrastructure as Code — managing the infrastructure itself declaratively diff --git a/website/docs/examples/chat-docker-compose/index.md b/website/docs/examples/chat-docker-compose/index.md new file mode 100644 index 0000000..e27d065 --- /dev/null +++ b/website/docs/examples/chat-docker-compose/index.md @@ -0,0 +1,136 @@ +--- +sidebar_position: 8 +title: Chat Docker Compose +description: Containerized chat app with Traefik, Caddy, and PostgreSQL +--- + +# Chat Docker Compose (Stage 6) + +**Multi-container deployment with Docker Compose** + +--- + +## Overview + +This example takes the fullstack chat application and containerizes it with Docker. A Traefik reverse proxy routes requests to a Caddy-served React frontend and an Express backend, with PostgreSQL for persistence — all orchestrated by Docker Compose. 
+ +**Why this matters:** +- Understand containerization (Dockerfiles, images, containers) +- Learn multi-stage builds for production-optimized images +- See how reverse proxies route traffic between services +- Foundation for Kubernetes and production deployments + +--- + +## What You'll Learn + +- **Dockerfiles**: Writing production-ready containers for Node.js and React +- **Multi-Stage Builds**: Build stage (Node) → serve stage (Caddy) for tiny images +- **Docker Compose**: Defining multi-container applications declaratively +- **Traefik**: Label-based reverse proxy with path routing +- **Container Networking**: Service discovery via DNS names +- **Volume Persistence**: Surviving database restarts + +--- + +## Prerequisites + +- Completed [Chat Fullstack](/docs/examples/chat-fullstack) +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) installed +- No other local dependencies — everything runs in containers + +--- + +## Project Structure + +``` +19-chat-docker-compose/ +├── client/ +│ ├── Dockerfile # Multi-stage: Node (build) → Caddy (serve) +│ ├── Caddyfile # SPA routing config +│ └── src/ # React application +├── server/ +│ ├── Dockerfile # Node.js production image +│ └── src/ # Express API (PostgreSQL) +├── docker-compose.yml # Traefik + services +├── docker-compose.override.yml # Dev overrides (live reload) +└── .env.example +``` + +--- + +## Quick Start + +```bash +cd examples/19-chat-docker-compose + +# Production-like mode (Traefik + Caddy + Node + Postgres) +docker compose -f docker-compose.yml up --build + +# Development mode (with live reload) +docker compose up --build +``` + +- App: `http://localhost:8080` +- Traefik dashboard: `http://localhost:8081` + +--- + +## Key Concepts + +### 1. Multi-Stage Dockerfile + +Build your React app, then serve static files with Caddy — no Node.js in the final image: + +```dockerfile +FROM node:20-alpine AS build +WORKDIR /app +COPY package.json package-lock.json ./ +RUN npm ci +COPY . . 
+RUN npm run build + +FROM caddy:2-alpine +COPY Caddyfile /etc/caddy/Caddyfile +COPY --from=build /app/dist /srv +``` + +### 2. Traefik Reverse Proxy + +Traefik reads Docker labels to route traffic — no config files needed: + +```yaml +backend: + labels: + - "traefik.enable=true" + - "traefik.http.routers.backend.rule=PathPrefix(`/api`)" + - "traefik.http.routers.backend.priority=2" +``` + +### 3. Architecture + +``` +Browser → :8080 → Traefik + ├── /api/* → Backend (Express :3001) + │ └── PostgreSQL (:5432) + └── /* → Frontend (Caddy :80) +``` + +--- + +## Curriculum Alignment + +This example aligns with: +- **Module 19**: Containerization & Orchestration — Docker and Docker Compose + +--- + +## Next Steps + +Ready for Kubernetes? Continue to [Chat Kubernetes](/docs/examples/chat-kubernetes). + +--- + +## Source Code + +View the complete source: [`examples/19-chat-docker-compose/`](https://github.com/Episk-pos/DevFoundry/tree/main/examples/19-chat-docker-compose) diff --git a/website/docs/examples/chat-kubernetes/index.md b/website/docs/examples/chat-kubernetes/index.md new file mode 100644 index 0000000..b2cfc97 --- /dev/null +++ b/website/docs/examples/chat-kubernetes/index.md @@ -0,0 +1,145 @@ +--- +sidebar_position: 9 +title: Chat Kubernetes +description: Kubernetes deployment with kind, Tilt, and Traefik ingress +--- + +# Chat Kubernetes (Stage 7) + +**Local Kubernetes deployment with kind and Tilt** + +--- + +## Overview + +This example deploys the same containerized chat application to a local Kubernetes cluster. You'll write Deployments, Services, and Ingress manifests — the same resources that run in production on k3s or any managed Kubernetes provider. 
+ +**Why this matters:** +- Understand Kubernetes primitives (Pods, Deployments, Services, Ingress) +- Learn the kind + Tilt development workflow +- See how Kustomize manages environment differences +- Same manifests work locally and in production + +--- + +## What You'll Learn + +- **Kubernetes Manifests**: Deployments, Services, ConfigMaps, Ingress +- **kind**: Local Kubernetes clusters inside Docker +- **Tilt**: Automated build-deploy-reload for K8s development +- **Kustomize**: Environment overlays without templating +- **Traefik Ingress**: Path-based routing in Kubernetes + +--- + +## Prerequisites + +- Completed [Chat Docker Compose](/docs/examples/chat-docker-compose) +- [Docker Desktop](https://www.docker.com/products/docker-desktop/) installed +- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation), [kubectl](https://kubernetes.io/docs/tasks/tools/), [Tilt](https://docs.tilt.dev/install.html), and [Helm](https://helm.sh/docs/intro/install/) installed + +--- + +## Project Structure + +``` +20-chat-kubernetes/ +├── client/ # Same as Docker Compose example +├── server/ # Same as Docker Compose example +├── k8s/ +│ ├── base/ +│ │ ├── kustomization.yaml +│ │ ├── backend-deployment.yaml +│ │ ├── frontend-deployment.yaml +│ │ ├── postgres-statefulset.yaml +│ │ ├── services.yaml +│ │ ├── configmap.yaml +│ │ └── ingress.yaml # ingressClassName: traefik +│ └── overlays/ +│ ├── development/ # Debug logging +│ └── production/ # 3 replicas, warn logging +├── Tiltfile # Build + deploy + watch +├── kind-config.yaml # Cluster with port mappings +└── traefik-values.yaml # Helm values for ingress +``` + +--- + +## Quick Start + +```bash +cd examples/20-chat-kubernetes + +# 1. Create cluster +kind create cluster --name chat-dev --config kind-config.yaml + +# 2. Install Traefik ingress controller +helm repo add traefik https://traefik.github.io/charts +helm repo update +helm install traefik traefik/traefik -f traefik-values.yaml + +# 3. 
Start Tilt (builds, deploys, watches for changes) +tilt up +``` + +- Tilt dashboard: `http://localhost:10350` +- Frontend: `http://localhost:8080` +- Backend API: `http://localhost:3001/api/health` + +--- + +## Key Concepts + +### 1. Declarative State + +You describe what you want; Kubernetes makes it happen: + +```yaml +spec: + replicas: 3 # "I want 3 copies" +``` + +If a Pod crashes, Kubernetes creates a replacement automatically. + +### 2. Kustomize Overlays + +Same base manifests, different settings per environment: + +```yaml +# k8s/overlays/production/kustomization.yaml +patches: + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: chat-backend + spec: + replicas: 3 +``` + +### 3. Ingress Routing + +Traefik routes external traffic to internal Services: + +```yaml +spec: + ingressClassName: traefik + rules: + - http: + paths: + - path: /api → chat-backend + - path: / → chat-frontend +``` + +--- + +## Curriculum Alignment + +This example aligns with: +- **Module 19**: Containerization & Orchestration — Kubernetes fundamentals + +--- + +## Source Code + +View the complete source: [`examples/20-chat-kubernetes/`](https://github.com/Episk-pos/DevFoundry/tree/main/examples/20-chat-kubernetes) diff --git a/website/sidebars.ts b/website/sidebars.ts index 77a652a..331841f 100644 --- a/website/sidebars.ts +++ b/website/sidebars.ts @@ -46,6 +46,14 @@ const sidebars: SidebarsConfig = { }, ], }, + { + type: 'category', + label: 'Part V: Infrastructure & Operations', + collapsed: false, + items: [ + 'curriculum/part-5-infrastructure/containerization-and-orchestration', + ], + }, ], }, { @@ -60,6 +68,8 @@ const sidebars: SidebarsConfig = { 'examples/chat-react/index', 'examples/chat-fullstack/index', 'examples/chat-realtime/index', + 'examples/chat-docker-compose/index', + 'examples/chat-kubernetes/index', ], }, { diff --git a/website/static/img/docusaurus-social-card.jpg b/website/static/img/devfoundry-social-card.jpg similarity index 100% rename from 
website/static/img/docusaurus-social-card.jpg rename to website/static/img/devfoundry-social-card.jpg