diff --git a/.github/workflows/smoke-test-services.yml b/.github/workflows/smoke-test-services.yml index 4de517c..1e0e6eb 100644 --- a/.github/workflows/smoke-test-services.yml +++ b/.github/workflows/smoke-test-services.yml @@ -43,106 +43,134 @@ jobs: working-directory: on-prem run: ./scripts/setup.sh --env-only --force + # ========================================================================== + # Start services + # ========================================================================== - name: Start infrastructure services working-directory: on-prem - run: | - echo "Starting Redis, MongoDB, and MongoDB init..." - docker compose -f docker-compose.full.yml up -d redis mongodb - echo "Waiting for services to initialize..." + run: docker compose -f docker-compose.full.yml up -d redis mongodb - name: Wait for Redis working-directory: on-prem - run: | - echo "Waiting for Redis to be ready..." - for i in {1..30}; do - if docker compose -f docker-compose.full.yml exec -T redis redis-cli ping | grep -q PONG; then - echo "✅ Redis is ready" - exit 0 - fi - echo "Attempt $i/30 - Redis not ready yet..." - sleep 2 - done - echo "❌ Redis failed to start" - docker compose -f docker-compose.full.yml logs redis - exit 1 + run: ./scripts/smoke-test/wait-for-redis.sh - name: Wait for MongoDB working-directory: on-prem - run: | - echo "Waiting for MongoDB to be healthy..." - for i in {1..60}; do - if docker compose -f docker-compose.full.yml exec -T mongodb mongosh --quiet --eval "db.runCommand('ping').ok" localhost:27017 2>/dev/null | grep -q 1; then - echo "✅ MongoDB is ready" - exit 0 - fi - echo "Attempt $i/60 - MongoDB not ready yet..." - sleep 2 - done - echo "❌ MongoDB failed to start" - docker compose -f docker-compose.full.yml logs mongodb - exit 1 + run: ./scripts/smoke-test/wait-for-mongodb.sh - name: Start application services working-directory: on-prem - run: | - echo "Starting Scheduler and API..." 
- docker compose -f docker-compose.full.yml up -d scheduler api - echo "Waiting for services to initialize..." + run: docker compose -f docker-compose.full.yml up -d scheduler api - name: Wait for API working-directory: on-prem - run: | - echo "Waiting for API to be ready..." - for i in {1..60}; do - if curl -sf http://localhost:4000/health > /dev/null 2>&1; then - echo "✅ API is ready" - exit 0 - fi - echo "Attempt $i/60 - API not ready yet..." - sleep 2 - done - echo "❌ API failed to start" - docker compose -f docker-compose.full.yml logs api - exit 1 + run: ./scripts/smoke-test/wait-for-api.sh - name: Wait for root user working-directory: on-prem - run: | - source .env - echo "Waiting for root user to be created..." - for i in {1..30}; do - if docker compose -f docker-compose.full.yml exec -T mongodb mongosh \ - -u "$MONGODB_USERNAME" -p "$MONGODB_PASSWORD" --authenticationDatabase admin \ - --quiet --eval "db.getSiblingDB('currents').user.findOne({email: '${ON_PREM_EMAIL:-root@currents.local}'})" 2>/dev/null | grep -q "_id"; then - echo "✅ Root user exists" - exit 0 - fi - echo "Attempt $i/30 - Root user not created yet..." - sleep 2 - done - echo "❌ Root user was not created" - docker compose -f docker-compose.full.yml logs api scheduler - exit 1 + run: ./scripts/smoke-test/wait-for-root-user.sh - name: Seed database id: seed working-directory: on-prem run: | echo "Seeding database with test data..." 
- # Capture the KEY=VALUE output from seed script eval $(./scripts/smoke-test/seed-database.sh) - # Export to GitHub Actions output echo "api_key=${API_KEY}" >> $GITHUB_OUTPUT echo "project_id=${PROJECT_ID}" >> $GITHUB_OUTPUT - - name: Run API smoke test + # ========================================================================== + # API Tests - Part 1: Create and Fetch + # ========================================================================== + - name: "API Test: Create action" + id: create_action + working-directory: on-prem + run: | + eval $(./scripts/smoke-test/api-test-create.sh "${{ steps.seed.outputs.api_key }}" "${{ steps.seed.outputs.project_id }}") + echo "action_id=${ACTION_ID}" >> $GITHUB_OUTPUT + echo "test_name=${TEST_NAME}" >> $GITHUB_OUTPUT + + - name: "API Test: Fetch action" + working-directory: on-prem + run: ./scripts/smoke-test/api-test-fetch.sh "${{ steps.seed.outputs.api_key }}" "${{ steps.create_action.outputs.action_id }}" "${{ steps.create_action.outputs.test_name }}" + + # ========================================================================== + # Backup/Restore Test + # ========================================================================== + - name: Create MongoDB backup + working-directory: on-prem + run: | + source .env + echo "Creating MongoDB backup..." 
+ docker compose -f docker-compose.full.yml exec -T mongodb mongodump \ + -u "$MONGODB_USERNAME" -p "$MONGODB_PASSWORD" --authenticationDatabase admin \ + --archive=/data/db/backup.archive + docker compose -f docker-compose.full.yml cp mongodb:/data/db/backup.archive ./mongodb-backup.archive + echo "✅ Backup created" + + - name: Stop services and destroy data + working-directory: on-prem + run: | + docker compose -f docker-compose.full.yml down -v --remove-orphans + sudo rm -rf data + echo "✅ Data destroyed" + + - name: Restart infrastructure services + working-directory: on-prem + run: | + mkdir -p data/mongodb data/redis data/startup + docker compose -f docker-compose.full.yml up -d redis mongodb + + - name: Wait for MongoDB (after restart) + working-directory: on-prem + run: ./scripts/smoke-test/wait-for-mongodb.sh + + - name: Restore MongoDB backup working-directory: on-prem run: | - echo "Running API smoke test..." - ./scripts/smoke-test/api-test.sh "${{ steps.seed.outputs.api_key }}" "${{ steps.seed.outputs.project_id }}" + source .env + echo "Restoring MongoDB backup..." 
+ docker compose -f docker-compose.full.yml cp ./mongodb-backup.archive mongodb:/data/db/backup.archive + docker compose -f docker-compose.full.yml exec -T mongodb mongorestore \ + -u "$MONGODB_USERNAME" -p "$MONGODB_PASSWORD" --authenticationDatabase admin \ + --archive=/data/db/backup.archive --drop + docker compose -f docker-compose.full.yml exec -T mongodb rm /data/db/backup.archive + echo "✅ Backup restored" + + - name: Restart application services + working-directory: on-prem + run: docker compose -f docker-compose.full.yml up -d scheduler api + + - name: Wait for API (after restore) + working-directory: on-prem + run: ./scripts/smoke-test/wait-for-api.sh + + # ========================================================================== + # API Tests - Part 2: Verify restore and cleanup + # ========================================================================== + - name: "API Test: Fetch action (after restore)" + working-directory: on-prem + run: ./scripts/smoke-test/api-test-fetch.sh "${{ steps.seed.outputs.api_key }}" "${{ steps.create_action.outputs.action_id }}" "${{ steps.create_action.outputs.test_name }}" + + - name: "API Test: Delete action" + working-directory: on-prem + run: ./scripts/smoke-test/api-test-delete.sh "${{ steps.seed.outputs.api_key }}" "${{ steps.create_action.outputs.action_id }}" + + - name: Test summary + run: | + echo "==========================================" + echo "✅ All tests passed!" 
+ echo "==========================================" + echo "Verified:" + echo " - POST /actions (create)" + echo " - GET /actions (read)" + echo " - MongoDB backup/restore" + echo " - GET /actions (read after restore)" + echo " - DELETE /actions (delete)" - name: Cleanup if: always() working-directory: on-prem run: | docker compose -f docker-compose.full.yml down -v --remove-orphans + rm -f mongodb-backup.archive diff --git a/.github/workflows/validate-compose.yml b/.github/workflows/validate-compose.yml index c5e7778..e9e82d9 100644 --- a/.github/workflows/validate-compose.yml +++ b/.github/workflows/validate-compose.yml @@ -33,7 +33,6 @@ jobs: cat > .env << 'EOF' # Minimal env file for docker compose config validation CLICKHOUSE_CURRENTS_PASSWORD=placeholder - TRAEFIK_DOMAIN=example.com EOF - name: Validate docker-compose.full.yml @@ -85,7 +84,6 @@ jobs: cat > .env << 'EOF' # Minimal env file for podman compose config validation CLICKHOUSE_CURRENTS_PASSWORD=placeholder - TRAEFIK_DOMAIN=example.com EOF - name: Validate docker-compose.full.yml diff --git a/docs/README.md b/docs/README.md index 148861b..376a2ba 100644 --- a/docs/README.md +++ b/docs/README.md @@ -11,6 +11,8 @@ The Docker Compose configuration is modular, allowing you to choose which data s - [🚀 Quickstart Guide](./quickstart.md) - [Container Image Access](./container-images.md) - [Configuration Reference](./configuration.md) +- [Logging Configuration](./logging.md) +- [Backup and Restore](./backup-restore.md) - [Upgrading Currents On-Prem](./upgrading.md) - [Support Policy](./support.md) diff --git a/docs/backup-restore.md b/docs/backup-restore.md new file mode 100644 index 0000000..1cc4c5c --- /dev/null +++ b/docs/backup-restore.md @@ -0,0 +1,222 @@ +# Backup and Restore + +This guide covers backing up and restoring data for your Currents on-prem deployment. 
+ +## Overview + +Currents stores data in several locations: + +| Service | Default Path | Data Type | +|---------|--------------|-----------| +| MongoDB | `data/mongodb` | Primary application data (projects, runs, tests, users) | +| ClickHouse | `data/clickhouse` | Analytics and reporting data | +| Redis | `data/redis` | Cache and session data (optional to backup) | +| RustFS | `data/rustfs` | Artifacts, screenshots, videos (if using the provided rustfs) | + +## Before You Begin + +> **Important:** Always stop services before backing up to ensure data consistency. + +```bash +cd on-prem +docker compose stop +``` + +## Backup Procedures + +### Full Backup (All Data) + +The simplest approach is to backup the entire `data/` directory: + +```bash +# Stop services +docker compose stop + +# Create timestamped backup +tar -czvf backup-$(date +%Y%m%d-%H%M%S).tar.gz data/ + +# Restart services +docker compose start +``` + +> **Important:** In addition to data backups, securely store: +> - Your `.env` file (contains credentials needed for restore) - use a password manager or secrets vault +> - Your `docker-compose.yml` (or keep it version controlled) + +### MongoDB Backup + +MongoDB contains your primary application data. For production environments, consider using `mongodump` for more reliable backups. 
+ +#### Option 1: File-based backup (services stopped) + +```bash +docker compose stop +tar -czvf mongodb-backup-$(date +%Y%m%d).tar.gz data/mongodb/ +docker compose start +``` + +#### Option 2: mongodump (services running) + +```bash +# Source credentials from .env +source .env + +# Run mongodump inside the container +docker compose exec mongodb mongodump \ + -u "$MONGODB_USERNAME" \ + -p "$MONGODB_PASSWORD" \ + --authenticationDatabase admin \ + --archive=/data/db/backup.archive + +# Copy backup out of container +docker compose cp mongodb:/data/db/backup.archive ./mongodb-backup-$(date +%Y%m%d).archive + +# Clean up backup file in container +docker compose exec mongodb rm /data/db/backup.archive +``` + +### ClickHouse Backup + +ClickHouse stores analytics data. For large datasets, use ClickHouse's native backup. + +#### Option 1: File-based backup (services stopped) + +```bash +docker compose stop +tar -czvf clickhouse-backup-$(date +%Y%m%d).tar.gz data/clickhouse/ +docker compose start +``` + +#### Option 2: ClickHouse native backup (services running) + +```bash +# Source credentials from .env +source .env + +# Create backup using clickhouse-client +docker compose exec clickhouse clickhouse-client \ + --user currents \ + --password "$CLICKHOUSE_CURRENTS_PASSWORD" \ + --query "BACKUP DATABASE currents TO Disk('backups', 'backup-$(date +%Y%m%d)')" +``` + +> **Note:** Native backups require configuring a backup disk in ClickHouse. See [ClickHouse backup documentation](https://clickhouse.com/docs/en/operations/backup). + +### RustFS / Object Storage Backup + +If using local RustFS for object storage: + +```bash +docker compose stop +tar -czvf rustfs-backup-$(date +%Y%m%d).tar.gz data/rustfs/ +docker compose start +``` + +If using external S3-compatible storage, use your cloud provider's backup features or tools like `aws s3 sync` or `rclone`. + +### Redis Backup (Optional) + +Redis primarily stores cache data that can be regenerated. 
Backup is optional but can speed up recovery: + +```bash +docker compose stop +tar -czvf redis-backup-$(date +%Y%m%d).tar.gz data/redis/ +docker compose start +``` + +## Restore Procedures + +### Prerequisites + +Before restoring, ensure you have: +- Your backed up `.env` file (or recreate with the same credentials) +- Your `docker-compose.yml` file (or clone the repository and run setup) + +### Full Restore + +```bash +# Stop services +docker compose down + +# Remove existing data (careful!) +rm -rf data/ + +# Extract backup +tar -xzvf backup-YYYYMMDD-HHMMSS.tar.gz + +# Set permissions (Podman users - see quickstart troubleshooting) +# Example for rootful Podman: +# sudo chown -R 999:999 data/mongodb data/redis +# sudo chown -R 101:101 data/clickhouse + +# Restart services +docker compose up -d +``` + +### MongoDB Restore + +#### From file-based backup + +```bash +docker compose down +rm -rf data/mongodb/ +tar -xzvf mongodb-backup-YYYYMMDD.tar.gz +docker compose up -d +``` + +#### From mongodump archive + +```bash +# Copy backup into container +docker compose cp ./mongodb-backup-YYYYMMDD.archive mongodb:/data/db/backup.archive + +# Source credentials +source .env + +# Restore using mongorestore +docker compose exec mongodb mongorestore \ + -u "$MONGODB_USERNAME" \ + -p "$MONGODB_PASSWORD" \ + --authenticationDatabase admin \ + --archive=/data/db/backup.archive \ + --drop + +# Clean up +docker compose exec mongodb rm /data/db/backup.archive +``` + +### ClickHouse Restore + +#### From file-based backup + +```bash +docker compose down +rm -rf data/clickhouse/ +tar -xzvf clickhouse-backup-YYYYMMDD.tar.gz +docker compose up -d +``` + +#### From native backup + +```bash +# Source credentials from .env +source .env + +# Restore backup using clickhouse-client +docker compose exec clickhouse clickhouse-client \ + --user currents \ + --password "$CLICKHOUSE_CURRENTS_PASSWORD" \ + --query "RESTORE DATABASE currents FROM Disk('backups', 'backup-YYYYMMDD')" +``` + +> **Note:** 
See [ClickHouse restore documentation](https://clickhouse.com/docs/en/operations/backup#restore) for advanced options such as restoring to a different database or renaming tables. + +### RustFS Restore + +```bash +docker compose down +rm -rf data/rustfs/ +tar -xzvf rustfs-backup-YYYYMMDD.tar.gz +docker compose up -d +``` + diff --git a/docs/configuration.md b/docs/configuration.md index 2123c6d..9b059e1 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -45,7 +45,7 @@ These have defaults but you'll likely want to customize them. | `MONGODB_USERNAME` | string | `currents-user` | MongoDB username | | `MONGODB_DATABASE` | string | `currents` | MongoDB database name | | `MONGODB_URI` | string | _(derived)_ | Full MongoDB connection string | -| `TRAEFIK_DOMAIN` | string | _(commented)_ | Base domain for Traefik TLS routing | +| `TRAEFIK_DOMAIN` | string | `localhost` | Base domain for Traefik TLS routing | | `TRAEFIK_API_SUBDOMAIN` | string | `currents-app` | Subdomain for API/Dashboard | | `TRAEFIK_DIRECTOR_SUBDOMAIN` | string | `currents-record` | Subdomain for Director | | `TRAEFIK_STORAGE_SUBDOMAIN` | string | `currents-storage` | Subdomain for RustFS S3 API | diff --git a/docs/index.md b/docs/index.md index c2f6ccf..39882f6 100644 --- a/docs/index.md +++ b/docs/index.md @@ -10,6 +10,8 @@ Docker Compose configuration for self-hosted Currents deployment. 
- [Quickstart Guide](quickstart.md) — Get up and running with Docker Compose - [Configuration Reference](configuration.md) — All environment variables and settings +- [Logging Configuration](logging.md) — Configure container logging for production +- [Backup and Restore](backup-restore.md) — Backup and restore procedures - [Upgrading Currents On-Prem](upgrading.md) — Upgrade workflows and version management - [Support Policy](support.md) — What's supported and maintenance responsibilities diff --git a/docs/logging.md b/docs/logging.md new file mode 100644 index 0000000..8bd84e3 --- /dev/null +++ b/docs/logging.md @@ -0,0 +1,169 @@ +# Logging Configuration + +This guide covers configuring container logging for production deployments. Proper log management is essential for monitoring, debugging, and compliance. + +## Overview + +Container runtimes provide different default logging behaviors: + +| Runtime | Default Driver | Production-Ready | +|---------|---------------|------------------| +| Podman | journald | Yes | +| Docker | json-file | No (unbounded growth) | + +## Podman + +Podman uses **journald** as its default logging driver, which is already production-ready. 
Logs are written to the systemd journal, providing: + +- Automatic log rotation and retention +- Structured logging with metadata +- Integration with system logging infrastructure +- Rate limiting to prevent log floods + +### Viewing Logs + +```bash +# View logs for a specific container +journalctl CONTAINER_NAME=currents-api + +# Follow logs in real-time +journalctl -f CONTAINER_NAME=currents-api + +# View logs since a specific time +journalctl CONTAINER_NAME=currents-api --since "1 hour ago" +``` + +### Shipping Logs to Remote Systems + +Since logs are already in journald, you can use standard tools to ship them to remote logging systems. + +Some log shippers include: + +- **Fluent Bit** — Lightweight log processor that reads from journald and forwards to various destinations +- **Vector** — High-performance observability data pipeline +- **Promtail** — Loki's log collector with journald support +- **rsyslog** — Traditional syslog with journald input module + +### Customizing journald Retention + +Configure retention in `/etc/systemd/journald.conf`: + +```ini +[Journal] +# Maximum disk space for logs +SystemMaxUse=2G + +# Maximum size of individual log files +SystemMaxFileSize=100M + +# How long to keep logs +MaxRetentionSec=30day +``` + +Apply changes with: + +```bash +sudo systemctl restart systemd-journald +``` + +## Docker + +Docker's default **json-file** logging driver writes logs to JSON files on disk without automatic rotation, which can cause disk space issues in production. + +### Recommended: Configure a Production Logging Driver + +For production deployments, configure Docker to use a logging driver with built-in rotation or remote shipping. 
+ +#### Option 1: Syslog Driver + +Route logs to your system's syslog daemon: + +```json +{ + "log-driver": "syslog", + "log-opts": { + "syslog-address": "udp://localhost:514", + "tag": "{{.Name}}" + } +} +``` + +#### Option 2: json-file with Rotation + +If you prefer local files, enable rotation: + +```json +{ + "log-driver": "json-file", + "log-opts": { + "max-size": "100m", + "max-file": "5" + } +} +``` + +#### Option 3: Log Shipping Drivers + +Docker includes drivers for shipping logs directly to remote systems: + +| Driver | Destination | +|--------|-------------| +| `splunk` | Splunk Enterprise / Splunk Cloud | +| `fluentd` | Fluentd / Fluent Bit collectors | +| `gelf` | Graylog Extended Log Format (Graylog, Logstash) | +| `awslogs` | Amazon CloudWatch Logs | +| `gcplogs` | Google Cloud Logging | + +Example Fluentd configuration: + +```json +{ + "log-driver": "fluentd", + "log-opts": { + "fluentd-address": "fluentd.example.com:24224", + "tag": "docker.{{.Name}}" + } +} +``` + +### Applying Docker Logging Configuration + +1. Edit `/etc/docker/daemon.json` with your chosen configuration +2. Restart the Docker daemon: + + ```bash + sudo systemctl restart docker + ``` + +3. Recreate containers to apply the new logging driver: + + ```bash + docker compose down + docker compose up -d + ``` + +> **Note:** Logging driver changes only apply to newly created containers. Existing containers continue using their original logging configuration until recreated. + +### Per-Service Configuration + +You can also configure logging per-service using a Docker Compose override file. 
Create `docker-compose.override.yml` in the `on-prem/` directory—Docker Compose automatically merges this with the main compose file: + +```yaml +# on-prem/docker-compose.override.yml +services: + api: + logging: + driver: syslog + options: + syslog-address: "udp://localhost:514" + tag: "currents-api" +``` + +See the [Docker Compose documentation on merging files](https://docs.docker.com/compose/how-tos/multiple-compose-files/merge/) for more details. + +## Further Reading + +- [Docker Logging Drivers Documentation](https://docs.docker.com/config/containers/logging/configure/) +- [Podman Logging Documentation](https://docs.podman.io/en/latest/markdown/podman-logs.1.html) +- [Fluent Bit Documentation](https://docs.fluentbit.io/) +- [systemd-journald Documentation](https://www.freedesktop.org/software/systemd/man/journald.conf.html) diff --git a/docs/quickstart.md b/docs/quickstart.md index f0dacc9..0bec565 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -505,5 +505,6 @@ DC_CLICKHOUSE_VOLUME=clickhouse-data ## Next Steps - Review the [Configuration Reference](./configuration.md) for all available options +- Configure [Logging for Production](./logging.md) to ensure proper log management - Read the [Support Policy](./support.md) to understand support boundaries - Set up monitoring and backups for production use diff --git a/on-prem/docker-compose.cache.yml b/on-prem/docker-compose.cache.yml index ee65d9e..2d96e4f 100644 --- a/on-prem/docker-compose.cache.yml +++ b/on-prem/docker-compose.cache.yml @@ -182,7 +182,7 @@ configs: routers: # API / Dashboard service api: - rule: "Host(`${TRAEFIK_API_SUBDOMAIN:-currents-app}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_API_SUBDOMAIN:-currents-app}.${TRAEFIK_DOMAIN:-localhost}`)" service: api entryPoints: - websecure @@ -190,7 +190,7 @@ configs: # Director service (record API) director: - rule: "Host(`${TRAEFIK_DIRECTOR_SUBDOMAIN:-currents-record}.${TRAEFIK_DOMAIN}`)" + rule: 
"Host(`${TRAEFIK_DIRECTOR_SUBDOMAIN:-currents-record}.${TRAEFIK_DOMAIN:-localhost}`)" service: director entryPoints: - websecure @@ -199,7 +199,7 @@ configs: {{- if eq (env "TRAEFIK_ENABLE_STORAGE") "true" }} # RustFS S3 API (conditionally enabled) storage: - rule: "Host(`${TRAEFIK_STORAGE_SUBDOMAIN:-currents-storage}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_STORAGE_SUBDOMAIN:-currents-storage}.${TRAEFIK_DOMAIN:-localhost}`)" service: rustfs entryPoints: - websecure diff --git a/on-prem/docker-compose.database.yml b/on-prem/docker-compose.database.yml index e6dc197..2326524 100644 --- a/on-prem/docker-compose.database.yml +++ b/on-prem/docker-compose.database.yml @@ -276,7 +276,7 @@ configs: routers: # API / Dashboard service api: - rule: "Host(`${TRAEFIK_API_SUBDOMAIN:-currents-app}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_API_SUBDOMAIN:-currents-app}.${TRAEFIK_DOMAIN:-localhost}`)" service: api entryPoints: - websecure @@ -284,7 +284,7 @@ configs: # Director service (record API) director: - rule: "Host(`${TRAEFIK_DIRECTOR_SUBDOMAIN:-currents-record}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_DIRECTOR_SUBDOMAIN:-currents-record}.${TRAEFIK_DOMAIN:-localhost}`)" service: director entryPoints: - websecure @@ -293,7 +293,7 @@ configs: {{- if eq (env "TRAEFIK_ENABLE_STORAGE") "true" }} # RustFS S3 API (conditionally enabled) storage: - rule: "Host(`${TRAEFIK_STORAGE_SUBDOMAIN:-currents-storage}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_STORAGE_SUBDOMAIN:-currents-storage}.${TRAEFIK_DOMAIN:-localhost}`)" service: rustfs entryPoints: - websecure diff --git a/on-prem/docker-compose.full.yml b/on-prem/docker-compose.full.yml index e164787..892855c 100644 --- a/on-prem/docker-compose.full.yml +++ b/on-prem/docker-compose.full.yml @@ -318,7 +318,7 @@ configs: routers: # API / Dashboard service api: - rule: "Host(`${TRAEFIK_API_SUBDOMAIN:-currents-app}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_API_SUBDOMAIN:-currents-app}.${TRAEFIK_DOMAIN:-localhost}`)" 
service: api entryPoints: - websecure @@ -326,7 +326,7 @@ configs: # Director service (record API) director: - rule: "Host(`${TRAEFIK_DIRECTOR_SUBDOMAIN:-currents-record}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_DIRECTOR_SUBDOMAIN:-currents-record}.${TRAEFIK_DOMAIN:-localhost}`)" service: director entryPoints: - websecure @@ -335,7 +335,7 @@ configs: {{- if eq (env "TRAEFIK_ENABLE_STORAGE") "true" }} # RustFS S3 API (conditionally enabled) storage: - rule: "Host(`${TRAEFIK_STORAGE_SUBDOMAIN:-currents-storage}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_STORAGE_SUBDOMAIN:-currents-storage}.${TRAEFIK_DOMAIN:-localhost}`)" service: rustfs entryPoints: - websecure diff --git a/on-prem/scripts/smoke-test/api-test-create.sh b/on-prem/scripts/smoke-test/api-test-create.sh new file mode 100755 index 0000000..e4954a1 --- /dev/null +++ b/on-prem/scripts/smoke-test/api-test-create.sh @@ -0,0 +1,94 @@ +#!/bin/bash +# +# API smoke test - Create action +# Creates an action via the API and outputs the action ID +# +# Usage: ./api-test-create.sh +# Output: ACTION_ID= (for eval) +# + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ON_PREM_DIR="$SCRIPT_DIR/../.." + +API_KEY="${1:-}" +PROJECT_ID="${2:-}" +API_BASE_URL="${API_BASE_URL:-http://localhost:4000/v1}" + +# Function to show logs on failure +show_logs_on_failure() { + echo "" >&2 + echo "==========================================" >&2 + echo "API container logs (last 100 lines):" >&2 + echo "==========================================" >&2 + cd "$ON_PREM_DIR" + docker compose -f docker-compose.full.yml logs --tail=100 api >&2 + echo "==========================================" >&2 +} + +if [ -z "$API_KEY" ]; then + echo "❌ Error: API key is required" >&2 + echo "Usage: $0 " >&2 + exit 1 +fi + +if [ -z "$PROJECT_ID" ]; then + echo "❌ Error: Project ID is required" >&2 + echo "Usage: $0 " >&2 + exit 1 +fi + +echo "Creating test action..." 
>&2 +echo "API URL: $API_BASE_URL" >&2 +echo "Project ID: $PROJECT_ID" >&2 + +# Generate unique name for this test run +TEST_NAME="smoke-test-action-$(date +%s)" + +RESPONSE_FILE=$(mktemp) +trap 'rm -f "$RESPONSE_FILE"' EXIT + +HTTP_CODE=$(curl -s -o "$RESPONSE_FILE" -w "%{http_code}" -X POST "${API_BASE_URL}/actions?projectId=${PROJECT_ID}" \ + -H "Authorization: Bearer ${API_KEY}" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "'"${TEST_NAME}"'", + "description": "Smoke test action - safe to delete", + "action": [{"op": "skip"}], + "matcher": { + "op": "AND", + "cond": [ + { + "type": "title", + "op": "eq", + "value": "smoke-test-placeholder" + } + ] + } + }') + +RESPONSE_BODY=$(cat "$RESPONSE_FILE") + +if [ "$HTTP_CODE" != "201" ]; then + echo "❌ Failed to create action (HTTP $HTTP_CODE)" >&2 + echo "Response: $RESPONSE_BODY" >&2 + show_logs_on_failure + exit 1 +fi + +ACTION_ID=$(echo "$RESPONSE_BODY" | jq -r '.data.actionId') + +if [ -z "$ACTION_ID" ] || [ "$ACTION_ID" = "null" ]; then + echo "❌ Failed to extract actionId from response" >&2 + echo "Response: $RESPONSE_BODY" >&2 + show_logs_on_failure + exit 1 +fi + +echo "✅ Created action: $ACTION_ID" >&2 +echo " Name: $TEST_NAME" >&2 + +# Output for eval +printf "ACTION_ID=%q\n" "$ACTION_ID" +printf "TEST_NAME=%q\n" "$TEST_NAME" diff --git a/on-prem/scripts/smoke-test/api-test-delete.sh b/on-prem/scripts/smoke-test/api-test-delete.sh new file mode 100755 index 0000000..f344ff4 --- /dev/null +++ b/on-prem/scripts/smoke-test/api-test-delete.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# +# API smoke test - Delete action +# Deletes (archives) an action by ID +# +# Usage: ./api-test-delete.sh +# + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ON_PREM_DIR="$SCRIPT_DIR/../.." 
+ +API_KEY="${1:-}" +ACTION_ID="${2:-}" +API_BASE_URL="${API_BASE_URL:-http://localhost:4000/v1}" + +# Function to show logs on failure +show_logs_on_failure() { + echo "" >&2 + echo "==========================================" >&2 + echo "API container logs (last 100 lines):" >&2 + echo "==========================================" >&2 + cd "$ON_PREM_DIR" + docker compose -f docker-compose.full.yml logs --tail=100 api >&2 + echo "==========================================" >&2 +} + +if [ -z "$API_KEY" ]; then + echo "❌ Error: API key is required" >&2 + echo "Usage: $0 " >&2 + exit 1 +fi + +if [ -z "$ACTION_ID" ]; then + echo "❌ Error: Action ID is required" >&2 + echo "Usage: $0 " >&2 + exit 1 +fi + +echo "Deleting action: $ACTION_ID..." >&2 + +RESPONSE_FILE=$(mktemp) +trap 'rm -f "$RESPONSE_FILE"' EXIT + +HTTP_CODE=$(curl -s -o "$RESPONSE_FILE" -w "%{http_code}" -X DELETE "${API_BASE_URL}/actions/${ACTION_ID}" \ + -H "Authorization: Bearer ${API_KEY}") + +RESPONSE_BODY=$(cat "$RESPONSE_FILE") + +if [ "$HTTP_CODE" != "200" ]; then + echo "❌ Failed to delete action (HTTP $HTTP_CODE)" >&2 + echo "Response: $RESPONSE_BODY" >&2 + show_logs_on_failure + exit 1 +fi + +echo "✅ Action deleted (archived)" >&2 diff --git a/on-prem/scripts/smoke-test/api-test-fetch.sh b/on-prem/scripts/smoke-test/api-test-fetch.sh new file mode 100755 index 0000000..b2fdb6d --- /dev/null +++ b/on-prem/scripts/smoke-test/api-test-fetch.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# +# API smoke test - Fetch action +# Fetches an action by ID and verifies it exists +# +# Usage: ./api-test-fetch.sh [expected_name] +# + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ON_PREM_DIR="$SCRIPT_DIR/../.." 
+ +API_KEY="${1:-}" +ACTION_ID="${2:-}" +EXPECTED_NAME="${3:-}" +API_BASE_URL="${API_BASE_URL:-http://localhost:4000/v1}" + +# Function to show logs on failure +show_logs_on_failure() { + echo "" >&2 + echo "==========================================" >&2 + echo "API container logs (last 100 lines):" >&2 + echo "==========================================" >&2 + cd "$ON_PREM_DIR" + docker compose -f docker-compose.full.yml logs --tail=100 api >&2 + echo "==========================================" >&2 +} + +if [ -z "$API_KEY" ]; then + echo "❌ Error: API key is required" >&2 + echo "Usage: $0 [expected_name]" >&2 + exit 1 +fi + +if [ -z "$ACTION_ID" ]; then + echo "❌ Error: Action ID is required" >&2 + echo "Usage: $0 [expected_name]" >&2 + exit 1 +fi + +echo "Fetching action: $ACTION_ID..." >&2 + +RESPONSE_FILE=$(mktemp) +trap 'rm -f "$RESPONSE_FILE"' EXIT + +HTTP_CODE=$(curl -s -o "$RESPONSE_FILE" -w "%{http_code}" "${API_BASE_URL}/actions/${ACTION_ID}" \ + -H "Authorization: Bearer ${API_KEY}") + +RESPONSE_BODY=$(cat "$RESPONSE_FILE") + +if [ "$HTTP_CODE" != "200" ]; then + echo "❌ Failed to fetch action (HTTP $HTTP_CODE)" >&2 + echo "Response: $RESPONSE_BODY" >&2 + show_logs_on_failure + exit 1 +fi + +FETCHED_NAME=$(echo "$RESPONSE_BODY" | jq -r '.data.name') +FETCHED_STATUS=$(echo "$RESPONSE_BODY" | jq -r '.data.status') + +# Verify name if provided +if [ -n "$EXPECTED_NAME" ] && [ "$FETCHED_NAME" != "$EXPECTED_NAME" ]; then + echo "❌ Action name mismatch" >&2 + echo "Expected: $EXPECTED_NAME" >&2 + echo "Got: $FETCHED_NAME" >&2 + show_logs_on_failure + exit 1 +fi + +echo "✅ Fetched action successfully" >&2 +echo " Name: $FETCHED_NAME" >&2 +echo " Status: $FETCHED_STATUS" >&2 diff --git a/on-prem/scripts/smoke-test/api-test.sh b/on-prem/scripts/smoke-test/api-test.sh index 1b0c42f..3df35c2 100755 --- a/on-prem/scripts/smoke-test/api-test.sh +++ b/on-prem/scripts/smoke-test/api-test.sh @@ -18,41 +18,44 @@ API_BASE_URL="${API_BASE_URL:-http://localhost:4000/v1}" 
# Function to show logs on failure show_logs_on_failure() { - echo "" - echo "==========================================" - echo "API container logs (last 100 lines):" - echo "==========================================" + echo "" >&2 + echo "==========================================" >&2 + echo "API container logs (last 100 lines):" >&2 + echo "==========================================" >&2 cd "$ON_PREM_DIR" - docker compose -f docker-compose.full.yml logs --tail=100 api - echo "==========================================" + docker compose -f docker-compose.full.yml logs --tail=100 api >&2 + echo "==========================================" >&2 } if [ -z "$API_KEY" ]; then - echo "❌ Error: API key is required" - echo "Usage: $0 " + echo "❌ Error: API key is required" >&2 + echo "Usage: $0 " >&2 exit 1 fi if [ -z "$PROJECT_ID" ]; then - echo "❌ Error: Project ID is required" - echo "Usage: $0 " + echo "❌ Error: Project ID is required" >&2 + echo "Usage: $0 " >&2 exit 1 fi -echo "Running API smoke test..." -echo "API URL: $API_BASE_URL" -echo "Project ID: $PROJECT_ID" -echo "" +echo "Running API smoke test..." >&2 +echo "API URL: $API_BASE_URL" >&2 +echo "Project ID: $PROJECT_ID" >&2 +echo "" >&2 # Generate unique name for this test run TEST_NAME="smoke-test-action-$(date +%s)" +RESPONSE_FILE=$(mktemp) +trap 'rm -f "$RESPONSE_FILE"' EXIT + # ============================================================================= # Step 1: Create an action # ============================================================================= -echo "Step 1: Creating test action..." +echo "Step 1: Creating test action..." 
>&2 -CREATE_RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "${API_BASE_URL}/actions?projectId=${PROJECT_ID}" \ +HTTP_CODE=$(curl -s -o "$RESPONSE_FILE" -w "%{http_code}" -X POST "${API_BASE_URL}/actions?projectId=${PROJECT_ID}" \ -H "Authorization: Bearer ${API_KEY}" \ -H "Content-Type: application/json" \ -d '{ @@ -71,12 +74,11 @@ CREATE_RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "${API_BASE_URL}/actions?p } }') -HTTP_CODE=$(echo "$CREATE_RESPONSE" | tail -n1) -RESPONSE_BODY=$(echo "$CREATE_RESPONSE" | sed '$d') +RESPONSE_BODY=$(cat "$RESPONSE_FILE") if [ "$HTTP_CODE" != "201" ]; then - echo "❌ Failed to create action (HTTP $HTTP_CODE)" - echo "Response: $RESPONSE_BODY" + echo "❌ Failed to create action (HTTP $HTTP_CODE)" >&2 + echo "Response: $RESPONSE_BODY" >&2 show_logs_on_failure exit 1 fi @@ -84,29 +86,28 @@ fi ACTION_ID=$(echo "$RESPONSE_BODY" | jq -r '.data.actionId') if [ -z "$ACTION_ID" ] || [ "$ACTION_ID" = "null" ]; then - echo "❌ Failed to extract actionId from response" - echo "Response: $RESPONSE_BODY" + echo "❌ Failed to extract actionId from response" >&2 + echo "Response: $RESPONSE_BODY" >&2 show_logs_on_failure exit 1 fi -echo "✅ Created action: $ACTION_ID" +echo "✅ Created action: $ACTION_ID" >&2 # ============================================================================= # Step 2: Fetch the action back # ============================================================================= -echo "" -echo "Step 2: Fetching action..." +echo "" >&2 +echo "Step 2: Fetching action..." 
>&2 -GET_RESPONSE=$(curl -s -w "\n%{http_code}" "${API_BASE_URL}/actions/${ACTION_ID}" \ +HTTP_CODE=$(curl -s -o "$RESPONSE_FILE" -w "%{http_code}" "${API_BASE_URL}/actions/${ACTION_ID}" \ -H "Authorization: Bearer ${API_KEY}") -HTTP_CODE=$(echo "$GET_RESPONSE" | tail -n1) -RESPONSE_BODY=$(echo "$GET_RESPONSE" | sed '$d') +RESPONSE_BODY=$(cat "$RESPONSE_FILE") if [ "$HTTP_CODE" != "200" ]; then - echo "❌ Failed to fetch action (HTTP $HTTP_CODE)" - echo "Response: $RESPONSE_BODY" + echo "❌ Failed to fetch action (HTTP $HTTP_CODE)" >&2 + echo "Response: $RESPONSE_BODY" >&2 show_logs_on_failure exit 1 fi @@ -115,47 +116,46 @@ FETCHED_NAME=$(echo "$RESPONSE_BODY" | jq -r '.data.name') FETCHED_STATUS=$(echo "$RESPONSE_BODY" | jq -r '.data.status') if [ "$FETCHED_NAME" != "$TEST_NAME" ]; then - echo "❌ Action name mismatch" - echo "Expected: $TEST_NAME" - echo "Got: $FETCHED_NAME" + echo "❌ Action name mismatch" >&2 + echo "Expected: $TEST_NAME" >&2 + echo "Got: $FETCHED_NAME" >&2 show_logs_on_failure exit 1 fi -echo "✅ Fetched action successfully" -echo " Name: $FETCHED_NAME" -echo " Status: $FETCHED_STATUS" +echo "✅ Fetched action successfully" >&2 +echo " Name: $FETCHED_NAME" >&2 +echo " Status: $FETCHED_STATUS" >&2 # ============================================================================= # Step 3: Clean up - delete the action # ============================================================================= -echo "" -echo "Step 3: Cleaning up (deleting action)..." +echo "" >&2 +echo "Step 3: Cleaning up (deleting action)..." 
>&2 -DELETE_RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE "${API_BASE_URL}/actions/${ACTION_ID}" \ +HTTP_CODE=$(curl -s -o "$RESPONSE_FILE" -w "%{http_code}" -X DELETE "${API_BASE_URL}/actions/${ACTION_ID}" \ -H "Authorization: Bearer ${API_KEY}") -HTTP_CODE=$(echo "$DELETE_RESPONSE" | tail -n1) -RESPONSE_BODY=$(echo "$DELETE_RESPONSE" | sed '$d') +RESPONSE_BODY=$(cat "$RESPONSE_FILE") if [ "$HTTP_CODE" != "200" ]; then - echo "⚠️ Warning: Failed to delete action (HTTP $HTTP_CODE)" - echo "Response: $RESPONSE_BODY" + echo "⚠️ Warning: Failed to delete action (HTTP $HTTP_CODE)" >&2 + echo "Response: $RESPONSE_BODY" >&2 # Don't fail the test for cleanup issues else - echo "✅ Action deleted (archived)" + echo "✅ Action deleted (archived)" >&2 fi # ============================================================================= # Summary # ============================================================================= -echo "" -echo "==========================================" -echo "✅ API smoke test passed!" -echo "==========================================" -echo "" -echo "Verified:" -echo " - POST /actions (create)" -echo " - GET /actions/{actionId} (read)" -echo " - DELETE /actions/{actionId} (delete)" -echo "" +echo "" >&2 +echo "==========================================" >&2 +echo "✅ API smoke test passed!" >&2 +echo "==========================================" >&2 +echo "" >&2 +echo "Verified:" >&2 +echo " - POST /actions (create)" >&2 +echo " - GET /actions/{actionId} (read)" >&2 +echo " - DELETE /actions/{actionId} (delete)" >&2 +echo "" >&2 diff --git a/on-prem/scripts/smoke-test/seed-database.sh b/on-prem/scripts/smoke-test/seed-database.sh index 6fec434..9fa85a5 100755 --- a/on-prem/scripts/smoke-test/seed-database.sh +++ b/on-prem/scripts/smoke-test/seed-database.sh @@ -214,8 +214,8 @@ if ! 
echo "$RESULT" | grep -q "SUCCESS"; then fi # Output variables that can be eval'd by the caller -echo "API_KEY=${API_KEY}" -echo "PROJECT_ID=${PROJECT_ID}" +printf "API_KEY=%q\n" "${API_KEY}" +printf "PROJECT_ID=%q\n" "${PROJECT_ID}" echo "✅ Database seeded successfully" >&2 echo " Project ID: ${PROJECT_ID}" >&2 diff --git a/on-prem/scripts/smoke-test/wait-for-api.sh b/on-prem/scripts/smoke-test/wait-for-api.sh new file mode 100755 index 0000000..5130a8a --- /dev/null +++ b/on-prem/scripts/smoke-test/wait-for-api.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# Wait for API to be ready +# +# Usage: ./wait-for-api.sh [max_attempts] +# + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ON_PREM_DIR="$SCRIPT_DIR/../.." + +MAX_ATTEMPTS="${1:-60}" + +cd "$ON_PREM_DIR" + +echo "Waiting for API to be ready..." >&2 +for i in $(seq 1 "$MAX_ATTEMPTS"); do + if curl -sf http://localhost:4000/health > /dev/null 2>&1; then + echo "✅ API is ready" >&2 + exit 0 + fi + echo "Attempt $i/$MAX_ATTEMPTS - API not ready yet..." >&2 + sleep 2 +done + +echo "❌ API failed to start" >&2 +docker compose -f docker-compose.full.yml logs api >&2 +exit 1 diff --git a/on-prem/scripts/smoke-test/wait-for-mongodb.sh b/on-prem/scripts/smoke-test/wait-for-mongodb.sh new file mode 100755 index 0000000..e7cebd0 --- /dev/null +++ b/on-prem/scripts/smoke-test/wait-for-mongodb.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# +# Wait for MongoDB to be healthy (authenticated and replica set ready) +# +# Usage: ./wait-for-mongodb.sh [max_attempts] +# + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ON_PREM_DIR="$SCRIPT_DIR/../.." + +MAX_ATTEMPTS="${1:-60}" + +cd "$ON_PREM_DIR" + +# Load credentials from .env +source .env + +echo "Waiting for MongoDB to be healthy..." 
>&2 +for i in $(seq 1 "$MAX_ATTEMPTS"); do + # Check that authentication works and replica set is PRIMARY + # This matches what the container healthcheck verifies + if docker compose -f docker-compose.full.yml exec -T mongodb mongosh \ + -u "$MONGODB_USERNAME" -p "$MONGODB_PASSWORD" --authenticationDatabase admin \ + --quiet --eval "rs.status().myState === 1" 2>/dev/null | grep -q true; then + echo "✅ MongoDB is ready (authenticated, replica set PRIMARY)" >&2 + exit 0 + fi + echo "Attempt $i/$MAX_ATTEMPTS - MongoDB not ready yet..." >&2 + sleep 2 +done + +echo "❌ MongoDB failed to start" >&2 +docker compose -f docker-compose.full.yml logs mongodb >&2 +exit 1 diff --git a/on-prem/scripts/smoke-test/wait-for-redis.sh b/on-prem/scripts/smoke-test/wait-for-redis.sh new file mode 100755 index 0000000..e6fb8da --- /dev/null +++ b/on-prem/scripts/smoke-test/wait-for-redis.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# Wait for Redis to be ready +# +# Usage: ./wait-for-redis.sh [max_attempts] +# + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ON_PREM_DIR="$SCRIPT_DIR/../.." + +MAX_ATTEMPTS="${1:-30}" + +cd "$ON_PREM_DIR" + +echo "Waiting for Redis to be ready..." >&2 +for i in $(seq 1 "$MAX_ATTEMPTS"); do + if docker compose -f docker-compose.full.yml exec -T redis redis-cli ping 2>/dev/null | grep -q PONG; then + echo "✅ Redis is ready" >&2 + exit 0 + fi + echo "Attempt $i/$MAX_ATTEMPTS - Redis not ready yet..." 
>&2 + sleep 2 +done + +echo "❌ Redis failed to start" >&2 +docker compose -f docker-compose.full.yml logs redis >&2 +exit 1 diff --git a/on-prem/scripts/smoke-test/wait-for-root-user.sh b/on-prem/scripts/smoke-test/wait-for-root-user.sh new file mode 100755 index 0000000..0e22424 --- /dev/null +++ b/on-prem/scripts/smoke-test/wait-for-root-user.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# +# Wait for root user to be created by scheduler +# +# Usage: ./wait-for-root-user.sh [max_attempts] +# + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ON_PREM_DIR="$SCRIPT_DIR/../.." + +MAX_ATTEMPTS="${1:-30}" + +cd "$ON_PREM_DIR" +source .env + +echo "Waiting for root user to be created..." >&2 +for i in $(seq 1 "$MAX_ATTEMPTS"); do + if docker compose -f docker-compose.full.yml exec -T mongodb mongosh \ + -u "$MONGODB_USERNAME" -p "$MONGODB_PASSWORD" --authenticationDatabase admin \ + --quiet --eval "db.getSiblingDB('currents').user.findOne({email: '${ON_PREM_EMAIL:-root@currents.local}'})" 2>/dev/null | grep -q "_id"; then + echo "✅ Root user exists" >&2 + exit 0 + fi + echo "Attempt $i/$MAX_ATTEMPTS - Root user not created yet..." 
>&2 + sleep 2 +done + +echo "❌ Root user was not created" >&2 +docker compose -f docker-compose.full.yml logs api scheduler >&2 +exit 1 diff --git a/on-prem/templates/compose.traefik.yml b/on-prem/templates/compose.traefik.yml index 1688978..a1b8b70 100644 --- a/on-prem/templates/compose.traefik.yml +++ b/on-prem/templates/compose.traefik.yml @@ -60,7 +60,7 @@ configs: routers: # API / Dashboard service api: - rule: "Host(`${TRAEFIK_API_SUBDOMAIN:-currents-app}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_API_SUBDOMAIN:-currents-app}.${TRAEFIK_DOMAIN:-localhost}`)" service: api entryPoints: - websecure @@ -68,7 +68,7 @@ configs: # Director service (record API) director: - rule: "Host(`${TRAEFIK_DIRECTOR_SUBDOMAIN:-currents-record}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_DIRECTOR_SUBDOMAIN:-currents-record}.${TRAEFIK_DOMAIN:-localhost}`)" service: director entryPoints: - websecure @@ -77,7 +77,7 @@ configs: {{- if eq (env "TRAEFIK_ENABLE_STORAGE") "true" }} # RustFS S3 API (conditionally enabled) storage: - rule: "Host(`${TRAEFIK_STORAGE_SUBDOMAIN:-currents-storage}.${TRAEFIK_DOMAIN}`)" + rule: "Host(`${TRAEFIK_STORAGE_SUBDOMAIN:-currents-storage}.${TRAEFIK_DOMAIN:-localhost}`)" service: rustfs entryPoints: - websecure