Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,10 @@ HOSTNAME=
SSH_USER=ubuntu # Initial SSH user — changed to adminclaw during hardening
SSH_PORT=22 # Current SSH port — updated to SSH_HARDENED_PORT after hardening
SSH_HARDENED_PORT=222 # Target SSH port for hardening (removed from .env after hardening completes)
SSH_KEY=~/.ssh/vps1_openclaw_ed25519

## Set SSH_KEY or SSH_IDENTITY_AGENT so scripts know how to reach the VPS; leave both empty to rely on your default ssh config/agent.
SSH_KEY= # Optional: path to SSH private key file (e.g. ~/.ssh/vps1_openclaw_ed25519)
SSH_IDENTITY_AGENT= # Optional: path to SSH agent socket (e.g. ~/.bitwarden-ssh-agent.sock)

# ── STACK CONFIG ENV ─────────────────────────────────────────────────
# The rest of the below vars are referenced in stack.yml.example
Expand Down
3 changes: 2 additions & 1 deletion CLAUDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,8 @@ See [00-fresh-deploy-setup.md](playbooks/00-fresh-deploy-setup.md) § 0.7 for ex

```bash
# After base setup, SSH as adminclaw (not ubuntu)
ssh -i <SSH_KEY:~/.ssh/vps1_openclaw_ed25519> -p <SSH_PORT:222> <SSH_USER:adminclaw>@<VPS_IP>
# If using an SSH agent, omit -i and rely on your normal ssh config.
ssh [ -i <SSH_KEY> ] -p <SSH_PORT:222> <SSH_USER:adminclaw>@<VPS_IP>

# Run commands as openclaw
sudo -u openclaw <command>
Expand Down
20 changes: 15 additions & 5 deletions build/pre-deploy.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -197,19 +197,29 @@ async function queryVpsCapacity(env) {
const ip = env.VPS_IP;
const user = env.SSH_USER || "adminclaw";
const port = env.SSH_PORT || "222";
const keyPath = env.SSH_KEY || "~/.ssh/vps1_openclaw_ed25519";
const keyPath = env.SSH_KEY?.trim() || "";
const identityAgent = env.SSH_IDENTITY_AGENT?.trim() || "";

if (!ip) fatal("VPS_IP not set in .env — cannot query VPS capacity for resource % resolution");

const expandedKey = keyPath.replace(/^~/, process.env.HOME || "");
info(`Querying VPS capacity at ${user}@${ip}:${port}...`);

const sshArgs = [
"-o", "StrictHostKeyChecking=accept-new", "-o", "ConnectTimeout=10",
"-i", expandedKey, "-p", port, `${user}@${ip}`,
"nproc && grep MemTotal /proc/meminfo | awk '{print $2}'"
];

if (keyPath) {
sshArgs.push("-i", keyPath.replace(/^~/, process.env.HOME || ""));
}
if (identityAgent) {
sshArgs.push("-o", `IdentityAgent=${identityAgent.replace(/^~/, process.env.HOME || "")}`);
}

sshArgs.push(
"-p", port, `${user}@${ip}`,
"nproc && grep MemTotal /proc/meminfo | awk '{print $2}'"
);

const { stdout, stderr, exitCode } = await spawnAsync("ssh", sshArgs);

if (exitCode !== 0) {
Expand Down Expand Up @@ -421,7 +431,7 @@ function generateStackEnv(env, config, claws) {

// Source: .env
const envVars = [
"VPS_IP", "SSH_KEY", "SSH_PORT", "SSH_USER",
"VPS_IP", "SSH_KEY", "SSH_IDENTITY_AGENT", "SSH_PORT", "SSH_USER",
"HOSTALERT_TELEGRAM_BOT_TOKEN", "HOSTALERT_TELEGRAM_CHAT_ID",
"CLOUDFLARE_API_TOKEN", "CLOUDFLARE_TUNNEL_TOKEN",
];
Expand Down
14 changes: 12 additions & 2 deletions docs/TESTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,8 @@ source deploy/host/source-config.sh
This exports `ENV__*` vars (from `.env`) and `STACK__*` vars (from `stack.yml`). Key variables used in tests below:

- `ENV__VPS_IP` - OpenClaw VPS
- `ENV__SSH_KEY` - SSH key location
- `ENV__SSH_KEY` - Optional SSH key location
- `ENV__SSH_IDENTITY_AGENT` - Optional SSH agent socket path
- `ENV__SSH_USER` - SSH username (should be `adminclaw`)
- `ENV__SSH_PORT` - SSH port (should be `222`)
- `STACK__STACK__INSTALL_DIR` - VPS install base (default: `/home/openclaw`)
Expand Down Expand Up @@ -174,7 +175,16 @@ For a rapid health check, run this single command. Source config first for varia
```bash
source deploy/host/source-config.sh
echo "=== VPS-1 Health ===" && \
ssh -i "${ENV__SSH_KEY}" -p "${ENV__SSH_PORT}" "${ENV__SSH_USER}@${ENV__VPS_IP}" \
SSH_KEY_ARGS=()
if [ -n "${ENV__SSH_KEY:-}" ]; then
  SSH_KEY_ARGS+=(-i "${ENV__SSH_KEY}")
fi
if [ -n "${ENV__SSH_IDENTITY_AGENT:-}" ]; then
  SSH_KEY_ARGS+=(-o "IdentityAgent=${ENV__SSH_IDENTITY_AGENT}")
fi

ssh "${SSH_KEY_ARGS[@]}" -p "${ENV__SSH_PORT}" "${ENV__SSH_USER}@${ENV__VPS_IP}" \
"sudo -u openclaw bash -c 'cd ${INSTALL_DIR} && docker compose ps --format \"{{.Name}}: {{.Status}}\"' && \
echo && \
echo '=== Claw Instances ===' && \
Expand Down
11 changes: 9 additions & 2 deletions docs/VPS-SETUP-GUIDE.md
Original file line number Diff line number Diff line change
Expand Up @@ -99,8 +99,9 @@ Record it in `.env`:

VPS_IP=x.x.x.x

# SSH Configuration (required)
SSH_KEY=~/.ssh/vps1_openclaw_ed25519 # Path to your ssh key generated in Step 2
# SSH Configuration (use SSH_KEY, SSH_IDENTITY_AGENT, or your normal ssh config)
SSH_KEY=~/.ssh/vps1_openclaw_ed25519 # Optional: path to your ssh key file
SSH_IDENTITY_AGENT= # Optional: ssh agent socket path if you use an agent instead of a key file
SSH_USER=ubuntu # Initial user created by OVH, changed to adminclaw during hardening
SSH_PORT=22 # Initial SSH port, changed to 222 during hardening
```
Expand All @@ -117,6 +118,8 @@ ssh-add ~/.ssh/vps1_openclaw_ed25519

# Test VPS-1 (OpenClaw)
ssh -i ~/.ssh/vps1_openclaw_ed25519 ubuntu@<VPS-1-IP>
# Or, if your key is already available through your SSH agent/config:
ssh ubuntu@<VPS-1-IP>
```

On first connection, accept the host key fingerprint.
Expand Down Expand Up @@ -187,9 +190,13 @@ ssh-add ~/.ssh/vps1_openclaw_ed25519

# SSH to OpenClaw VPS (before deployment - default port 22)
ssh -i ~/.ssh/vps1_openclaw_ed25519 ubuntu@<VPS-1-IP>
# Or with agent-based auth:
ssh ubuntu@<VPS-1-IP>

# After claude deployment and hardening - use port 222 and adminclaw user
ssh -i ~/.ssh/vps1_openclaw_ed25519 -p 222 adminclaw@<VPS-1-IP>
# Or with agent-based auth:
ssh -p 222 adminclaw@<VPS-1-IP>
```

### OVHCloud Control Panel Links
Expand Down
23 changes: 15 additions & 8 deletions playbooks/00-fresh-deploy-setup.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ This playbook validates the configuration needed to deploy OpenClaw on a fresh U
## Prerequisites

- A fresh Ubuntu VPS (>= 24.04) with root/sudo access
- An SSH key pair for VPS access
- SSH access to the VPS, either via a private key file or an SSH agent
- A Cloudflare account with a domain
- Cloudflare Tunnel token (`CLOUDFLARE_TUNNEL_TOKEN`, manual) OR Cloudflare API token (`CLOUDFLARE_API_TOKEN`, automated)
- Cloudflare Access application protecting the domain
Expand Down Expand Up @@ -47,7 +47,8 @@ echo "VPS_IP=${VPS_IP:-EMPTY}" && \
echo "CF_TUNNEL_TOKEN=${CLOUDFLARE_TUNNEL_TOKEN:+SET}" && \
echo "CF_API_TOKEN=${CLOUDFLARE_API_TOKEN:+SET}" && \
echo "ADMIN_TELEGRAM_ID=${ADMIN_TELEGRAM_ID:-EMPTY}" && \
echo "SSH_KEY=${SSH_KEY:-~/.ssh/vps1_openclaw_ed25519}" && \
echo "SSH_KEY=${SSH_KEY:-EMPTY}" && \
echo "SSH_IDENTITY_AGENT=${SSH_IDENTITY_AGENT:-EMPTY}" && \
grep '_TELEGRAM_BOT_TOKEN=' .env | grep -v '^#' && \
echo "=== stack.yml ===" && \
grep '^\s*domain:' stack.yml | head -1 && \
Expand All @@ -64,6 +65,7 @@ grep -A1 '^claws:' stack.yml | tail -n +2 | grep '^\s\+[a-z]' | sed 's/://;s/^\s
5. **`ADMIN_TELEGRAM_ID`** — Must be numeric. If empty: "Send a message to @userinfobot on Telegram to get your numeric user ID."
6. **Bot tokens** — Each claw name needs a matching `<NAME>_TELEGRAM_BOT_TOKEN` line in `.env` (uppercased, hyphens→underscores). If missing: "Create a Telegram bot via @BotFather and paste the token. See `docs/TELEGRAM.md`."
7. **Claws** — The `claws` section lists claw names. Single claw = standard deploy. Multiple claws: inform user each gets its own container/domain.
8. **SSH auth** — Use `SSH_KEY`, `SSH_IDENTITY_AGENT`, or a compatible local SSH config/agent setup. `SSH_KEY` is a file path; `SSH_IDENTITY_AGENT` is an agent socket path such as `~/.bitwarden-ssh-agent.sock`. If both env vars are empty, confirm your normal `ssh` config can reach the VPS without extra flags.

### If any fields are invalid or missing

Expand Down Expand Up @@ -117,11 +119,13 @@ When `CF_API_TOKEN` is set, automate tunnel creation, route configuration, and D

## 0.3 SSH Check

1. Validate `SSH_KEY` exists on the local system (default: `~/.ssh/vps1_openclaw_ed25519`).
2. Test SSH connectivity using values from `.env` (`SSH_USER`, `SSH_PORT`):
1. If `SSH_KEY` is set, validate the file exists on the local system.
2. If `SSH_IDENTITY_AGENT` is set, validate the socket exists on the local system.
3. Test SSH connectivity using values from `.env` (`SSH_USER`, `SSH_PORT`):

```bash
ssh -i <SSH_KEY> -o ConnectTimeout=10 -o BatchMode=yes -p <SSH_PORT> <SSH_USER>@<VPS_IP> echo "VPS OK"
ssh [ -i <SSH_KEY> ] [ -o IdentityAgent=<SSH_IDENTITY_AGENT> ] \
-o ConnectTimeout=10 -o BatchMode=yes -p <SSH_PORT> <SSH_USER>@<VPS_IP> echo "VPS OK"
```

**If SSH fails — diagnose by error type:**
Expand Down Expand Up @@ -152,7 +156,8 @@ Then retry the SSH test.
>
> - The key at `<SSH_KEY>` wasn't added to the VPS during provisioning
> - The key file doesn't exist — check: `ls -la <SSH_KEY>`
> - The SSH agent doesn't have the key loaded — try: `ssh-add <SSH_KEY>`"
> - The SSH agent socket is wrong — check: `ls -l <SSH_IDENTITY_AGENT>`
> - The SSH agent doesn't have the key loaded — try: `ssh-add <SSH_KEY>` if you use a file-backed key"

---

Expand All @@ -163,7 +168,8 @@ After SSH is confirmed working, query the VPS hardware to verify gateway contain
### Query VPS Resources

```bash
ssh -i <SSH_KEY> -p <SSH_PORT> <SSH_USER>@<VPS_IP> "nproc && free -b | awk '/^Mem:/{print \$2}'"
ssh [ -i <SSH_KEY> ] [ -o IdentityAgent=<SSH_IDENTITY_AGENT> ] \
-p <SSH_PORT> <SSH_USER>@<VPS_IP> "nproc && free -b | awk '/^Mem:/{print \$2}'"
```

This returns two lines: CPU count (e.g., `6`) and total memory in bytes (e.g., `11811160064`).
Expand Down Expand Up @@ -320,6 +326,7 @@ After the user confirms, launch **01-workers and 02-base-setup as parallel subag
- A command fails and the error requires user input to resolve
- A playbook step explicitly says to wait for user input (e.g., a blocking error with multiple resolution paths)
- **SSH verification (02-base-setup.md § 2.4 Step 3):** You MUST test SSH on port `<SSH_HARDENED_PORT>` from the local machine and confirm it works before proceeding. This is a mandatory stop point — do not skip it during automated deployment.
- **Local SSH config update:** If the user connects via a `~/.ssh/config` host alias or agent-based SSH config, prompt them to update that local entry to port `<SSH_HARDENED_PORT>` immediately after the local hardened-port test passes and before removing port 22.
- **07-verification.md:** Run in the main context (not a subagent) so the user sees real-time progress and errors can be handled directly. By this point, all heavy steps have been offloaded to subagents and the context window has room. Report the summary table, then run `scripts/sync-workspaces.sh down --all` to pull back any files OpenClaw generated on first start, before proceeding to 08a-configure-llm-proxy.md.

Normal informational output (progress updates, version notes, check results) should be reported inline without pausing. The first user interaction after confirmation should be device pairing in `08b-pair-devices.md`.
Expand Down Expand Up @@ -349,7 +356,7 @@ A full deployment consumes significant context. To avoid mid-deploy compaction,

```
Read playbooks/04-vps1-openclaw.md §4.2 and execute the infrastructure setup.
SSH: ssh -i <key> -p <port> <user>@<ip>
SSH: ssh [ -i <key> ] [ -o IdentityAgent=<agent-socket> ] -p <port> <user>@<ip>
Log: Write detailed execution log (all commands, full output, errors, recovery steps)
to .deploy-logs/<timestamp>/04-infra-config.md
Return: pass/fail.
Expand Down
5 changes: 3 additions & 2 deletions playbooks/00-onboarding.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,13 @@ Triggered when the user says **"onboard"**. Walk through each configuration deci

Before starting, verify that `install.sh` was run:

1. Check `.env` exists and has `VPS_IP`, `SSH_USER`, `SSH_KEY` populated (non-empty values)
1. Check `.env` exists and has `VPS_IP` and `SSH_USER` populated, plus either `SSH_KEY` or `SSH_IDENTITY_AGENT` set — or confirm a compatible local SSH config/agent can reach the VPS without them
2. Check `stack.yml` exists
3. Verify SSH connectivity:

```bash
ssh -i <SSH_KEY> -o BatchMode=yes -o ConnectTimeout=5 -p <SSH_PORT:22> <SSH_USER>@<VPS_IP> echo "ok"
ssh [ -i <SSH_KEY> ] [ -o IdentityAgent=<SSH_IDENTITY_AGENT> ] \
-o BatchMode=yes -o ConnectTimeout=5 -p <SSH_PORT:22> <SSH_USER>@<VPS_IP> echo "ok"
```

**If `.env` is missing or VPS fields are empty:** Tell the user to run `bash install.sh` first and stop here.
Expand Down
21 changes: 20 additions & 1 deletion playbooks/02-base-setup.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,12 +31,15 @@ This playbook configures:
Config variables (read from `.env`):

- `VPS_IP` - Public IP of VPS-1
- `SSH_KEY` - Path to SSH private key
- `SSH_KEY` - Optional path to SSH private key
- `SSH_IDENTITY_AGENT` - Optional path to SSH agent socket
- `SSH_USER` - Initial SSH user (e.g., ubuntu, root, debian — depends on provider)
- `SSH_HARDENED_PORT` - Target SSH port for hardening (default: 222 if not set)
- `CLOUDFLARE_TUNNEL_TOKEN` - Cloudflare Tunnel token
- `HOSTNAME` - Optional, friendly hostname (replaces provider default)

> **SSH auth convention:** Examples below use `ssh -i <SSH_KEY> ...` for brevity. If you use agent-based auth instead, omit `-i <SSH_KEY>` and add `-o IdentityAgent=<SSH_IDENTITY_AGENT>` when needed.

## Execution Order

Complete sections 2.1–2.6 on VPS-1.
Expand Down Expand Up @@ -314,6 +317,22 @@ ssh -i <SSH_KEY> -p <SSH_HARDENED_PORT> adminclaw@<VPS_IP> "echo 'Port <SSH_HARD
2. Change `SSH_PORT=22` to `SSH_PORT=<SSH_HARDENED_PORT> # Changed from 22 during hardening`
3. Delete the `SSH_HARDENED_PORT=` line entirely

If the user relies on a local `~/.ssh/config` host alias or SSH agent-based config, this is also the safe moment to update that host entry to the hardened port. Prompt them explicitly before locking down port 22.

Example `~/.ssh/config` update:

```sshconfig
Host <alias-or-ip>
HostName <VPS_IP>
User adminclaw
Port <SSH_HARDENED_PORT>
# Agent-based auth: point at your agent socket.
IdentityAgent <SSH_IDENTITY_AGENT>
# NOTE(review): with IdentitiesOnly yes, ssh offers only keys named via IdentityFile,
# even when an agent is configured — add an IdentityFile line (a public-key path is
# enough) or omit IdentitiesOnly if you rely solely on the agent.
IdentitiesOnly yes
PreferredAuthentications publickey
```

Require the user to confirm they updated their local SSH config entry, or that they intentionally do not use one, before continuing to remove port 22.

Then lock down SSH:

```bash
Expand Down
2 changes: 2 additions & 0 deletions playbooks/04-vps1-openclaw.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@ Config values are read from `.env` and `stack.yml` (resolved by `npm run pre-dep
- `defaults.install_dir` (`stack.yml`) - Base installation directory on VPS (default: `/home/openclaw`)
- Per-claw overrides in `stack.yml` under `claws.<name>`

> **SSH auth convention:** Commands below may show `ssh -i ${SSH_KEY} ...`. If the stack uses agent-based auth, omit `-i ${SSH_KEY}` and rely on your SSH config or add `-o IdentityAgent=${SSH_IDENTITY_AGENT}`.

---

## 4.2 Infrastructure Setup
Expand Down
2 changes: 2 additions & 0 deletions playbooks/07-verification.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ This playbook verifies:
- Workers deployed (01-workers.md)
- VPS-1 rebooted after configuration

> **SSH auth convention:** Commands below may show `ssh -i <SSH_KEY> ...`. If you use an SSH agent, omit `-i <SSH_KEY>` and use your normal SSH config or add `-o IdentityAgent=<SSH_IDENTITY_AGENT>`.

## Pre-Verification: Reboot VPS-1

Before running verification tests, reboot VPS-1 to ensure all configuration changes take effect cleanly (especially kernel parameters, SSH config, and systemd services).
Expand Down
2 changes: 2 additions & 0 deletions playbooks/08b-pair-devices.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ Pair browser and Telegram devices with each claw's gateway.
- Domain verified as protected by Cloudflare Access (during `00-fresh-deploy-setup.md`)
- LLM proxy configured (optional but recommended — `08a-configure-llm-proxy.md`)

> **SSH auth convention:** Examples below may show `ssh -i <SSH_KEY> ...`. If you use agent-based auth, omit `-i <SSH_KEY>` and rely on your SSH config or add `-o IdentityAgent=<SSH_IDENTITY_AGENT>`.

---

## Open the Claw URLs
Expand Down
2 changes: 2 additions & 0 deletions playbooks/08c-deploy-report.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ The report is saved to `.deploy-logs/<timestamp>/08-deploy-report.md` (same time

Collect the following values and present them in a single, neatly formatted report:

> **SSH auth convention:** Examples below may show `ssh -i <SSH_KEY> ...`. If you use agent-based auth, omit `-i <SSH_KEY>` and rely on your SSH config or add `-o IdentityAgent=<SSH_IDENTITY_AGENT>`.

## Values to collect

1. **User passwords** — source `scripts/lib/source-config.sh` to get `ADMINCLAW_PASSWORD` and `OPENCLAW_PASSWORD`. These are auto-generated and persisted in `.env`.
Expand Down
2 changes: 2 additions & 0 deletions playbooks/maintenance.md
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,8 @@ sudo -u openclaw bash -c 'cd <INSTALL_DIR> && docker compose up -d'

#### SSH Keys

If your deployment uses agent-based auth, rotate the underlying key in your agent and update `SSH_IDENTITY_AGENT` only if the socket path changes. The file-based example below applies when `.env` uses `SSH_KEY`.

```bash
# 1. Generate new key pair (local machine)
ssh-keygen -t ed25519 -f ~/.ssh/vps1_openclaw_ed25519_new
Expand Down
12 changes: 6 additions & 6 deletions scripts/health-check.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/lib/source-config.sh"
source "$SCRIPT_DIR/lib/ssh.sh"

QUIET=false
while [[ $# -gt 0 ]]; do
Expand Down Expand Up @@ -36,7 +37,6 @@ while [[ $# -gt 0 ]]; do
done

export TERM=xterm-256color
SSH_CMD="ssh -i ${ENV__SSH_KEY} -p ${ENV__SSH_PORT} ${ENV__SSH_USER}@${ENV__VPS_IP}"
FAILURES=0

log() {
Expand All @@ -61,7 +61,7 @@ warn() {
# --- SSH connectivity ---
log ""
log "Checking VPS connectivity..."
if ! $SSH_CMD "true" 2>/dev/null; then
if ! "${SSH_CMD[@]}" "$VPS" "true" 2>/dev/null; then
fail "Cannot reach VPS at ${ENV__VPS_IP}:${ENV__SSH_PORT}"
log ""
log "$(printf '\033[31m%s check(s) failed.\033[0m')" "$FAILURES"
Expand Down Expand Up @@ -97,7 +97,7 @@ CONTAINERS=("${CLAW_CONTAINERS[@]}" "${INFRA_CONTAINERS[@]}")
log ""
log "Checking Docker containers..."
for CONTAINER in "${CONTAINERS[@]}"; do
STATUS=$($SSH_CMD "sudo docker inspect -f '{{.State.Status}}' $CONTAINER 2>/dev/null" 2>/dev/null || echo "not_found")
STATUS=$("${SSH_CMD[@]}" "$VPS" "sudo docker inspect -f '{{.State.Status}}' $CONTAINER 2>/dev/null" 2>/dev/null || echo "not_found")

if [ "$STATUS" = "running" ]; then
pass "$CONTAINER is running"
Expand All @@ -110,7 +110,7 @@ for CONTAINER in "${CONTAINERS[@]}"; do
fi

# Check Docker healthcheck status if the container defines one
HEALTH=$($SSH_CMD "sudo docker inspect -f '{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}' $CONTAINER 2>/dev/null" 2>/dev/null || echo "unknown")
HEALTH=$("${SSH_CMD[@]}" "$VPS" "sudo docker inspect -f '{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}' $CONTAINER 2>/dev/null" 2>/dev/null || echo "unknown")

case "$HEALTH" in
healthy) pass "$CONTAINER healthcheck: healthy" ;;
Expand All @@ -125,7 +125,7 @@ done
log ""
log "Checking for recent container restarts..."
for CONTAINER in "${CONTAINERS[@]}"; do
RESTART_COUNT=$($SSH_CMD "sudo docker inspect -f '{{.RestartCount}}' $CONTAINER 2>/dev/null" 2>/dev/null || echo "unknown")
RESTART_COUNT=$("${SSH_CMD[@]}" "$VPS" "sudo docker inspect -f '{{.RestartCount}}' $CONTAINER 2>/dev/null" 2>/dev/null || echo "unknown")
if [ "$RESTART_COUNT" = "unknown" ]; then
continue
elif [ "$RESTART_COUNT" -gt 0 ] 2>/dev/null; then
Expand All @@ -141,7 +141,7 @@ log "Checking OpenClaw gateway health..."

for CLAW_CONTAINER in "${CLAW_CONTAINERS[@]}"; do
INSTANCE_NAME="${CLAW_CONTAINER#${PROJECT_NAME}-openclaw-}"
HEALTH_OUTPUT=$($SSH_CMD "openclaw --instance $INSTANCE_NAME health 2>&1" 2>/dev/null) && HEALTH_EXIT=0 || HEALTH_EXIT=$?
HEALTH_OUTPUT=$("${SSH_CMD[@]}" "$VPS" "openclaw --instance $INSTANCE_NAME health 2>&1" 2>/dev/null) && HEALTH_EXIT=0 || HEALTH_EXIT=$?

if [ "$HEALTH_EXIT" -eq 0 ]; then
pass "openclaw health ($INSTANCE_NAME): OK"
Expand Down
Loading