diff --git a/.agents/skills/docs/nemoclaw-overview/references/how-it-works.md b/.agents/skills/docs/nemoclaw-overview/references/how-it-works.md index 9530758b7..6b53ffa9c 100644 --- a/.agents/skills/docs/nemoclaw-overview/references/how-it-works.md +++ b/.agents/skills/docs/nemoclaw-overview/references/how-it-works.md @@ -111,4 +111,4 @@ Approved endpoints persist for the current session but are not saved to the base - Follow the Quickstart (see the `nemoclaw-get-started` skill) to launch your first sandbox. - Refer to the Architecture (see the `nemoclaw-reference` skill) for the full technical structure, including file layouts and the blueprint lifecycle. -- Refer to Inference Profiles (see the `nemoclaw-reference` skill) for detailed provider configuration. \ No newline at end of file +- Refer to Inference Profiles (see the `nemoclaw-reference` skill) for detailed provider configuration. diff --git a/.agents/skills/docs/nemoclaw-overview/references/overview.md b/.agents/skills/docs/nemoclaw-overview/references/overview.md index 7134e56a6..71203b472 100644 --- a/.agents/skills/docs/nemoclaw-overview/references/overview.md +++ b/.agents/skills/docs/nemoclaw-overview/references/overview.md @@ -53,4 +53,4 @@ Explore the following pages to learn more about NemoClaw. - Switch Inference Providers (see the `nemoclaw-configure-inference` skill) to configure the inference provider. - Approve or Deny Network Requests (see the `nemoclaw-manage-policy` skill) to manage egress approvals. - Deploy to a Remote GPU Instance (see the `nemoclaw-deploy-remote` skill) for persistent operation. -- Monitor Sandbox Activity (see the `nemoclaw-monitor-sandbox` skill) to observe agent behavior. \ No newline at end of file +- Monitor Sandbox Activity (see the `nemoclaw-monitor-sandbox` skill) to observe agent behavior. 
diff --git a/.agents/skills/docs/nemoclaw-overview/references/release-notes.md b/.agents/skills/docs/nemoclaw-overview/references/release-notes.md index 9e0242dee..92949fafe 100644 --- a/.agents/skills/docs/nemoclaw-overview/references/release-notes.md +++ b/.agents/skills/docs/nemoclaw-overview/references/release-notes.md @@ -12,4 +12,4 @@ NVIDIA NemoClaw is available in early preview starting March 16, 2026. Use the f | [Releases](https://github.com/NVIDIA/NemoClaw/releases) | Versioned release notes and downloadable assets. | | [Release comparison](https://github.com/NVIDIA/NemoClaw/compare) | Diff between any two tags or branches. | | [Merged pull requests](https://github.com/NVIDIA/NemoClaw/pulls?q=is%3Apr+is%3Amerged) | Individual changes with review discussion. | -| [Commit history](https://github.com/NVIDIA/NemoClaw/commits/main) | Full commit log on `main`. | \ No newline at end of file +| [Commit history](https://github.com/NVIDIA/NemoClaw/commits/main) | Full commit log on `main`. 
| diff --git a/.agents/skills/docs/nemoclaw-reference/SKILL.md b/.agents/skills/docs/nemoclaw-reference/SKILL.md index e6b61e8f8..612300149 100644 --- a/.agents/skills/docs/nemoclaw-reference/SKILL.md +++ b/.agents/skills/docs/nemoclaw-reference/SKILL.md @@ -13,4 +13,4 @@ Learn how NemoClaw combines a lightweight CLI plugin with a versioned blueprint - [NemoClaw CLI Commands Reference](references/commands.md) - [NemoClaw Inference Profiles — NVIDIA Endpoint](references/inference-profiles.md) - [NemoClaw Network Policies — Baseline Rules and Operator Approval](references/network-policies.md) -- [NemoClaw Troubleshooting Guide](references/troubleshooting.md) \ No newline at end of file +- [NemoClaw Troubleshooting Guide](references/troubleshooting.md) diff --git a/.agents/skills/docs/nemoclaw-reference/references/architecture.md b/.agents/skills/docs/nemoclaw-reference/references/architecture.md index c5b9dada2..c28d4287b 100644 --- a/.agents/skills/docs/nemoclaw-reference/references/architecture.md +++ b/.agents/skills/docs/nemoclaw-reference/references/architecture.md @@ -79,4 +79,4 @@ OpenShell intercepts them and routes to the configured provider: Agent (sandbox) ──▶ OpenShell gateway ──▶ NVIDIA Endpoint (build.nvidia.com) ``` -Refer to Inference Profiles (see the `nemoclaw-reference` skill) for provider configuration details. \ No newline at end of file +Refer to Inference Profiles (see the `nemoclaw-reference` skill) for provider configuration details. 
diff --git a/.agents/skills/docs/nemoclaw-reference/references/commands.md b/.agents/skills/docs/nemoclaw-reference/references/commands.md index 5085ed8d6..fd08af4c1 100644 --- a/.agents/skills/docs/nemoclaw-reference/references/commands.md +++ b/.agents/skills/docs/nemoclaw-reference/references/commands.md @@ -149,4 +149,4 @@ After the fixes complete, the script prompts you to run `nemoclaw onboard` to co ```console $ sudo nemoclaw setup-spark -``` \ No newline at end of file +``` diff --git a/.agents/skills/docs/nemoclaw-reference/references/inference-profiles.md b/.agents/skills/docs/nemoclaw-reference/references/inference-profiles.md index 57c8f1374..9586246b6 100644 --- a/.agents/skills/docs/nemoclaw-reference/references/inference-profiles.md +++ b/.agents/skills/docs/nemoclaw-reference/references/inference-profiles.md @@ -50,4 +50,4 @@ $ openshell inference set --provider nvidia-nim --model ``` The change takes effect immediately. -No sandbox restart is needed. \ No newline at end of file +No sandbox restart is needed. 
diff --git a/.agents/skills/docs/nemoclaw-reference/references/network-policies.md b/.agents/skills/docs/nemoclaw-reference/references/network-policies.md index d094c94c3..a32e4158d 100644 --- a/.agents/skills/docs/nemoclaw-reference/references/network-policies.md +++ b/.agents/skills/docs/nemoclaw-reference/references/network-policies.md @@ -119,4 +119,4 @@ Apply policy updates to a running sandbox without restarting: ```console $ openshell policy set -``` \ No newline at end of file +``` diff --git a/.agents/skills/docs/nemoclaw-reference/references/troubleshooting.md b/.agents/skills/docs/nemoclaw-reference/references/troubleshooting.md index bc97b041d..8dd01774c 100644 --- a/.agents/skills/docs/nemoclaw-reference/references/troubleshooting.md +++ b/.agents/skills/docs/nemoclaw-reference/references/troubleshooting.md @@ -161,4 +161,4 @@ View the error output for the failed blueprint run: $ nemoclaw logs ``` -Use `--follow` to stream logs in real time while debugging. \ No newline at end of file +Use `--follow` to stream logs in real time while debugging. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 9e84b051d..d1edcbeaf 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -16,7 +16,7 @@ ## Testing -- [ ] `make check` passes. +- [ ] `npx prek run --all-files` passes (or equivalently `make check`). - [ ] `npm test` passes. - [ ] `make docs` builds without warnings. (for doc-only changes) @@ -28,7 +28,7 @@ ### Code Changes -- [ ] `make format` applied (TypeScript and Python). +- [ ] Formatters applied — `npx prek run --all-files` auto-fixes formatting (or `make format` for targeted runs). - [ ] Tests added or updated for new or changed behavior. - [ ] No secrets, API keys, or credentials committed. - [ ] Doc pages updated for any user-facing behavior changes (new commands, changed defaults, new features, bug fixes that contradict existing docs). 
diff --git a/.github/workflows/commit-lint.yaml b/.github/workflows/commit-lint.yaml index 03d259ea8..dec431f75 100644 --- a/.github/workflows/commit-lint.yaml +++ b/.github/workflows/commit-lint.yaml @@ -20,12 +20,12 @@ jobs: timeout-minutes: 5 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 - name: Setup Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: "22" cache: npm diff --git a/.github/workflows/docker-pin-check.yaml b/.github/workflows/docker-pin-check.yaml index bc72381d6..740a95ee0 100644 --- a/.github/workflows/docker-pin-check.yaml +++ b/.github/workflows/docker-pin-check.yaml @@ -22,7 +22,7 @@ jobs: timeout-minutes: 5 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Check Dockerfile base-image pin run: bash scripts/update-docker-pin.sh --check diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index d59f31df4..3b42ccfa9 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -26,15 +26,15 @@ jobs: timeout-minutes: 10 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.11" - name: Install uv - uses: astral-sh/setup-uv@v4 + uses: astral-sh/setup-uv@v7 - name: Install doc dependencies run: uv sync --group docs diff --git a/.github/workflows/nightly-e2e.yaml b/.github/workflows/nightly-e2e.yaml index 31a12219b..903ec43e1 100644 --- a/.github/workflows/nightly-e2e.yaml +++ b/.github/workflows/nightly-e2e.yaml @@ -29,7 +29,7 @@ jobs: timeout-minutes: 45 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Run full E2E test env: diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 372aa074f..d95796d62 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -17,56 +17,66 @@ 
concurrency: jobs: lint: runs-on: ubuntu-latest - timeout-minutes: 5 + timeout-minutes: 10 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Setup Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: "22" cache: npm - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.11" - name: Install uv - uses: astral-sh/setup-uv@v4 + uses: astral-sh/setup-uv@v7 - - name: Install ruff - run: uv tool install ruff + - name: Install hadolint + run: | + HADOLINT_URL="https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64" + curl -fsSL -o /usr/local/bin/hadolint "$HADOLINT_URL" + EXPECTED=$(curl -fsSL "${HADOLINT_URL}.sha256" | awk '{print $1}') + ACTUAL=$(sha256sum /usr/local/bin/hadolint | awk '{print $1}') + [ "$EXPECTED" = "$ACTUAL" ] || { echo "::error::hadolint checksum mismatch"; exit 1; } + chmod +x /usr/local/bin/hadolint + + - name: Install dependencies + run: | + npm install --ignore-scripts + cd nemoclaw && npm install + cd ../nemoclaw-blueprint && uv sync --extra dev - - name: Install Node dependencies - working-directory: nemoclaw - run: npm install + - name: Build TypeScript plugin + run: cd nemoclaw && npm run build - - name: Run all linters - run: make check + - name: Run all hooks (pre-commit + pre-push) + run: npx prek run --all-files --stage pre-push test-unit: runs-on: ubuntu-latest timeout-minutes: 10 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Setup Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: "22" cache: npm - - name: Install root dependencies - run: npm install - - - name: Install and build TypeScript plugin - working-directory: nemoclaw + - name: Install dependencies run: | - npm install - npm run build + npm install --ignore-scripts + cd nemoclaw && npm install + + - name: Build TypeScript plugin + run: cd nemoclaw 
&& npm run build - name: Run all unit tests with coverage run: npx vitest run --coverage @@ -79,7 +89,7 @@ jobs: timeout-minutes: 15 steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Build sandbox test image run: docker build -f test/Dockerfile.sandbox -t nemoclaw-sandbox-test . diff --git a/.gitignore b/.gitignore index 6561d6a91..9ddd809b8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # Build artifacts and caches *.pyc +*.tsbuildinfo .pytest_cache/ __pycache__/ coverage/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f6a147121..923e870c3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,10 @@ # NemoClaw — prek hook configuration # prek: https://github.com/j178/prek — single binary, no Python required for the runner # Installed as an npm devDependency (@j178/prek) — available after `npm install`. -# All git hooks (pre-commit, commit-msg, pre-push) are managed by prek via this file. -# The "prepare" script in package.json runs `prek install` to wire them up. +# All git hooks (pre-commit, commit-msg, pre-push) are managed by prek via this file only. +# The "prepare" script in package.json runs `prek install` (writes `.git/hooks/*`). +# If you previously used Husky, run: git config --unset core.hooksPath +# then `npm install` again so Git uses the hooks prek installs. # # Usage: # npx prek install @@ -20,6 +22,13 @@ exclude: ^(nemoclaw/dist/|nemoclaw/node_modules/|docs/_build/|\.venv/|uv\.lock$) +# Which git hook shims `prek install` writes (separate from each hook's `stages:`). 
+# https://prek.j178.dev/configuration/#default_install_hook_types +default_install_hook_types: + - pre-commit + - commit-msg + - pre-push + repos: # ── Priority 0: general file fixers ─────────────────────────────────────── - repo: https://github.com/pre-commit/pre-commit-hooks @@ -124,10 +133,14 @@ repos: - --exclude=SC1091 priority: 10 - - repo: https://github.com/hadolint/hadolint - rev: v2.14.0 + - repo: local hooks: - id: hadolint + name: hadolint + entry: hadolint + language: system + files: (Dockerfile[^/]*|.*\.dockerfile)$ + types: [file] priority: 10 - repo: local @@ -152,32 +165,32 @@ repos: hooks: - id: vitest-plugin name: Vitest (plugin project) - entry: npx vitest run --project plugin + entry: bash -c 'root="$(git rev-parse --show-toplevel)" && cd "$root" && exec ./node_modules/.bin/vitest run --project plugin' language: system pass_filenames: false files: ^nemoclaw/ priority: 20 # ── commit-msg hooks ──────────────────────────────────────────────────────── - - repo: local + - repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook + rev: v9.24.0 hooks: - id: commitlint - name: commitlint - entry: npx commitlint --edit - language: system stages: [commit-msg] - always_run: true + additional_dependencies: ["@commitlint/config-conventional@20"] + priority: 10 # ── pre-push hooks ───────────────────────────────────────────────────────── - repo: local hooks: - id: tsc-check name: TypeScript type check (tsc --noEmit) - entry: bash -c 'cd nemoclaw && npx tsc --noEmit' + entry: bash -c 'cd nemoclaw && npx tsc --noEmit --incremental' language: system pass_filenames: false always_run: true stages: [pre-push] + priority: 10 - id: pyright-check name: Pyright (nemoclaw-blueprint) @@ -186,6 +199,7 @@ repos: pass_filenames: false always_run: true stages: [pre-push] + priority: 10 default_language_version: python: python3 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index efdeccace..3a65b3ffe 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md 
@@ -67,7 +67,7 @@ reported by contacting GitHub_Conduct@nvidia.com. All complaints will be reviewe investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies -may be posted separately. +may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other @@ -81,4 +81,4 @@ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.ht [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq \ No newline at end of file +https://www.contributor-covenant.org/faq diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f8f8405d0..48c3dd0f6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,6 +18,7 @@ Install the following before you begin. - Python 3.11+ (for blueprint and documentation builds) - Docker (running) - [uv](https://docs.astral.sh/uv/) (for Python dependency management) +- [hadolint](https://github.com/hadolint/hadolint) (Dockerfile linter — `brew install hadolint` on macOS) ## Getting Started @@ -71,6 +72,8 @@ All git hooks are managed by [prek](https://prek.j178.dev/), a fast, single-bina For a full manual check: `npx prek run --all-files`. For scoped runs: `npx prek run --from-ref --to-ref HEAD`. +If you still have `core.hooksPath` set from an old Husky setup, Git will ignore `.git/hooks`. Run `git config --unset core.hooksPath` in this repo, then `npm install` so `prek install` (via `prepare`) can register the hooks. + `make check` remains the primary documented linter entry point. 
## Project Structure diff --git a/Dockerfile b/Dockerfile index 471a806b9..fe85ed2e1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,6 +12,7 @@ FROM node:22-slim@sha256:4f77a690f2f8946ab16fe1e791a3ac0667ae1c3575c3e4d0d4589e9 ENV DEBIAN_FRONTEND=noninteractive +# hadolint ignore=DL3008 RUN apt-get update && apt-get install -y --no-install-recommends \ python3 python3-pip python3-venv \ curl git ca-certificates \ @@ -51,11 +52,9 @@ RUN mkdir -p /sandbox/.openclaw-data/agents/main/agent \ && ln -s /sandbox/.openclaw-data/update-check.json /sandbox/.openclaw/update-check.json \ && chown -R sandbox:sandbox /sandbox/.openclaw /sandbox/.openclaw-data -# Install OpenClaw CLI -RUN npm install -g openclaw@2026.3.11 - -# Install PyYAML for blueprint runner -RUN pip3 install --break-system-packages pyyaml +# Install OpenClaw CLI and PyYAML for blueprint runner (single layer) +RUN npm install -g openclaw@2026.3.11 \ + && pip3 install --no-cache-dir --break-system-packages "pyyaml==6.0.2" # Copy built plugin and blueprint into the sandbox COPY --from=builder /opt/nemoclaw/dist/ /opt/nemoclaw/dist/ diff --git a/Makefile b/Makefile index 7eaf3c64a..d7a4ddd7b 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,12 @@ .PHONY: check lint format lint-ts lint-py format-ts format-py docs docs-strict docs-live docs-clean -check: lint-ts lint-py +check: + npx prek run --all-files @echo "All checks passed." -lint: lint-ts lint-py +lint: check +# Targeted subproject checks (not part of `make check` — use for focused runs). lint-ts: cd nemoclaw && npm run check diff --git a/docs/about/overview.md b/docs/about/overview.md index 23a49126e..f6f732744 100644 --- a/docs/about/overview.md +++ b/docs/about/overview.md @@ -71,4 +71,4 @@ Explore the following pages to learn more about NemoClaw. - [Switch Inference Providers](../inference/switch-inference-providers.md) to configure the inference provider. 
- [Approve or Deny Network Requests](../network-policy/approve-network-requests.md) to manage egress approvals. - [Deploy to a Remote GPU Instance](../deployment/deploy-to-remote-gpu.md) for persistent operation. -- [Monitor Sandbox Activity](../monitoring/monitor-sandbox-activity.md) to observe agent behavior. \ No newline at end of file +- [Monitor Sandbox Activity](../monitoring/monitor-sandbox-activity.md) to observe agent behavior. diff --git a/docs/about/release-notes.md b/docs/about/release-notes.md index e9a74ca66..b5f3b88e1 100644 --- a/docs/about/release-notes.md +++ b/docs/about/release-notes.md @@ -30,4 +30,4 @@ NVIDIA NemoClaw is available in early preview starting March 16, 2026. Use the f | [Releases](https://github.com/NVIDIA/NemoClaw/releases) | Versioned release notes and downloadable assets. | | [Release comparison](https://github.com/NVIDIA/NemoClaw/compare) | Diff between any two tags or branches. | | [Merged pull requests](https://github.com/NVIDIA/NemoClaw/pulls?q=is%3Apr+is%3Amerged) | Individual changes with review discussion. | -| [Commit history](https://github.com/NVIDIA/NemoClaw/commits/main) | Full commit log on `main`. | \ No newline at end of file +| [Commit history](https://github.com/NVIDIA/NemoClaw/commits/main) | Full commit log on `main`. 
| diff --git a/install.sh b/install.sh index 88c767980..40a0d6e2a 100755 --- a/install.sh +++ b/install.sh @@ -27,9 +27,9 @@ NEMOCLAW_VERSION="$(resolve_installer_version)" # --------------------------------------------------------------------------- if [[ -z "${NO_COLOR:-}" && -t 1 ]]; then if [[ "${COLORTERM:-}" == "truecolor" || "${COLORTERM:-}" == "24bit" ]]; then - C_GREEN=$'\033[38;2;118;185;0m' # #76B900 — exact NVIDIA green + C_GREEN=$'\033[38;2;118;185;0m' # #76B900 — exact NVIDIA green else - C_GREEN=$'\033[38;5;148m' # closest 256-color on dark backgrounds + C_GREEN=$'\033[38;5;148m' # closest 256-color on dark backgrounds fi C_BOLD=$'\033[1m' C_DIM=$'\033[2m' @@ -44,10 +44,13 @@ fi # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- -info() { printf "${C_CYAN}[INFO]${C_RESET} %s\n" "$*"; } -warn() { printf "${C_YELLOW}[WARN]${C_RESET} %s\n" "$*"; } -error() { printf "${C_RED}[ERROR]${C_RESET} %s\n" "$*" >&2; exit 1; } -ok() { printf " ${C_GREEN}✓${C_RESET} %s\n" "$*"; } +info() { printf "${C_CYAN}[INFO]${C_RESET} %s\n" "$*"; } +warn() { printf "${C_YELLOW}[WARN]${C_RESET} %s\n" "$*"; } +error() { + printf "${C_RED}[ERROR]${C_RESET} %s\n" "$*" >&2 + exit 1 +} +ok() { printf " ${C_GREEN}✓${C_RESET} %s\n" "$*"; } resolve_default_sandbox_name() { local registry_file="${HOME}/.nemoclaw/sandboxes.json" @@ -95,7 +98,7 @@ print_banner() { } print_done() { - local elapsed=$(( SECONDS - _INSTALL_START )) + local elapsed=$((SECONDS - _INSTALL_START)) local sandbox_name sandbox_name="$(resolve_default_sandbox_name)" info "=== Installation complete ===" @@ -146,7 +149,8 @@ usage() { # Stdout/stderr are captured; dumped only on failure. # Falls back to plain output when stdout is not a TTY (CI / piped installs). spin() { - local msg="$1"; shift + local msg="$1" + shift if [[ ! 
-t 1 ]]; then info "$msg" @@ -154,7 +158,8 @@ spin() { return fi - local log; log=$(mktemp) + local log + log=$(mktemp) "$@" >"$log" 2>&1 & local pid=$! i=0 local frames=('⠋' '⠙' '⠹' '⠸' '⠼' '⠴' '⠦' '⠧' '⠇' '⠏') @@ -191,12 +196,13 @@ ORIGINAL_PATH="${PATH:-}" # Compare two semver strings (major.minor.patch). Returns 0 if $1 >= $2. version_gte() { - local IFS=. - local -a a=($1) b=($2) + local -a a b + IFS=. read -ra a <<<"$1" + IFS=. read -ra b <<<"$2" for i in 0 1 2; do local ai=${a[$i]:-0} bi=${b[$i]:-0} - if (( ai > bi )); then return 0; fi - if (( ai < bi )); then return 1; fi + if ((ai > bi)); then return 0; fi + if ((ai < bi)); then return 1; fi done return 0 } @@ -265,7 +271,7 @@ ensure_supported_runtime() { [[ "$node_major" =~ ^[0-9]+$ ]] || error "Could not determine Node.js version from '${node_version}'. ${RUNTIME_REQUIREMENT_MSG}" [[ "$npm_major" =~ ^[0-9]+$ ]] || error "Could not determine npm version from '${npm_version}'. ${RUNTIME_REQUIREMENT_MSG}" - if (( node_major < MIN_NODE_MAJOR || npm_major < MIN_NPM_MAJOR )); then + if ((node_major < MIN_NODE_MAJOR || npm_major < MIN_NPM_MAJOR)); then error "Unsupported runtime detected: Node.js ${node_version:-unknown}, npm ${npm_version:-unknown}. ${RUNTIME_REQUIREMENT_MSG} Upgrade Node.js and rerun the installer." 
fi @@ -288,7 +294,10 @@ install_nodejs() { local nvm_tmp nvm_tmp="$(mktemp)" curl -fsSL "https://raw.githubusercontent.com/nvm-sh/nvm/${NVM_VERSION}/install.sh" -o "$nvm_tmp" \ - || { rm -f "$nvm_tmp"; error "Failed to download nvm installer"; } + || { + rm -f "$nvm_tmp" + error "Failed to download nvm installer" + } local actual_hash if command_exists sha256sum; then actual_hash="$(sha256sum "$nvm_tmp" | awk '{print $1}')" @@ -296,7 +305,7 @@ install_nodejs() { actual_hash="$(shasum -a 256 "$nvm_tmp" | awk '{print $1}')" else warn "No SHA-256 tool found — skipping nvm integrity check" - actual_hash="$NVM_SHA256" # allow execution + actual_hash="$NVM_SHA256" # allow execution fi if [[ "$actual_hash" != "$NVM_SHA256" ]]; then rm -f "$nvm_tmp" @@ -341,7 +350,7 @@ get_vram_mb() { if [[ "$(uname -s)" == "Darwin" ]] && command_exists sysctl; then local bytes bytes=$(sysctl -n hw.memsize 2>/dev/null || echo 0) - echo $(( bytes / 1024 / 1024 )) + echo $((bytes / 1024 / 1024)) return fi echo 0 @@ -373,10 +382,10 @@ install_or_upgrade_ollama() { # Pull the appropriate model based on VRAM local vram_mb vram_mb=$(get_vram_mb) - local vram_gb=$(( vram_mb / 1024 )) + local vram_gb=$((vram_mb / 1024)) info "Detected ${vram_gb} GB VRAM" - if (( vram_gb >= 120 )); then + if ((vram_gb >= 120)); then info "Pulling nemotron-3-super:120b…" ollama pull nemotron-3-super:120b else @@ -406,13 +415,12 @@ pre_extract_openclaw() { info "Pre-extracting openclaw@${openclaw_version} with system tar (GH-503 workaround)…" local tmpdir tmpdir="$(mktemp -d)" - if npm pack "openclaw@${openclaw_version}" --pack-destination "$tmpdir" > /dev/null 2>&1; then + if npm pack "openclaw@${openclaw_version}" --pack-destination "$tmpdir" >/dev/null 2>&1; then local tgz tgz="$(find "$tmpdir" -maxdepth 1 -name 'openclaw-*.tgz' -print -quit)" if [[ -n "$tgz" && -f "$tgz" ]]; then if mkdir -p "${install_dir}/node_modules/openclaw" \ - && tar xzf "$tgz" -C "${install_dir}/node_modules/openclaw" --strip-components=1 
- then + && tar xzf "$tgz" -C "${install_dir}/node_modules/openclaw" --strip-components=1; then info "openclaw pre-extracted successfully" else warn "Failed to extract openclaw tarball" @@ -435,8 +443,8 @@ pre_extract_openclaw() { install_nemoclaw() { if [[ -f "./package.json" ]] && grep -q '"name": "nemoclaw"' ./package.json 2>/dev/null; then info "NemoClaw package.json found in current directory — installing from source…" - spin "Preparing OpenClaw package" bash -lc "$(declare -f pre_extract_openclaw); pre_extract_openclaw \"\$1\"" _ "$(pwd)" || \ - warn "Pre-extraction failed — npm install may fail if openclaw tarball is broken" + spin "Preparing OpenClaw package" bash -lc "$(declare -f pre_extract_openclaw); pre_extract_openclaw \"\$1\"" _ "$(pwd)" \ + || warn "Pre-extraction failed — npm install may fail if openclaw tarball is broken" spin "Installing NemoClaw dependencies" npm install --ignore-scripts spin "Building NemoClaw plugin" bash -lc 'cd nemoclaw && npm install --ignore-scripts && npm run build' spin "Linking NemoClaw CLI" npm link @@ -449,8 +457,8 @@ install_nemoclaw() { rm -rf "$nemoclaw_src" mkdir -p "$(dirname "$nemoclaw_src")" spin "Cloning NemoClaw source" git clone --depth 1 https://github.com/NVIDIA/NemoClaw.git "$nemoclaw_src" - spin "Preparing OpenClaw package" bash -lc "$(declare -f pre_extract_openclaw); pre_extract_openclaw \"\$1\"" _ "$nemoclaw_src" || \ - warn "Pre-extraction failed — npm install may fail if openclaw tarball is broken" + spin "Preparing OpenClaw package" bash -lc "$(declare -f pre_extract_openclaw); pre_extract_openclaw \"\$1\"" _ "$nemoclaw_src" \ + || warn "Pre-extraction failed — npm install may fail if openclaw tarball is broken" spin "Installing NemoClaw dependencies" bash -lc "cd \"$nemoclaw_src\" && npm install --ignore-scripts" spin "Building NemoClaw plugin" bash -lc "cd \"$nemoclaw_src\"/nemoclaw && npm install --ignore-scripts && npm run build" spin "Linking NemoClaw CLI" bash -lc "cd \"$nemoclaw_src\" && npm 
link" @@ -558,9 +566,18 @@ main() { for arg in "$@"; do case "$arg" in --non-interactive) NON_INTERACTIVE=1 ;; - --version|-v) printf "nemoclaw-installer v%s\n" "$NEMOCLAW_VERSION"; exit 0 ;; - --help|-h) usage; exit 0 ;; - *) usage; error "Unknown option: $arg" ;; + --version | -v) + printf "nemoclaw-installer v%s\n" "$NEMOCLAW_VERSION" + exit 0 + ;; + --help | -h) + usage + exit 0 + ;; + *) + usage + error "Unknown option: $arg" + ;; esac done # Also honor env var diff --git a/nemoclaw-blueprint/migrations/snapshot.py b/nemoclaw-blueprint/migrations/snapshot.py old mode 100644 new mode 100755 diff --git a/nemoclaw-blueprint/orchestrator/__init__.py b/nemoclaw-blueprint/orchestrator/__init__.py index e69de29bb..52a7a9daf 100644 --- a/nemoclaw-blueprint/orchestrator/__init__.py +++ b/nemoclaw-blueprint/orchestrator/__init__.py @@ -0,0 +1,2 @@ +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 diff --git a/package.json b/package.json index 504f88998..cbbe0bc3d 100644 --- a/package.json +++ b/package.json @@ -8,7 +8,7 @@ }, "scripts": { "test": "vitest run", - "prepare": "if [ -d .git ]; then prek install --hook-type pre-commit --hook-type commit-msg --hook-type pre-push; fi", + "prepare": "if [ -d .git ]; then prek install; fi", "prepublishOnly": "cd nemoclaw && env -u npm_config_global -u npm_config_prefix -u npm_config_omit npm install --ignore-scripts && ./node_modules/.bin/tsc" }, "dependencies": { diff --git a/scripts/backup-workspace.sh b/scripts/backup-workspace.sh index 0e83ca3fe..573823443 100755 --- a/scripts/backup-workspace.sh +++ b/scripts/backup-workspace.sh @@ -16,10 +16,13 @@ NC='\033[0m' info() { echo -e "${GREEN}[backup]${NC} $1"; } warn() { echo -e "${YELLOW}[backup]${NC} $1"; } -fail() { echo -e "${RED}[backup]${NC} $1" >&2; exit 1; } +fail() { + echo -e "${RED}[backup]${NC} $1" >&2 + exit 1 +} usage() { - cat < $(basename "$0") restore [timestamp] @@ -31,88 
+34,88 @@ Commands: Backup location: ${BACKUP_BASE}// EOF - exit 1 + exit 1 } do_backup() { - local sandbox="$1" - local ts - ts="$(date +%Y%m%d-%H%M%S)" - local dest="${BACKUP_BASE}/${ts}" - - mkdir -p "$BACKUP_BASE" - chmod 0700 "${HOME}/.nemoclaw" "$BACKUP_BASE" || \ - fail "Failed to set secure permissions on ${HOME}/.nemoclaw — check directory ownership." - mkdir -p "$dest" - chmod 0700 "$dest" - - info "Backing up workspace from sandbox '${sandbox}'..." - - local count=0 - for f in "${FILES[@]}"; do - if openshell sandbox download "$sandbox" "${WORKSPACE_PATH}/${f}" "${dest}/"; then - count=$((count + 1)) - else - warn "Skipped ${f} (not found or download failed)" - fi - done - - for d in "${DIRS[@]}"; do - if openshell sandbox download "$sandbox" "${WORKSPACE_PATH}/${d}/" "${dest}/${d}/"; then - count=$((count + 1)) - else - warn "Skipped ${d}/ (not found or download failed)" - fi - done - - if [ "$count" -eq 0 ]; then - fail "No files were backed up. Check that the sandbox '${sandbox}' exists and has workspace files." + local sandbox="$1" + local ts + ts="$(date +%Y%m%d-%H%M%S)" + local dest="${BACKUP_BASE}/${ts}" + + mkdir -p "$BACKUP_BASE" + chmod 0700 "${HOME}/.nemoclaw" "$BACKUP_BASE" \ + || fail "Failed to set secure permissions on ${HOME}/.nemoclaw — check directory ownership." + mkdir -p "$dest" + chmod 0700 "$dest" + + info "Backing up workspace from sandbox '${sandbox}'..." + + local count=0 + for f in "${FILES[@]}"; do + if openshell sandbox download "$sandbox" "${WORKSPACE_PATH}/${f}" "${dest}/"; then + count=$((count + 1)) + else + warn "Skipped ${f} (not found or download failed)" + fi + done + + for d in "${DIRS[@]}"; do + if openshell sandbox download "$sandbox" "${WORKSPACE_PATH}/${d}/" "${dest}/${d}/"; then + count=$((count + 1)) + else + warn "Skipped ${d}/ (not found or download failed)" fi + done + + if [ "$count" -eq 0 ]; then + fail "No files were backed up. Check that the sandbox '${sandbox}' exists and has workspace files." 
+ fi - info "Backup saved to ${dest}/ (${count} items)" + info "Backup saved to ${dest}/ (${count} items)" } do_restore() { - local sandbox="$1" - local ts="${2:-}" - - if [ -z "$ts" ]; then - ts="$(ls -1 "$BACKUP_BASE" 2>/dev/null | sort -r | head -n1)" - [ -n "$ts" ] || fail "No backups found in ${BACKUP_BASE}/" - info "Using most recent backup: ${ts}" + local sandbox="$1" + local ts="${2:-}" + + if [ -z "$ts" ]; then + ts="$(ls -1 "$BACKUP_BASE" 2>/dev/null | sort -r | head -n1)" + [ -n "$ts" ] || fail "No backups found in ${BACKUP_BASE}/" + info "Using most recent backup: ${ts}" + fi + + local src="${BACKUP_BASE}/${ts}" + [ -d "$src" ] || fail "Backup directory not found: ${src}" + + info "Restoring workspace to sandbox '${sandbox}' from ${src}..." + + local count=0 + for f in "${FILES[@]}"; do + if [ -f "${src}/${f}" ]; then + if openshell sandbox upload "$sandbox" "${src}/${f}" "${WORKSPACE_PATH}/"; then + count=$((count + 1)) + else + warn "Failed to restore ${f}" + fi fi - - local src="${BACKUP_BASE}/${ts}" - [ -d "$src" ] || fail "Backup directory not found: ${src}" - - info "Restoring workspace to sandbox '${sandbox}' from ${src}..." - - local count=0 - for f in "${FILES[@]}"; do - if [ -f "${src}/${f}" ]; then - if openshell sandbox upload "$sandbox" "${src}/${f}" "${WORKSPACE_PATH}/"; then - count=$((count + 1)) - else - warn "Failed to restore ${f}" - fi - fi - done - - for d in "${DIRS[@]}"; do - if [ -d "${src}/${d}" ]; then - if openshell sandbox upload "$sandbox" "${src}/${d}/" "${WORKSPACE_PATH}/${d}/"; then - count=$((count + 1)) - else - warn "Failed to restore ${d}/" - fi - fi - done - - if [ "$count" -eq 0 ]; then - fail "No files were restored. Check that the sandbox '${sandbox}' is running." 
+ done + + for d in "${DIRS[@]}"; do + if [ -d "${src}/${d}" ]; then + if openshell sandbox upload "$sandbox" "${src}/${d}/" "${WORKSPACE_PATH}/${d}/"; then + count=$((count + 1)) + else + warn "Failed to restore ${d}/" + fi fi + done + + if [ "$count" -eq 0 ]; then + fail "No files were restored. Check that the sandbox '${sandbox}' is running." + fi - info "Restored ${count} items to sandbox '${sandbox}'." + info "Restored ${count} items to sandbox '${sandbox}'." } # --- Main --- @@ -125,7 +128,7 @@ sandbox="$2" shift 2 case "$action" in - backup) do_backup "$sandbox" ;; - restore) do_restore "$sandbox" "$@" ;; - *) usage ;; + backup) do_backup "$sandbox" ;; + restore) do_restore "$sandbox" "$@" ;; + *) usage ;; esac diff --git a/scripts/brev-setup.sh b/scripts/brev-setup.sh index a4b421577..cc8701ba9 100755 --- a/scripts/brev-setup.sh +++ b/scripts/brev-setup.sh @@ -23,7 +23,10 @@ NC='\033[0m' info() { echo -e "${GREEN}[brev]${NC} $1"; } warn() { echo -e "${YELLOW}[brev]${NC} $1"; } -fail() { echo -e "${RED}[brev]${NC} $1"; exit 1; } +fail() { + echo -e "${RED}[brev]${NC} $1" + exit 1 +} SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" @@ -34,20 +37,20 @@ export NEEDRESTART_MODE=a export DEBIAN_FRONTEND=noninteractive # --- 0. Node.js (needed for services) --- -if ! command -v node > /dev/null 2>&1; then +if ! command -v node >/dev/null 2>&1; then info "Installing Node.js..." - curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash - > /dev/null 2>&1 - sudo apt-get install -y -qq nodejs > /dev/null 2>&1 + curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash - >/dev/null 2>&1 + sudo apt-get install -y -qq nodejs >/dev/null 2>&1 info "Node.js $(node --version) installed" else info "Node.js already installed: $(node --version)" fi # --- 1. Docker --- -if ! command -v docker > /dev/null 2>&1; then +if ! command -v docker >/dev/null 2>&1; then info "Installing Docker..." 
- sudo apt-get update -qq > /dev/null 2>&1 - sudo apt-get install -y -qq docker.io > /dev/null 2>&1 + sudo apt-get update -qq >/dev/null 2>&1 + sudo apt-get install -y -qq docker.io >/dev/null 2>&1 sudo usermod -aG docker "$(whoami)" info "Docker installed" else @@ -55,17 +58,17 @@ else fi # --- 2. NVIDIA Container Toolkit (if GPU present) --- -if command -v nvidia-smi > /dev/null 2>&1; then - if ! dpkg -s nvidia-container-toolkit > /dev/null 2>&1; then +if command -v nvidia-smi >/dev/null 2>&1; then + if ! dpkg -s nvidia-container-toolkit >/dev/null 2>&1; then info "Installing NVIDIA Container Toolkit..." curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \ | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \ | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \ - | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list > /dev/null - sudo apt-get update -qq > /dev/null 2>&1 - sudo apt-get install -y -qq nvidia-container-toolkit > /dev/null 2>&1 - sudo nvidia-ctk runtime configure --runtime=docker > /dev/null 2>&1 + | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list >/dev/null + sudo apt-get update -qq >/dev/null 2>&1 + sudo apt-get install -y -qq nvidia-container-toolkit >/dev/null 2>&1 + sudo nvidia-ctk runtime configure --runtime=docker >/dev/null 2>&1 sudo systemctl restart docker info "NVIDIA Container Toolkit installed" else @@ -74,16 +77,16 @@ if command -v nvidia-smi > /dev/null 2>&1; then fi # --- 3. openshell CLI (binary release, not pip) --- -if ! command -v openshell > /dev/null 2>&1; then +if ! command -v openshell >/dev/null 2>&1; then info "Installing openshell CLI from GitHub release..." - if ! command -v gh > /dev/null 2>&1; then - sudo apt-get update -qq > /dev/null 2>&1 - sudo apt-get install -y -qq gh > /dev/null 2>&1 + if ! 
command -v gh >/dev/null 2>&1; then + sudo apt-get update -qq >/dev/null 2>&1 + sudo apt-get install -y -qq gh >/dev/null 2>&1 fi ARCH="$(uname -m)" case "$ARCH" in - x86_64|amd64) ASSET="openshell-x86_64-unknown-linux-musl.tar.gz" ;; - aarch64|arm64) ASSET="openshell-aarch64-unknown-linux-musl.tar.gz" ;; + x86_64 | amd64) ASSET="openshell-x86_64-unknown-linux-musl.tar.gz" ;; + aarch64 | arm64) ASSET="openshell-aarch64-unknown-linux-musl.tar.gz" ;; *) fail "Unsupported architecture: $ARCH" ;; esac tmpdir="$(mktemp -d)" @@ -98,12 +101,12 @@ else fi # --- 3b. cloudflared (for public tunnel) --- -if ! command -v cloudflared > /dev/null 2>&1; then +if ! command -v cloudflared >/dev/null 2>&1; then info "Installing cloudflared..." CF_ARCH="$(uname -m)" case "$CF_ARCH" in - x86_64|amd64) CF_ARCH="amd64" ;; - aarch64|arm64) CF_ARCH="arm64" ;; + x86_64 | amd64) CF_ARCH="amd64" ;; + aarch64 | arm64) CF_ARCH="arm64" ;; *) fail "Unsupported architecture for cloudflared: $CF_ARCH" ;; esac tmpdir=$(mktemp -d) @@ -117,11 +120,11 @@ fi # --- 4. vLLM (local inference, if GPU present) --- VLLM_MODEL="nvidia/nemotron-3-nano-30b-a3b" -if command -v nvidia-smi > /dev/null 2>&1; then +if command -v nvidia-smi >/dev/null 2>&1; then if ! python3 -c "import vllm" 2>/dev/null; then info "Installing vLLM..." - if ! command -v pip3 > /dev/null 2>&1; then - sudo apt-get install -y -qq python3-pip > /dev/null 2>&1 + if ! command -v pip3 >/dev/null 2>&1; then + sudo apt-get install -y -qq python3-pip >/dev/null 2>&1 fi pip3 install --break-system-packages vllm 2>/dev/null || pip3 install vllm info "vLLM installed" @@ -130,7 +133,7 @@ if command -v nvidia-smi > /dev/null 2>&1; then fi # Start vLLM if not already running - if curl -s http://localhost:8000/v1/models > /dev/null 2>&1; then + if curl -s http://localhost:8000/v1/models >/dev/null 2>&1; then info "vLLM already running on :8000" elif python3 -c "import vllm" 2>/dev/null; then info "Starting vLLM with $VLLM_MODEL..." 
@@ -138,11 +141,11 @@ if command -v nvidia-smi > /dev/null 2>&1; then --model "$VLLM_MODEL" \ --port 8000 \ --host 0.0.0.0 \ - > /tmp/vllm-server.log 2>&1 & + >/tmp/vllm-server.log 2>&1 & VLLM_PID=$! info "Waiting for vLLM to load model (this can take a few minutes)..." for _ in $(seq 1 120); do - if curl -s http://localhost:8000/v1/models > /dev/null 2>&1; then + if curl -s http://localhost:8000/v1/models >/dev/null 2>&1; then info "vLLM ready (PID $VLLM_PID)" break fi diff --git a/scripts/check-spdx-headers.sh b/scripts/check-spdx-headers.sh index e67d0d49c..4f56378f8 100755 --- a/scripts/check-spdx-headers.sh +++ b/scripts/check-spdx-headers.sh @@ -13,11 +13,11 @@ LICENSE="SPDX-License-Identifier: Apache-2.0" failed=0 for file in "$@"; do file_head="$(head -n 5 -- "$file")" - if ! grep -Fq "$COPYRIGHT" <<< "$file_head"; then + if ! grep -Fq "$COPYRIGHT" <<<"$file_head"; then echo "Missing SPDX-FileCopyrightText: $file" failed=1 fi - if ! grep -Fq "$LICENSE" <<< "$file_head"; then + if ! 
grep -Fq "$LICENSE" <<<"$file_head"; then echo "Missing SPDX-License-Identifier: $file" failed=1 fi diff --git a/scripts/debug.sh b/scripts/debug.sh index 3fa2647e6..045f38fc9 100755 --- a/scripts/debug.sh +++ b/scripts/debug.sh @@ -26,9 +26,12 @@ RED='\033[0;31m' CYAN='\033[0;36m' NC='\033[0m' -info() { echo -e "${GREEN}[debug]${NC} $1"; } -warn() { echo -e "${YELLOW}[debug]${NC} $1"; } -fail() { echo -e "${RED}[debug]${NC} $1"; exit 1; } +info() { echo -e "${GREEN}[debug]${NC} $1"; } +warn() { echo -e "${YELLOW}[debug]${NC} $1"; } +fail() { + echo -e "${RED}[debug]${NC} $1" + exit 1 +} section() { echo -e "\n${CYAN}═══ $1 ═══${NC}\n"; } # ── Parse flags ────────────────────────────────────────────────── @@ -47,11 +50,11 @@ while [ $# -gt 0 ]; do QUICK=true shift ;; - --output|-o) + --output | -o) OUTPUT="${2:?--output requires a path}" shift 2 ;; - --help|-h) + --help | -h) cat <<'USAGE' Usage: scripts/debug.sh [OPTIONS] @@ -130,12 +133,12 @@ collect() { local rc=0 local tmpout="${outfile}.raw" if [ -n "$TIMEOUT_BIN" ]; then - "$TIMEOUT_BIN" 30 "$@" > "$tmpout" 2>&1 || rc=$? + "$TIMEOUT_BIN" 30 "$@" >"$tmpout" 2>&1 || rc=$? else - "$@" > "$tmpout" 2>&1 || rc=$? + "$@" >"$tmpout" 2>&1 || rc=$? fi - redact < "$tmpout" > "$outfile" + redact <"$tmpout" >"$outfile" rm -f "$tmpout" cat "$outfile" @@ -244,14 +247,14 @@ fi if command -v openshell &>/dev/null \ && openshell sandbox list 2>/dev/null \ - | awk 'NF { if (tolower($1) == "name") next; print $1 }' \ + | awk 'NF { if (tolower($1) == "name") next; print $1 }' \ | grep -Fxq -- "$SANDBOX_NAME"; then section "Sandbox Internals" # Build a temporary SSH config so we can run commands inside the sandbox. # This follows the pattern from OpenShell's own demo.sh. 
SANDBOX_SSH_CONFIG=$(mktemp "${TMPDIR_BASE}/nemoclaw-ssh-XXXXXX") - if openshell sandbox ssh-config "$SANDBOX_NAME" > "$SANDBOX_SSH_CONFIG" 2>/dev/null; then + if openshell sandbox ssh-config "$SANDBOX_NAME" >"$SANDBOX_SSH_CONFIG" 2>/dev/null; then SANDBOX_SSH_HOST="openshell-${SANDBOX_NAME}" SANDBOX_SSH_OPTS=(-F "$SANDBOX_SSH_CONFIG" -o StrictHostKeyChecking=no -o ConnectTimeout=10) @@ -322,4 +325,3 @@ fi echo "" info "Done. If filing a bug, run with --output and attach the tarball to your issue:" info " nemoclaw debug --output /tmp/nemoclaw-debug.tar.gz" - diff --git a/scripts/docs-to-skills.py b/scripts/docs-to-skills.py old mode 100644 new mode 100755 index 805b20791..5e8fed79d --- a/scripts/docs-to-skills.py +++ b/scripts/docs-to-skills.py @@ -61,6 +61,7 @@ # Frontmatter / doc parsing # --------------------------------------------------------------------------- + @dataclass class DocPage: """A single documentation page with parsed metadata and content.""" @@ -96,7 +97,7 @@ def parse_yaml_frontmatter(text: str) -> tuple[dict, str]: return {}, text fm_text = text[4:end].strip() - body = text[end + 4:].strip() + body = text[end + 4 :].strip() fm = _parse_simple_yaml(fm_text) return fm, body @@ -105,7 +106,6 @@ def _parse_simple_yaml(text: str) -> dict: """Minimal YAML parser for doc frontmatter. 
Handles nested keys, lists.""" result: dict = {} current_key: str | None = None - current_indent = 0 parent_stack: list[tuple[str, dict, int]] = [] for line in text.split("\n"): @@ -138,8 +138,11 @@ def _parse_simple_yaml(text: str) -> dict: target = _current_dict(result, parent_stack) if val.startswith("[") and val.endswith("]"): - items = [v.strip().strip('"').strip("'") - for v in val[1:-1].split(",") if v.strip()] + items = [ + v.strip().strip('"').strip("'") + for v in val[1:-1].split(",") + if v.strip() + ] target[key] = items current_key = key elif val: @@ -150,8 +153,6 @@ def _parse_simple_yaml(text: str) -> dict: parent_stack.append((key, target, indent)) current_key = None - current_indent = indent - return result @@ -218,6 +219,7 @@ def _extract_sections(body: str) -> list[tuple[str, str]]: # Content transformation # --------------------------------------------------------------------------- + def clean_myst_directives(text: str) -> str: """Convert MyST/Sphinx directives to standard markdown equivalents.""" # Multi-line {include} directives with :start-after: etc. @@ -250,8 +252,11 @@ def clean_myst_directives(text: str) -> str: def _format_admonition(title: str, body: str) -> str: """Format an admonition as a blockquote, stripping directive lines.""" - lines = [l for l in body.strip().split("\n") - if not re.match(r"^\s*:[a-z_-]+:", l)] + lines = [ + line + for line in body.strip().split("\n") + if not re.match(r"^\s*:[a-z_-]+:", line) + ] while lines and not lines[0].strip(): lines.pop(0) while lines and not lines[-1].strip(): @@ -315,9 +320,7 @@ def resolve_includes(text: str, source_dir: Path) -> str: Handles :start-after: and :end-before: markers for partial content extraction. Falls back to a placeholder when the file cannot be read. 
""" - pattern = re.compile( - r"```\{include\}\s*([^\n]+)\n((?::[^\n]+\n)*)```" - ) + pattern = re.compile(r"```\{include\}\s*([^\n]+)\n((?::[^\n]+\n)*)```") def _resolve(match: re.Match) -> str: raw_path = match.group(1).strip() @@ -328,9 +331,9 @@ def _resolve(match: re.Match) -> str: for line in directives.strip().split("\n"): line = line.strip() if line.startswith(":start-after:"): - start_after = line[len(":start-after:"):].strip() + start_after = line[len(":start-after:") :].strip() elif line.startswith(":end-before:"): - end_before = line[len(":end-before:"):].strip() + end_before = line[len(":end-before:") :].strip() resolved = (source_dir / raw_path).resolve() if not resolved.is_file(): @@ -344,7 +347,7 @@ def _resolve(match: re.Match) -> str: if start_after: idx = content.find(start_after) if idx != -1: - content = content[idx + len(start_after):] + content = content[idx + len(start_after) :] if end_before: idx = content.find(end_before) if idx != -1: @@ -441,8 +444,8 @@ def extract_related_skills(text: str) -> tuple[str, list[str]]: # Match H2 or H3 "Next Steps" / "Related Topics" sections and their content pattern = re.compile( r"^(#{2,3})\s+(Next Steps|Related Topics)\s*\n+" - r"(?:.*?\n)*?" # optional intro line - r"((?:- .+\n?)+)", # the bullet list + r"(?:.*?\n)*?" 
# optional intro line + r"((?:- .+\n?)+)", # the bullet list re.MULTILINE, ) @@ -479,7 +482,7 @@ def _safe_truncation_point(lines: list[str], target: int) -> int: """Find a safe truncation point that doesn't break code fences.""" in_fence = False last_safe = target - for i, line in enumerate(lines[:target + 20]): + for i, line in enumerate(lines[: target + 20]): if line.strip().startswith("```"): in_fence = not in_fence if i >= target and not in_fence: @@ -505,8 +508,25 @@ def extract_trigger_keywords(pages: list[DocPage]) -> list[str]: # Extract meaningful words from the title if page.title: title_words = re.sub(r"[^a-zA-Z\s]", "", page.title).lower().split() - stop_words = {"the", "a", "an", "and", "or", "for", "to", "in", "of", - "it", "how", "what", "with", "from", "by", "on", "is"} + stop_words = { + "the", + "a", + "an", + "and", + "or", + "for", + "to", + "in", + "of", + "it", + "how", + "what", + "with", + "from", + "by", + "on", + "is", + } title_words = [w for w in title_words if w not in stop_words and len(w) > 2] if len(title_words) >= 2: keywords.add(" ".join(title_words[:4])) @@ -575,14 +595,58 @@ def extract_trigger_keywords(pages: list[DocPage]) -> list[str]: "security": "security", } -NOUN_STOP = {"the", "a", "an", "and", "or", "for", "to", "in", "of", "it", - "how", "what", "with", "from", "by", "on", "is", "your", "that", - "this", "its", "use", "using", "at", "runtime", "activity", - "issues", "guide", "configuration", "settings", "options", - "models", "providers", "requests", "resources", "instances", - "debug", "troubleshoot", "fix", "check", "verify", "test", - "deny", "approve", "enable", "disable", "manage", "works", - "agent", "agents"} +NOUN_STOP = { + "the", + "a", + "an", + "and", + "or", + "for", + "to", + "in", + "of", + "it", + "how", + "what", + "with", + "from", + "by", + "on", + "is", + "your", + "that", + "this", + "its", + "use", + "using", + "at", + "runtime", + "activity", + "issues", + "guide", + "configuration", + 
"settings", + "options", + "models", + "providers", + "requests", + "resources", + "instances", + "debug", + "troubleshoot", + "fix", + "check", + "verify", + "test", + "deny", + "approve", + "enable", + "disable", + "manage", + "works", + "agent", + "agents", +} PROJECT_STOP = set() # Populated at runtime from --prefix @@ -603,14 +667,16 @@ def _extract_noun_from_title(title: str) -> str | None: # Strip the leading verb phrase for phrase in sorted(TITLE_VERBS, key=lambda x: -len(x)): if lower.startswith(phrase): - lower = lower[len(phrase):].strip() + lower = lower[len(phrase) :].strip() break # Strip everything after em-dash, en-dash, or colon (subtitle) lower = re.split(r"\s*[—–]\s*|\s*:\s*|\s*-{2,}\s*", lower)[0] words = re.sub(r"[^a-z\s]", "", lower).split() - nouns = [w for w in words if w not in NOUN_STOP and w not in PROJECT_STOP and len(w) > 2] + nouns = [ + w for w in words if w not in NOUN_STOP and w not in PROJECT_STOP and len(w) > 2 + ] if len(nouns) >= 2: return "-".join(nouns[:2]) @@ -681,7 +747,9 @@ def generate_skill_name( return name -def build_skill_description(name: str, pages: list[DocPage], keywords: list[str]) -> str: +def build_skill_description( + name: str, pages: list[DocPage], keywords: list[str] +) -> str: """Build the description field for the skill frontmatter. 
Best-practices compliance: @@ -736,8 +804,18 @@ def _to_third_person(sentence: str) -> str: return sentence _BASE_VERBS_ENDING_IN_S = { - "access", "process", "address", "discuss", "bypass", "express", - "compress", "assess", "stress", "progress", "focus", "canvas", + "access", + "process", + "address", + "discuss", + "bypass", + "express", + "compress", + "assess", + "stress", + "progress", + "focus", + "canvas", } if first_word.endswith("ing"): return first_word + trailing_punct + suffix @@ -745,7 +823,11 @@ def _to_third_person(sentence: str) -> str: return first_word + trailing_punct + suffix if first_word.endswith(("ch", "sh", "x", "ss", "zz")): return first_word + "es" + trailing_punct + suffix - if first_word.endswith("y") and len(first_word) > 1 and first_word[-2] not in "aeiou": + if ( + first_word.endswith("y") + and len(first_word) > 1 + and first_word[-2] not in "aeiou" + ): return first_word[:-1] + "ies" + trailing_punct + suffix return first_word + "s" + trailing_punct + suffix @@ -786,9 +868,15 @@ def _clean(text: str, source: DocPage) -> str: result = rewrite_doc_paths(result, source, docs_dir, doc_to_skill) return result - procedures = [p for p in pages if CONTENT_TYPE_ROLE.get(p.content_type) == "procedure"] - context_pages = [p for p in pages if CONTENT_TYPE_ROLE.get(p.content_type) == "context"] - reference_pages = [p for p in pages if CONTENT_TYPE_ROLE.get(p.content_type) == "reference"] + procedures = [ + p for p in pages if CONTENT_TYPE_ROLE.get(p.content_type) == "procedure" + ] + context_pages = [ + p for p in pages if CONTENT_TYPE_ROLE.get(p.content_type) == "context" + ] + reference_pages = [ + p for p in pages if CONTENT_TYPE_ROLE.get(p.content_type) == "reference" + ] # Pages without a recognized content_type default to procedure untyped = [p for p in pages if p.content_type not in CONTENT_TYPE_ROLE] @@ -822,7 +910,7 @@ def _clean(text: str, source: DocPage) -> str: body = _clean(cp.body, cp) h1_match = re.match(r"^#\s+.+\n+", body) if 
h1_match: - body = body[h1_match.end():] + body = body[h1_match.end() :] # Trim to keep SKILL.md concise; full content goes to references/ body_lines = body.split("\n") if len(body_lines) > 60: @@ -867,7 +955,7 @@ def _clean(text: str, source: DocPage) -> str: for idx, pp in enumerate(procedures): # When merging multiple docs, add a transition heading if len(procedures) > 1 and idx > 0 and pp.title: - lines.append(f"---") + lines.append("---") lines.append("") for heading, content in pp.sections: @@ -971,6 +1059,7 @@ def _clean(text: str, source: DocPage) -> str: # Grouping strategies # --------------------------------------------------------------------------- + def group_by_directory(pages: list[DocPage]) -> dict[str, list[DocPage]]: """Group pages by their parent directory.""" groups: dict[str, list[DocPage]] = {} @@ -1019,8 +1108,13 @@ def group_by_content_type(pages: list[DocPage]) -> dict[str, list[DocPage]]: # --------------------------------------------------------------------------- EXCLUDED_PATTERNS = { - "CONTRIBUTING.md", "README.md", "SETUP.md", "CHANGELOG.md", - "LICENSE.md", "license.md", "index.md", + "CONTRIBUTING.md", + "README.md", + "SETUP.md", + "CHANGELOG.md", + "LICENSE.md", + "license.md", + "index.md", } @@ -1051,6 +1145,7 @@ def scan_docs(docs_dir: Path) -> list[DocPage]: # CLI # --------------------------------------------------------------------------- + def main(): parser = argparse.ArgumentParser( description="Convert documentation files into Agent Skills.", @@ -1068,26 +1163,39 @@ def main(): %(prog)s docs/ output/ --strategy smart --dry-run """), ) - parser.add_argument("docs_dir", type=Path, help="Path to the documentation directory") - parser.add_argument("output_dir", type=Path, help="Output directory for generated skills") parser.add_argument( - "--strategy", choices=list(STRATEGIES.keys()), default="smart", + "docs_dir", type=Path, help="Path to the documentation directory" + ) + parser.add_argument( + "output_dir", type=Path, 
help="Output directory for generated skills" + ) + parser.add_argument( + "--strategy", + choices=list(STRATEGIES.keys()), + default="smart", help="Grouping strategy (default: smart)", ) parser.add_argument( - "--dry-run", action="store_true", + "--dry-run", + action="store_true", help="Show what would be generated without writing files", ) parser.add_argument( - "--prefix", default="", + "--prefix", + default="", help="Prefix for all skill names (e.g. 'nemoclaw')", ) parser.add_argument( - "--name-map", nargs="*", default=[], metavar="CAT=NAME", + "--name-map", + nargs="*", + default=[], + metavar="CAT=NAME", help="Override names: --name-map about=overview deployment=deploy-remote", ) parser.add_argument( - "--exclude", nargs="*", default=[], + "--exclude", + nargs="*", + default=[], help="Additional file patterns to exclude", ) @@ -1097,8 +1205,10 @@ def main(): name_overrides: dict[str, str] = {} for mapping in args.name_map: if "=" not in mapping: - print(f"Error: --name-map entries must be CAT=NAME, got '{mapping}'", - file=sys.stderr) + print( + f"Error: --name-map entries must be CAT=NAME, got '{mapping}'", + file=sys.stderr, + ) sys.exit(1) cat, _, nm = mapping.partition("=") name_overrides[cat.strip()] = nm.strip() @@ -1150,7 +1260,8 @@ def main(): skill_names: dict[str, str] = {} # group_name → skill_name for group_name, group_pages in sorted(groups.items()): sname = generate_skill_name( - group_name, group_pages, + group_name, + group_pages, prefix=args.prefix, name_overrides=name_overrides, ) @@ -1167,12 +1278,16 @@ def main(): pass # Generate skills - print(f"\n{'[DRY RUN] ' if args.dry_run else ''}Generating skills to {args.output_dir}/") + print( + f"\n{'[DRY RUN] ' if args.dry_run else ''}Generating skills to {args.output_dir}/" + ) summaries: list[dict] = [] for group_name, group_pages in sorted(groups.items()): name = skill_names[group_name] summary = generate_skill( - name, group_pages, args.output_dir, + name, + group_pages, + args.output_dir, 
docs_dir=docs_dir_resolved, doc_to_skill=doc_to_skill, dry_run=args.dry_run, @@ -1194,7 +1309,9 @@ def main(): warning = " ⚠ >500 lines" if lines > 500 else "" print(f" {s['name']:30s} {lines:4d} lines {refs} refs{warning}{status}") - print(f"\nTotal: {len(summaries)} skills, {total_lines} lines, {total_refs} reference files") + print( + f"\nTotal: {len(summaries)} skills, {total_lines} lines, {total_refs} reference files" + ) if any(s["skill_md_lines"] > 500 for s in summaries): print("\nNote: Skills over 500 lines should be trimmed. Move detailed") diff --git a/scripts/fix-coredns.sh b/scripts/fix-coredns.sh index 512ba851e..9b587ab33 100755 --- a/scripts/fix-coredns.sh +++ b/scripts/fix-coredns.sh @@ -57,11 +57,11 @@ fi echo "Patching CoreDNS to forward to $UPSTREAM_DNS..." -docker exec "$CLUSTER" kubectl patch configmap coredns -n kube-system --type merge -p "{\"data\":{\"Corefile\":\".:53 {\\n errors\\n health\\n ready\\n kubernetes cluster.local in-addr.arpa ip6.arpa {\\n pods insecure\\n fallthrough in-addr.arpa ip6.arpa\\n }\\n hosts /etc/coredns/NodeHosts {\\n ttl 60\\n reload 15s\\n fallthrough\\n }\\n prometheus :9153\\n cache 30\\n loop\\n reload\\n loadbalance\\n forward . $UPSTREAM_DNS\\n}\\n\"}}" > /dev/null +docker exec "$CLUSTER" kubectl patch configmap coredns -n kube-system --type merge -p "{\"data\":{\"Corefile\":\".:53 {\\n errors\\n health\\n ready\\n kubernetes cluster.local in-addr.arpa ip6.arpa {\\n pods insecure\\n fallthrough in-addr.arpa ip6.arpa\\n }\\n hosts /etc/coredns/NodeHosts {\\n ttl 60\\n reload 15s\\n fallthrough\\n }\\n prometheus :9153\\n cache 30\\n loop\\n reload\\n loadbalance\\n forward . $UPSTREAM_DNS\\n}\\n\"}}" >/dev/null -docker exec "$CLUSTER" kubectl rollout restart deploy/coredns -n kube-system > /dev/null +docker exec "$CLUSTER" kubectl rollout restart deploy/coredns -n kube-system >/dev/null echo "CoreDNS patched. Waiting for rollout..." 
-docker exec "$CLUSTER" kubectl rollout status deploy/coredns -n kube-system --timeout=30s > /dev/null +docker exec "$CLUSTER" kubectl rollout status deploy/coredns -n kube-system --timeout=30s >/dev/null echo "Done. DNS should resolve in ~10 seconds." diff --git a/scripts/install-openshell.sh b/scripts/install-openshell.sh index 1eeec7d2b..dbbeed409 100755 --- a/scripts/install-openshell.sh +++ b/scripts/install-openshell.sh @@ -40,8 +40,8 @@ version_gte() { # Returns 0 (true) if $1 >= $2 — portable, no sort -V (BSD compat) local IFS=. local -a a b - read -r -a a <<< "$1" - read -r -a b <<< "$2" + read -r -a a <<<"$1" + read -r -a b <<<"$2" for i in 0 1 2; do local ai=${a[$i]:-0} bi=${b[$i]:-0} if ((ai > bi)); then return 0; fi diff --git a/scripts/install.sh b/scripts/install.sh old mode 100644 new mode 100755 index 9e3dc1a2e..c75940232 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -14,9 +14,12 @@ GREEN='\033[0;32m' YELLOW='\033[1;33m' NC='\033[0m' -info() { echo -e "${GREEN}[install]${NC} $1"; } -warn() { echo -e "${YELLOW}[install]${NC} $1"; } -fail() { echo -e "${RED}[install]${NC} $1"; exit 1; } +info() { echo -e "${GREEN}[install]${NC} $1"; } +warn() { echo -e "${YELLOW}[install]${NC} $1"; } +fail() { + echo -e "${RED}[install]${NC} $1" + exit 1 +} define_runtime_helpers() { socket_exists() { @@ -37,8 +40,7 @@ define_runtime_helpers() { for socket_path in \ "$home_dir/.colima/default/docker.sock" \ - "$home_dir/.config/colima/default/docker.sock" - do + "$home_dir/.config/colima/default/docker.sock"; do if socket_exists "$socket_path"; then printf '%s\n' "$socket_path" return 0 @@ -115,7 +117,7 @@ refresh_path() { npm_bin="$(npm config get prefix 2>/dev/null)/bin" || true if [ -n "$npm_bin" ] && [ -d "$npm_bin" ]; then case ":$PATH:" in - *":$npm_bin:"*) ;; # already on PATH + *":$npm_bin:"*) ;; # already on PATH *) export PATH="$npm_bin:$PATH" ;; esac fi @@ -131,14 +133,14 @@ ARCH="$(uname -m)" case "$OS" in Darwin) OS_LABEL="macOS" ;; - Linux) 
OS_LABEL="Linux" ;; - *) fail "Unsupported OS: $OS" ;; + Linux) OS_LABEL="Linux" ;; + *) fail "Unsupported OS: $OS" ;; esac case "$ARCH" in - x86_64|amd64) ARCH_LABEL="x86_64" ;; - aarch64|arm64) ARCH_LABEL="aarch64" ;; - *) fail "Unsupported architecture: $ARCH" ;; + x86_64 | amd64) ARCH_LABEL="x86_64" ;; + aarch64 | arm64) ARCH_LABEL="aarch64" ;; + *) fail "Unsupported architecture: $ARCH" ;; esac info "Detected $OS_LABEL ($ARCH_LABEL)" @@ -148,16 +150,16 @@ info "Detected $OS_LABEL ($ARCH_LABEL)" NODE_MGR="none" NEED_RESHIM=false -if command -v asdf > /dev/null 2>&1 && asdf plugin list 2>/dev/null | grep -q nodejs; then +if command -v asdf >/dev/null 2>&1 && asdf plugin list 2>/dev/null | grep -q nodejs; then NODE_MGR="asdf" elif [ -n "${NVM_DIR:-}" ] && [ -s "${NVM_DIR}/nvm.sh" ]; then NODE_MGR="nvm" elif [ -s "$HOME/.nvm/nvm.sh" ]; then export NVM_DIR="$HOME/.nvm" NODE_MGR="nvm" -elif command -v fnm > /dev/null 2>&1; then +elif command -v fnm >/dev/null 2>&1; then NODE_MGR="fnm" -elif command -v brew > /dev/null 2>&1 && [ "$OS" = "Darwin" ]; then +elif command -v brew >/dev/null 2>&1 && [ "$OS" = "Darwin" ]; then NODE_MGR="brew" elif [ "$OS" = "Linux" ]; then NODE_MGR="nodesource" @@ -170,8 +172,8 @@ version_major() { } ensure_supported_runtime() { - command -v node > /dev/null 2>&1 || fail "${RUNTIME_REQUIREMENT_MSG} Node.js was not found on PATH." - command -v npm > /dev/null 2>&1 || fail "${RUNTIME_REQUIREMENT_MSG} npm was not found on PATH." + command -v node >/dev/null 2>&1 || fail "${RUNTIME_REQUIREMENT_MSG} Node.js was not found on PATH." + command -v npm >/dev/null 2>&1 || fail "${RUNTIME_REQUIREMENT_MSG} npm was not found on PATH." local node_version npm_version node_major npm_major node_version="$(node -v 2>/dev/null || true)" @@ -182,7 +184,7 @@ ensure_supported_runtime() { [[ "$node_major" =~ ^[0-9]+$ ]] || fail "Could not determine Node.js version from '${node_version}'. 
${RUNTIME_REQUIREMENT_MSG}" [[ "$npm_major" =~ ^[0-9]+$ ]] || fail "Could not determine npm version from '${npm_version}'. ${RUNTIME_REQUIREMENT_MSG}" - if (( node_major < MIN_NODE_MAJOR || npm_major < MIN_NPM_MAJOR )); then + if ((node_major < MIN_NODE_MAJOR || npm_major < MIN_NPM_MAJOR)); then fail "Unsupported runtime detected: Node.js ${node_version:-unknown}, npm ${npm_version:-unknown}. ${RUNTIME_REQUIREMENT_MSG} Upgrade Node.js and rerun the installer." fi @@ -193,7 +195,7 @@ ensure_supported_runtime() { install_node() { local current_major="" - if command -v node > /dev/null 2>&1; then + if command -v node >/dev/null 2>&1; then current_major="$(node -v 2>/dev/null | sed 's/^v//' | cut -d. -f1)" fi @@ -230,8 +232,8 @@ install_node() { brew link --overwrite node@22 2>/dev/null || true ;; nodesource) - curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash - > /dev/null 2>&1 - sudo apt-get install -y -qq nodejs > /dev/null 2>&1 + curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash - >/dev/null 2>&1 + sudo apt-get install -y -qq nodejs >/dev/null 2>&1 ;; none) fail "No Node.js version manager found. Install Node.js 22 manually, then re-run." @@ -247,12 +249,12 @@ ensure_supported_runtime # ── Install Docker ─────────────────────────────────────────────── install_docker() { - if command -v docker > /dev/null 2>&1 && docker info > /dev/null 2>&1; then + if command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1; then info "Docker already running" return 0 fi - if command -v docker > /dev/null 2>&1; then + if command -v docker >/dev/null 2>&1; then # Docker installed but not running if [ "$OS" = "Darwin" ]; then local colima_socket="" @@ -272,7 +274,7 @@ install_docker() { fail "Docker Desktop appears to be installed but is not running. Start Docker Desktop and re-run." fi - if command -v colima > /dev/null 2>&1; then + if command -v colima >/dev/null 2>&1; then info "Starting Colima..." 
colima start return 0 @@ -285,7 +287,7 @@ install_docker() { case "$OS" in Darwin) - if ! command -v brew > /dev/null 2>&1; then + if ! command -v brew >/dev/null 2>&1; then fail "Homebrew required to install Docker on macOS. Install from https://brew.sh" fi info "Installing Colima + Docker CLI via Homebrew..." @@ -294,14 +296,14 @@ install_docker() { colima start ;; Linux) - sudo apt-get update -qq > /dev/null 2>&1 - sudo apt-get install -y -qq docker.io > /dev/null 2>&1 + sudo apt-get update -qq >/dev/null 2>&1 + sudo apt-get install -y -qq docker.io >/dev/null 2>&1 sudo usermod -aG docker "$(whoami)" info "Docker installed. You may need to log out and back in for group changes." ;; esac - if ! docker info > /dev/null 2>&1; then + if ! docker info >/dev/null 2>&1; then fail "Docker installed but not running. Start Docker and re-run." fi @@ -313,7 +315,7 @@ install_docker # ── Install OpenShell CLI binary ───────────────────────────────── install_openshell() { - if command -v openshell > /dev/null 2>&1; then + if command -v openshell >/dev/null 2>&1; then info "openshell already installed: $(openshell --version 2>&1 || echo 'unknown')" return 0 fi @@ -323,20 +325,20 @@ install_openshell() { case "$OS" in Darwin) case "$ARCH_LABEL" in - x86_64) ASSET="openshell-x86_64-apple-darwin.tar.gz" ;; + x86_64) ASSET="openshell-x86_64-apple-darwin.tar.gz" ;; aarch64) ASSET="openshell-aarch64-apple-darwin.tar.gz" ;; esac ;; Linux) case "$ARCH_LABEL" in - x86_64) ASSET="openshell-x86_64-unknown-linux-musl.tar.gz" ;; + x86_64) ASSET="openshell-x86_64-unknown-linux-musl.tar.gz" ;; aarch64) ASSET="openshell-aarch64-unknown-linux-musl.tar.gz" ;; esac ;; esac tmpdir="$(mktemp -d)" - if command -v gh > /dev/null 2>&1; then + if command -v gh >/dev/null 2>&1; then GH_TOKEN="${GITHUB_TOKEN:-}" gh release download --repo NVIDIA/OpenShell \ --pattern "$ASSET" --dir "$tmpdir" else @@ -377,13 +379,12 @@ pre_extract_openclaw() { info "Pre-extracting openclaw@${openclaw_version} with system 
tar (GH-503 workaround)…" local tmpdir tmpdir="$(mktemp -d)" - if npm pack "openclaw@${openclaw_version}" --pack-destination "$tmpdir" > /dev/null 2>&1; then + if npm pack "openclaw@${openclaw_version}" --pack-destination "$tmpdir" >/dev/null 2>&1; then local tgz tgz="$(find "$tmpdir" -maxdepth 1 -name 'openclaw-*.tgz' -print -quit)" if [ -n "$tgz" ] && [ -f "$tgz" ]; then if mkdir -p "${install_dir}/node_modules/openclaw" \ - && tar xzf "$tgz" -C "${install_dir}/node_modules/openclaw" --strip-components=1 - then + && tar xzf "$tgz" -C "${install_dir}/node_modules/openclaw" --strip-components=1; then info "openclaw pre-extracted successfully" else warn "Failed to extract openclaw tarball" @@ -431,12 +432,12 @@ refresh_path # ── Verify ─────────────────────────────────────────────────────── -if ! command -v nemoclaw > /dev/null 2>&1; then +if ! command -v nemoclaw >/dev/null 2>&1; then # Try refreshing PATH one more time refresh_path fi -if ! command -v nemoclaw > /dev/null 2>&1; then +if ! command -v nemoclaw >/dev/null 2>&1; then npm_bin="$(npm config get prefix 2>/dev/null)/bin" || true if [ -n "$npm_bin" ] && [ -x "$npm_bin/nemoclaw" ]; then warn "nemoclaw installed at $npm_bin/nemoclaw but not on current PATH." 
diff --git a/scripts/lib/runtime.sh b/scripts/lib/runtime.sh old mode 100644 new mode 100755 index 3bf546847..a6bba65f2 --- a/scripts/lib/runtime.sh +++ b/scripts/lib/runtime.sh @@ -20,8 +20,7 @@ find_colima_docker_socket() { for socket_path in \ "$home_dir/.colima/default/docker.sock" \ - "$home_dir/.config/colima/default/docker.sock" - do + "$home_dir/.config/colima/default/docker.sock"; do if socket_exists "$socket_path"; then printf '%s\n' "$socket_path" return 0 @@ -69,7 +68,7 @@ docker_host_runtime() { local docker_host="${1:-${DOCKER_HOST:-}}" case "$docker_host" in - unix://*"/.colima/default/docker.sock"|unix://*"/.config/colima/default/docker.sock") + unix://*"/.colima/default/docker.sock" | unix://*"/.config/colima/default/docker.sock") printf 'colima\n' ;; unix://*"/.docker/run/docker.sock") @@ -89,7 +88,7 @@ infer_container_runtime_from_info() { local normalized normalized="$(printf '%s' "$info" | tr '[:upper:]' '[:lower:]')" - if [[ -z "${normalized// }" ]]; then + if [[ -z "${normalized// /}" ]]; then printf 'unknown\n' elif [[ "$normalized" == *podman* ]]; then printf 'podman\n' @@ -128,13 +127,13 @@ first_non_loopback_nameserver() { } get_colima_vm_nameserver() { - if ! command -v colima > /dev/null 2>&1; then + if ! 
command -v colima >/dev/null 2>&1; then return 1 fi local profile="${COLIMA_PROFILE:-default}" local resolv_conf - resolv_conf="$(colima ssh --profile "$profile" -- cat /etc/resolv.conf < /dev/null 2>/dev/null || true)" + resolv_conf="$(colima ssh --profile "$profile" -- cat /etc/resolv.conf </dev/null 2>/dev/null || true)" first_non_loopback_nameserver "$resolv_conf" } @@ -217,10 +216,10 @@ check_local_provider_health() { case "$provider" in vllm-local) - curl -sf http://localhost:8000/v1/models > /dev/null 2>&1 + curl -sf http://localhost:8000/v1/models >/dev/null 2>&1 ;; ollama-local) - curl -sf http://localhost:11434/api/tags > /dev/null 2>&1 + curl -sf http://localhost:11434/api/tags >/dev/null 2>&1 ;; *) return 1 diff --git a/scripts/nemoclaw-start.sh b/scripts/nemoclaw-start.sh index d28b96374..053506425 100755 --- a/scripts/nemoclaw-start.sh +++ b/scripts/nemoclaw-start.sh @@ -40,7 +40,8 @@ PYAUTH print_dashboard_urls() { local token chat_ui_base local_url remote_url - token="$(python3 - <<'PYTOKEN' + token="$( + python3 - <<'PYTOKEN' import json import os path = os.path.expanduser('~/.openclaw/openclaw.json') @@ -51,7 +52,7 @@ except Exception: else: print(cfg.get('gateway', {}).get('auth', {}).get('token', '')) PYTOKEN -)" + )" chat_ui_base="${CHAT_UI_URL%/}" local_url="http://127.0.0.1:${PUBLIC_PORT}/" @@ -66,7 +67,7 @@ PYTOKEN } start_auto_pair() { - nohup python3 - <<'PYAUTOPAIR' >> /tmp/gateway.log 2>&1 & + nohup python3 - <<'PYAUTOPAIR' >>/tmp/gateway.log 2>&1 & import json import subprocess import time @@ -136,7 +137,7 @@ if [ ${#NEMOCLAW_CMD[@]} -gt 0 ]; then exec "${NEMOCLAW_CMD[@]}" fi -nohup openclaw gateway run > /tmp/gateway.log 2>&1 & +nohup openclaw gateway run >/tmp/gateway.log 2>&1 & echo "[gateway] openclaw gateway launched (pid $!)" start_auto_pair print_dashboard_urls diff --git a/scripts/setup-spark.sh b/scripts/setup-spark.sh index 0cf5a20c1..9ab3ed80a 100755 --- a/scripts/setup-spark.sh +++ b/scripts/setup-spark.sh @@ -28,9 +28,10 @@ NC='\033[0m'
info() { echo -e "${GREEN}>>>${NC} $1"; } warn() { echo -e "${YELLOW}>>>${NC} $1"; } -fail() { echo -e "${RED}>>>${NC} $1"; exit 1; } - -SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +fail() { + echo -e "${RED}>>>${NC} $1" + exit 1 +} # ── Pre-flight checks ───────────────────────────────────────────── @@ -48,7 +49,7 @@ if [ -z "$REAL_USER" ]; then warn "Could not detect non-root user. Docker group will not be configured." fi -command -v docker > /dev/null || fail "Docker not found. DGX Spark should have Docker pre-installed." +command -v docker >/dev/null || fail "Docker not found. DGX Spark should have Docker pre-installed." # ── 1. Docker group ─────────────────────────────────────────────── @@ -112,7 +113,7 @@ with open('$DAEMON_JSON', 'w') as f: else info "Creating Docker daemon config with cgroupns=host..." mkdir -p "$(dirname "$DAEMON_JSON")" - echo '{ "default-cgroupns-mode": "host" }' > "$DAEMON_JSON" + echo '{ "default-cgroupns-mode": "host" }' >"$DAEMON_JSON" NEEDS_RESTART=true fi @@ -123,7 +124,7 @@ if [ "$NEEDS_RESTART" = true ]; then systemctl restart docker # Wait for Docker to be ready for i in 1 2 3 4 5 6 7 8 9 10; do - if docker info > /dev/null 2>&1; then + if docker info >/dev/null 2>&1; then break fi [ "$i" -eq 10 ] && fail "Docker didn't come back after restart. Check 'systemctl status docker'." diff --git a/scripts/setup.sh b/scripts/setup.sh index 22b3ccfec..6aeb68085 100755 --- a/scripts/setup.sh +++ b/scripts/setup.sh @@ -36,7 +36,10 @@ REPO_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd)" info() { echo -e "${GREEN}>>>${NC} $1"; } warn() { echo -e "${YELLOW}>>>${NC} $1"; } -fail() { echo -e "${RED}>>>${NC} $1"; exit 1; } +fail() { + echo -e "${RED}>>>${NC} $1" + exit 1 +} upsert_provider() { local name="$1" @@ -49,7 +52,7 @@ upsert_provider() { --config "$config" 2>&1 | grep -q "AlreadyExists"; then openshell provider update "$name" \ --credential "$credential" \ - --config "$config" > /dev/null + --config "$config" >/dev/null info "Updated $name provider" else info "Created $name provider" @@ -78,8 +81,8 @@ if docker_host="$(detect_docker_host)"; then fi # Check prerequisites -command -v openshell > /dev/null || fail "openshell CLI not found. Install the binary from https://github.com/NVIDIA/OpenShell/releases" -command -v docker > /dev/null || fail "docker not found" +command -v openshell >/dev/null || fail "openshell CLI not found. Install the binary from https://github.com/NVIDIA/OpenShell/releases" +command -v docker >/dev/null || fail "docker not found" [ -n "${NVIDIA_API_KEY:-}" ] || fail "NVIDIA_API_KEY not set. Get one from build.nvidia.com" CONTAINER_RUNTIME="$(infer_container_runtime_from_info "$(docker info 2>/dev/null || true)")" @@ -105,9 +108,9 @@ fi # 1. Gateway — always start fresh to avoid stale state info "Starting OpenShell gateway..." -openshell gateway destroy -g nemoclaw > /dev/null 2>&1 || true +openshell gateway destroy -g nemoclaw >/dev/null 2>&1 || true GATEWAY_ARGS=(--name nemoclaw) -command -v nvidia-smi > /dev/null 2>&1 && GATEWAY_ARGS+=(--gpu) +command -v nvidia-smi >/dev/null 2>&1 && GATEWAY_ARGS+=(--gpu) openshell gateway start "${GATEWAY_ARGS[@]}" 2>&1 | grep -E "Gateway|✓|Error|error" || true # Verify gateway is actually healthy (may need a moment after start) @@ -148,15 +151,15 @@ fi # 4a. Ollama (macOS local inference) if [ "$(uname -s)" = "Darwin" ]; then - if ! command -v ollama > /dev/null 2>&1; then + if ! command -v ollama >/dev/null 2>&1; then info "Installing Ollama..." 
brew install ollama 2>/dev/null || warn "Ollama install failed (brew required). Install manually: https://ollama.com" fi - if command -v ollama > /dev/null 2>&1; then + if command -v ollama >/dev/null 2>&1; then # Start Ollama service if not running if ! check_local_provider_health "ollama-local"; then info "Starting Ollama service..." - OLLAMA_HOST=0.0.0.0:11434 ollama serve > /dev/null 2>&1 & + OLLAMA_HOST=0.0.0.0:11434 ollama serve >/dev/null 2>&1 & sleep 2 fi OLLAMA_LOCAL_BASE_URL="$(get_local_provider_base_url "ollama-local")" @@ -170,11 +173,11 @@ fi # 4b. Inference route — default to nvidia-nim info "Setting inference route to nvidia-nim / Nemotron 3 Super..." -openshell inference set --no-verify --provider nvidia-nim --model nvidia/nemotron-3-super-120b-a12b > /dev/null 2>&1 +openshell inference set --no-verify --provider nvidia-nim --model nvidia/nemotron-3-super-120b-a12b >/dev/null 2>&1 # 5. Build and create sandbox info "Deleting old ${SANDBOX_NAME} sandbox (if any)..." -openshell sandbox delete "$SANDBOX_NAME" > /dev/null 2>&1 || true +openshell sandbox delete "$SANDBOX_NAME" >/dev/null 2>&1 || true info "Building and creating NemoClaw sandbox (this takes a few minutes on first run)..." @@ -192,7 +195,7 @@ CREATE_LOG=$(mktemp /tmp/nemoclaw-create-XXXXXX.log) set +e openshell sandbox create --from "$BUILD_CTX/Dockerfile" --name "$SANDBOX_NAME" \ --provider nvidia-nim \ - -- env NVIDIA_API_KEY="$NVIDIA_API_KEY" > "$CREATE_LOG" 2>&1 + -- env NVIDIA_API_KEY="$NVIDIA_API_KEY" >"$CREATE_LOG" 2>&1 CREATE_RC=$? 
set -e rm -rf "$BUILD_CTX" diff --git a/scripts/smoke-macos-install.sh b/scripts/smoke-macos-install.sh old mode 100644 new mode 100755 index 443ef86a9..9dfd3d0ac --- a/scripts/smoke-macos-install.sh +++ b/scripts/smoke-macos-install.sh @@ -14,7 +14,10 @@ NC='\033[0m' info() { echo -e "${GREEN}[smoke]${NC} $1"; } warn() { echo -e "${YELLOW}[smoke]${NC} $1"; } -fail() { echo -e "${RED}[smoke]${NC} $1"; exit 1; } +fail() { + echo -e "${RED}[smoke]${NC} $1" + exit 1 +} SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" @@ -97,7 +100,7 @@ while [ $# -gt 0 ]; do DELETE_MODELS=true shift ;; - -h|--help) + -h | --help) usage exit 0 ;; @@ -151,7 +154,7 @@ ensure_clean_start() { fail "Existing NemoClaw/OpenShell state detected. Re-run with --allow-existing-state if you really want to test on this machine." fi - if command -v openshell > /dev/null 2>&1; then + if command -v openshell >/dev/null 2>&1; then if openshell sandbox list 2>/dev/null | grep -Eq '[[:alnum:]]'; then fail "Existing OpenShell sandboxes detected. Re-run with --allow-existing-state only if you are prepared for uninstall.sh to remove them." fi @@ -173,12 +176,12 @@ feed_install_answers() { done printf 'n\n' - ) > "$answers_pipe" + ) >"$answers_pipe" } start_log_follow() { local logfile="$1" - : > "$logfile" + : >"$logfile" tail -n +1 -f "$logfile" & LOG_FOLLOW_PID=$! } @@ -198,7 +201,7 @@ run_install() { ANSWER_WRITER_PID=$! start_log_follow "$INSTALL_LOG" set +e - bash "$REPO_DIR/install.sh" < "$answers_pipe" >> "$INSTALL_LOG" 2>&1 + bash "$REPO_DIR/install.sh" <"$answers_pipe" >>"$INSTALL_LOG" 2>&1 INSTALL_STATUS=$? set -e stop_log_follow @@ -218,7 +221,7 @@ run_uninstall() { info "Running uninstall.sh for cleanup" start_log_follow "$UNINSTALL_LOG" set +e - bash "$REPO_DIR/uninstall.sh" "${args[@]}" >> "$UNINSTALL_LOG" 2>&1 + bash "$REPO_DIR/uninstall.sh" "${args[@]}" >>"$UNINSTALL_LOG" 2>&1 UNINSTALL_STATUS=$? 
set -e stop_log_follow @@ -233,7 +236,7 @@ verify_cleanup() { leftovers=1 fi - if command -v openshell > /dev/null 2>&1; then + if command -v openshell >/dev/null 2>&1; then local sandbox_output sandbox_output="$(openshell sandbox list 2>/dev/null || true)" if printf '%s' "$sandbox_output" | grep -Eq '[[:alnum:]]'; then @@ -242,7 +245,7 @@ verify_cleanup() { fi fi - if command -v docker > /dev/null 2>&1 && docker info > /dev/null 2>&1; then + if command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1; then local related_containers related_containers="$( docker ps -a --format '{{.Image}} {{.Names}}' 2>/dev/null \ diff --git a/scripts/start-services.sh b/scripts/start-services.sh index cbce0f183..303caf696 100755 --- a/scripts/start-services.sh +++ b/scripts/start-services.sh @@ -49,9 +49,12 @@ RED='\033[0;31m' YELLOW='\033[1;33m' NC='\033[0m' -info() { echo -e "${GREEN}[services]${NC} $1"; } -warn() { echo -e "${YELLOW}[services]${NC} $1"; } -fail() { echo -e "${RED}[services]${NC} $1"; exit 1; } +info() { echo -e "${GREEN}[services]${NC} $1"; } +warn() { echo -e "${YELLOW}[services]${NC} $1"; } +fail() { + echo -e "${RED}[services]${NC} $1" + exit 1 +} is_running() { local pidfile="$PIDDIR/$1.pid" @@ -68,8 +71,8 @@ start_service() { info "$name already running (PID $(cat "$PIDDIR/$name.pid"))" return 0 fi - nohup "$@" > "$PIDDIR/$name.log" 2>&1 & - echo $! > "$PIDDIR/$name.pid" + nohup "$@" >"$PIDDIR/$name.log" 2>&1 & + echo $! >"$PIDDIR/$name.pid" info "$name started (PID $!)" } @@ -127,10 +130,10 @@ do_start() { warn "Create a bot via @BotFather on Telegram and set the token." fi - command -v node > /dev/null || fail "node not found. Install Node.js first." + command -v node >/dev/null || fail "node not found. Install Node.js first." # Verify sandbox is running - if command -v openshell > /dev/null 2>&1; then + if command -v openshell >/dev/null 2>&1; then if ! openshell sandbox list 2>&1 | grep -q "Ready"; then warn "No sandbox in Ready state. 
Telegram bridge may not work until sandbox is running." fi @@ -145,7 +148,7 @@ do_start() { fi # 3. cloudflared tunnel - if command -v cloudflared > /dev/null 2>&1; then + if command -v cloudflared >/dev/null 2>&1; then start_service cloudflared \ cloudflared tunnel --url "http://localhost:$DASHBOARD_PORT" else @@ -194,7 +197,7 @@ do_start() { # Dispatch case "$ACTION" in - stop) do_stop ;; + stop) do_stop ;; status) show_status ;; - start) do_start ;; + start) do_start ;; esac diff --git a/scripts/test-inference-local.sh b/scripts/test-inference-local.sh index 93aea3625..9fddf8ec9 100755 --- a/scripts/test-inference-local.sh +++ b/scripts/test-inference-local.sh @@ -1,4 +1,9 @@ #!/usr/bin/env bash +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + # Test inference.local routing through OpenShell provider (local vLLM) -echo '{"model":"nvidia/nemotron-3-nano-30b-a3b","messages":[{"role":"user","content":"say hello"}]}' > /tmp/req.json -curl -s https://inference.local/v1/chat/completions -H "Content-Type: application/json" -d @/tmp/req.json +TMPFILE=$(mktemp) +trap 'rm -f "$TMPFILE"' EXIT +echo '{"model":"nvidia/nemotron-3-nano-30b-a3b","messages":[{"role":"user","content":"say hello"}]}' >"$TMPFILE" +curl -s https://inference.local/v1/chat/completions -H "Content-Type: application/json" -d @"$TMPFILE" diff --git a/scripts/test-inference.sh b/scripts/test-inference.sh index cbe599f95..03b0f3300 100755 --- a/scripts/test-inference.sh +++ b/scripts/test-inference.sh @@ -1,4 +1,9 @@ #!/usr/bin/env bash +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 + # Test inference.local routing through OpenShell provider -echo '{"model":"nvidia/nemotron-3-super-120b-a12b","messages":[{"role":"user","content":"say hello"}]}' > /tmp/req.json -curl -s https://inference.local/v1/chat/completions -H "Content-Type: application/json" -d @/tmp/req.json +TMPFILE=$(mktemp) +trap 'rm -f "$TMPFILE"' EXIT +echo '{"model":"nvidia/nemotron-3-super-120b-a12b","messages":[{"role":"user","content":"say hello"}]}' >"$TMPFILE" +curl -s https://inference.local/v1/chat/completions -H "Content-Type: application/json" -d @"$TMPFILE" diff --git a/scripts/walkthrough.sh b/scripts/walkthrough.sh index b50e24b06..1acda809b 100755 --- a/scripts/walkthrough.sh +++ b/scripts/walkthrough.sh @@ -43,7 +43,10 @@ set -euo pipefail -[ -n "${NVIDIA_API_KEY:-}" ] || { echo "NVIDIA_API_KEY required"; exit 1; } +[ -n "${NVIDIA_API_KEY:-}" ] || { + echo "NVIDIA_API_KEY required" + exit 1 +} echo "" echo " ┌─────────────────────────────────────────────────────┐" @@ -61,7 +64,7 @@ echo " │ \"Install requests and get the top HN story\" │" echo " └─────────────────────────────────────────────────────┘" echo "" -if ! command -v tmux > /dev/null 2>&1; then +if ! command -v tmux >/dev/null 2>&1; then echo "tmux not found. Run these in two separate terminals:" echo "" echo " Terminal 1 (TUI):" @@ -69,7 +72,7 @@ if ! 
command -v tmux > /dev/null 2>&1; then echo "" echo " Terminal 2 (Agent):" echo " openshell sandbox connect nemoclaw" - echo " export NVIDIA_API_KEY=$NVIDIA_API_KEY" + echo ' export NVIDIA_API_KEY=' echo " nemoclaw-start" echo " openclaw agent --agent main --local --session-id live" exit 0 diff --git a/test/Dockerfile.sandbox b/test/Dockerfile.sandbox index cf0ef49a9..7552e041b 100644 --- a/test/Dockerfile.sandbox +++ b/test/Dockerfile.sandbox @@ -1,3 +1,4 @@ +# hadolint global ignore=DL3008,DL3013,DL3042,DL3059,DL4006,SC2038 # Lightweight test sandbox for NemoClaw E2E testing # Simulates the OpenClaw-in-OpenShell environment without requiring # the full NVIDIA base image or openshell CLI diff --git a/test/e2e-test.sh b/test/e2e-test.sh index 2ec70331b..cf20c4e29 100755 --- a/test/e2e-test.sh +++ b/test/e2e-test.sh @@ -13,7 +13,10 @@ YELLOW='\033[1;33m' NC='\033[0m' pass() { echo -e "${GREEN}PASS${NC}: $1"; } -fail() { echo -e "${RED}FAIL${NC}: $1"; exit 1; } +fail() { + echo -e "${RED}FAIL${NC}: $1" + exit 1 +} info() { echo -e "${YELLOW}TEST${NC}: $1"; } # ------------------------------------------------------- @@ -25,12 +28,12 @@ openclaw --version && pass "OpenClaw CLI installed" || fail "OpenClaw CLI not fo info "2. 
Verify plugin can be installed" # ------------------------------------------------------- openclaw plugins install /opt/nemoclaw 2>&1 && pass "Plugin installed" || { - # If plugins install isn't available, verify the built artifacts exist - if [ -f /opt/nemoclaw/dist/index.js ]; then - pass "Plugin built successfully (dist/index.js exists)" - else - fail "Plugin build artifacts missing" - fi + # If plugins install isn't available, verify the built artifacts exist + if [ -f /opt/nemoclaw/dist/index.js ]; then + pass "Plugin built successfully (dist/index.js exists)" + else + fail "Plugin build artifacts missing" + fi } # ------------------------------------------------------- diff --git a/test/e2e/Dockerfile.full-e2e b/test/e2e/Dockerfile.full-e2e index e514c79af..2f45a0b88 100644 --- a/test/e2e/Dockerfile.full-e2e +++ b/test/e2e/Dockerfile.full-e2e @@ -1,3 +1,4 @@ +# hadolint global ignore=DL3008,DL4006,SC2086 FROM ubuntu:24.04 ENV DEBIAN_FRONTEND=noninteractive diff --git a/test/e2e/test-double-onboard.sh b/test/e2e/test-double-onboard.sh index 7ebdddcd5..13dee9920 100755 --- a/test/e2e/test-double-onboard.sh +++ b/test/e2e/test-double-onboard.sh @@ -1,4 +1,7 @@ #!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + # Double onboard: verify that consecutive `nemoclaw onboard` runs recover # automatically from stale state (gateway, port forward, registry entries) # left behind by a previous run. 
@@ -26,11 +29,26 @@ FAIL=0 SKIP=0 TOTAL=0 -pass() { ((PASS++)); ((TOTAL++)); printf '\033[32m PASS: %s\033[0m\n' "$1"; } -fail() { ((FAIL++)); ((TOTAL++)); printf '\033[31m FAIL: %s\033[0m\n' "$1"; } -skip() { ((SKIP++)); ((TOTAL++)); printf '\033[33m SKIP: %s\033[0m\n' "$1"; } -section() { echo ""; printf '\033[1;36m=== %s ===\033[0m\n' "$1"; } -info() { printf '\033[1;34m [info]\033[0m %s\n' "$1"; } +pass() { + ((PASS++)) + ((TOTAL++)) + printf '\033[32m PASS: %s\033[0m\n' "$1" +} +fail() { + ((FAIL++)) + ((TOTAL++)) + printf '\033[31m FAIL: %s\033[0m\n' "$1" +} +skip() { + ((SKIP++)) + ((TOTAL++)) + printf '\033[33m SKIP: %s\033[0m\n' "$1" +} +section() { + echo "" + printf '\033[1;36m=== %s ===\033[0m\n' "$1" +} +info() { printf '\033[1;34m [info]\033[0m %s\n' "$1"; } SANDBOX_A="e2e-double-a" SANDBOX_B="e2e-double-b" @@ -45,7 +63,7 @@ info "Destroying any leftover test sandboxes/gateway from previous runs..." # the nemoclaw registry at ~/.nemoclaw/sandboxes.json. Stale registry # entries from a previous run would cause Phase 2 to exit with # "Sandbox already exists" before the test even starts. 
-if command -v nemoclaw > /dev/null 2>&1; then +if command -v nemoclaw >/dev/null 2>&1; then nemoclaw "$SANDBOX_A" destroy 2>/dev/null || true nemoclaw "$SANDBOX_B" destroy 2>/dev/null || true fi @@ -60,21 +78,21 @@ pass "Pre-cleanup complete" # ══════════════════════════════════════════════════════════════════ section "Phase 1: Prerequisites" -if docker info > /dev/null 2>&1; then +if docker info >/dev/null 2>&1; then pass "Docker is running" else fail "Docker is not running — cannot continue" exit 1 fi -if command -v openshell > /dev/null 2>&1; then +if command -v openshell >/dev/null 2>&1; then pass "openshell CLI installed" else fail "openshell CLI not found — cannot continue" exit 1 fi -if command -v nemoclaw > /dev/null 2>&1; then +if command -v nemoclaw >/dev/null 2>&1; then pass "nemoclaw CLI installed" else fail "nemoclaw CLI not found — cannot continue" @@ -99,7 +117,7 @@ ONBOARD_LOG="$(mktemp)" NEMOCLAW_NON_INTERACTIVE=1 \ NEMOCLAW_SANDBOX_NAME="$SANDBOX_A" \ NEMOCLAW_POLICY_MODE=skip \ - nemoclaw onboard --non-interactive > "$ONBOARD_LOG" 2>&1 + nemoclaw onboard --non-interactive >"$ONBOARD_LOG" 2>&1 exit1=$? output1="$(cat "$ONBOARD_LOG")" rm -f "$ONBOARD_LOG" @@ -119,7 +137,7 @@ openshell gateway info -g nemoclaw 2>/dev/null | grep -q "nemoclaw" \ && pass "Gateway is still running (stale state)" \ || fail "Gateway is not running after first onboard" -openshell sandbox get "$SANDBOX_A" > /dev/null 2>&1 \ +openshell sandbox get "$SANDBOX_A" >/dev/null 2>&1 \ && pass "Sandbox '$SANDBOX_A' exists in openshell" \ || fail "Sandbox '$SANDBOX_A' not found in openshell" @@ -140,7 +158,7 @@ NEMOCLAW_NON_INTERACTIVE=1 \ NEMOCLAW_SANDBOX_NAME="$SANDBOX_A" \ NEMOCLAW_RECREATE_SANDBOX=1 \ NEMOCLAW_POLICY_MODE=skip \ - nemoclaw onboard --non-interactive > "$ONBOARD_LOG" 2>&1 + nemoclaw onboard --non-interactive >"$ONBOARD_LOG" 2>&1 exit2=$? 
output2="$(cat "$ONBOARD_LOG")" rm -f "$ONBOARD_LOG" @@ -182,7 +200,7 @@ ONBOARD_LOG="$(mktemp)" NEMOCLAW_NON_INTERACTIVE=1 \ NEMOCLAW_SANDBOX_NAME="$SANDBOX_B" \ NEMOCLAW_POLICY_MODE=skip \ - nemoclaw onboard --non-interactive > "$ONBOARD_LOG" 2>&1 + nemoclaw onboard --non-interactive >"$ONBOARD_LOG" 2>&1 exit3=$? output3="$(cat "$ONBOARD_LOG")" rm -f "$ONBOARD_LOG" @@ -221,11 +239,11 @@ openshell sandbox delete "$SANDBOX_B" 2>/dev/null || true openshell forward stop 18789 2>/dev/null || true openshell gateway destroy -g nemoclaw 2>/dev/null || true -openshell sandbox get "$SANDBOX_A" > /dev/null 2>&1 \ +openshell sandbox get "$SANDBOX_A" >/dev/null 2>&1 \ && fail "Sandbox '$SANDBOX_A' still exists after cleanup" \ || pass "Sandbox '$SANDBOX_A' cleaned up" -openshell sandbox get "$SANDBOX_B" > /dev/null 2>&1 \ +openshell sandbox get "$SANDBOX_B" >/dev/null 2>&1 \ && fail "Sandbox '$SANDBOX_B' still exists after cleanup" \ || pass "Sandbox '$SANDBOX_B' cleaned up" diff --git a/test/e2e/test-full-e2e.sh b/test/e2e/test-full-e2e.sh old mode 100644 new mode 100755 index 5441ca684..6e7b6f33a --- a/test/e2e/test-full-e2e.sh +++ b/test/e2e/test-full-e2e.sh @@ -1,4 +1,7 @@ #!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 + # Full E2E: install → onboard → verify inference (REAL services, no mocks) # # Proves the COMPLETE user journey including real inference against @@ -28,11 +31,26 @@ FAIL=0 SKIP=0 TOTAL=0 -pass() { ((PASS++)); ((TOTAL++)); printf '\033[32m PASS: %s\033[0m\n' "$1"; } -fail() { ((FAIL++)); ((TOTAL++)); printf '\033[31m FAIL: %s\033[0m\n' "$1"; } -skip() { ((SKIP++)); ((TOTAL++)); printf '\033[33m SKIP: %s\033[0m\n' "$1"; } -section() { echo ""; printf '\033[1;36m=== %s ===\033[0m\n' "$1"; } -info() { printf '\033[1;34m [info]\033[0m %s\n' "$1"; } +pass() { + ((PASS++)) + ((TOTAL++)) + printf '\033[32m PASS: %s\033[0m\n' "$1" +} +fail() { + ((FAIL++)) + ((TOTAL++)) + printf '\033[31m FAIL: %s\033[0m\n' "$1" +} +skip() { + ((SKIP++)) + ((TOTAL++)) + printf '\033[33m SKIP: %s\033[0m\n' "$1" +} +section() { + echo "" + printf '\033[1;36m=== %s ===\033[0m\n' "$1" +} +info() { printf '\033[1;34m [info]\033[0m %s\n' "$1"; } # Parse chat completion response — handles both content and reasoning_content # (nemotron-3-super is a reasoning model that may put output in reasoning_content) @@ -67,10 +85,10 @@ SANDBOX_NAME="${NEMOCLAW_SANDBOX_NAME:-e2e-nightly}" # ══════════════════════════════════════════════════════════════════ section "Phase 0: Pre-cleanup" info "Destroying any leftover sandbox/gateway from previous runs..." 
-if command -v nemoclaw > /dev/null 2>&1; then +if command -v nemoclaw >/dev/null 2>&1; then nemoclaw "$SANDBOX_NAME" destroy 2>/dev/null || true fi -if command -v openshell > /dev/null 2>&1; then +if command -v openshell >/dev/null 2>&1; then openshell sandbox delete "$SANDBOX_NAME" 2>/dev/null || true openshell gateway destroy -g nemoclaw 2>/dev/null || true fi @@ -81,7 +99,7 @@ pass "Pre-cleanup complete" # ══════════════════════════════════════════════════════════════════ section "Phase 1: Prerequisites" -if docker info > /dev/null 2>&1; then +if docker info >/dev/null 2>&1; then pass "Docker is running" else fail "Docker is not running — cannot continue" @@ -95,7 +113,7 @@ else exit 1 fi -if curl -sf --max-time 10 https://integrate.api.nvidia.com/v1/models > /dev/null 2>&1; then +if curl -sf --max-time 10 https://integrate.api.nvidia.com/v1/models >/dev/null 2>&1; then pass "Network access to integrate.api.nvidia.com" else fail "Cannot reach integrate.api.nvidia.com" @@ -112,7 +130,10 @@ fi # ══════════════════════════════════════════════════════════════════ section "Phase 2: Install nemoclaw (non-interactive mode)" -cd "$REPO" || { fail "Could not cd to repo root: $REPO"; exit 1; } +cd "$REPO" || { + fail "Could not cd to repo root: $REPO" + exit 1 +} info "Running install.sh --non-interactive..." info "This installs Node.js, openshell, NemoClaw, and runs onboard." @@ -122,7 +143,7 @@ INSTALL_LOG="/tmp/nemoclaw-e2e-install.log" # Write to a file instead of piping through tee. openshell's background # port-forward inherits pipe file descriptors, which prevents tee from exiting. # Use tail -f in the background for real-time output in CI logs. -bash install.sh --non-interactive > "$INSTALL_LOG" 2>&1 & +bash install.sh --non-interactive >"$INSTALL_LOG" 2>&1 & install_pid=$! tail -f "$INSTALL_LOG" --pid=$install_pid 2>/dev/null & tail_pid=$! 
@@ -151,7 +172,7 @@ else fi # Verify nemoclaw is on PATH -if command -v nemoclaw > /dev/null 2>&1; then +if command -v nemoclaw >/dev/null 2>&1; then pass "nemoclaw installed at $(command -v nemoclaw)" else fail "nemoclaw not found on PATH after install" @@ -159,14 +180,14 @@ else fi # Verify openshell was installed -if command -v openshell > /dev/null 2>&1; then +if command -v openshell >/dev/null 2>&1; then pass "openshell installed ($(openshell --version 2>&1 || echo unknown))" else fail "openshell not found on PATH after install" exit 1 fi -nemoclaw --help > /dev/null 2>&1 \ +nemoclaw --help >/dev/null 2>&1 \ && pass "nemoclaw --help exits 0" \ || fail "nemoclaw --help failed" @@ -249,11 +270,11 @@ info "[LIVE] Sandbox inference test → user → sandbox → gateway → NVIDIA ssh_config="$(mktemp)" sandbox_response="" -if openshell sandbox ssh-config "$SANDBOX_NAME" > "$ssh_config" 2>/dev/null; then +if openshell sandbox ssh-config "$SANDBOX_NAME" >"$ssh_config" 2>/dev/null; then # Use timeout if available (Linux, Homebrew), fall back to plain ssh TIMEOUT_CMD="" - command -v timeout > /dev/null 2>&1 && TIMEOUT_CMD="timeout 90" - command -v gtimeout > /dev/null 2>&1 && TIMEOUT_CMD="gtimeout 90" + command -v timeout >/dev/null 2>&1 && TIMEOUT_CMD="timeout 90" + command -v gtimeout >/dev/null 2>&1 && TIMEOUT_CMD="gtimeout 90" sandbox_response=$($TIMEOUT_CMD ssh -F "$ssh_config" \ -o StrictHostKeyChecking=no \ -o UserKnownHostsFile=/dev/null \ @@ -263,7 +284,7 @@ if openshell sandbox ssh-config "$SANDBOX_NAME" > "$ssh_config" 2>/dev/null; the "curl -s --max-time 60 https://inference.local/v1/chat/completions \ -H 'Content-Type: application/json' \ -d '{\"model\":\"nvidia/nemotron-3-super-120b-a12b\",\"messages\":[{\"role\":\"user\",\"content\":\"Reply with exactly one word: PONG\"}],\"max_tokens\":100}'" \ - 2>&1) || true + 2>&1) || true fi rm -f "$ssh_config" diff --git a/uninstall.sh b/uninstall.sh index 60d056739..4ad94911c 100755 --- a/uninstall.sh +++ b/uninstall.sh 
@@ -21,9 +21,9 @@ set -euo pipefail # --------------------------------------------------------------------------- if [[ -z "${NO_COLOR:-}" && -t 1 ]]; then if [[ "${COLORTERM:-}" == "truecolor" || "${COLORTERM:-}" == "24bit" ]]; then - C_GREEN=$'\033[38;2;118;185;0m' # #76B900 — exact NVIDIA green + C_GREEN=$'\033[38;2;118;185;0m' # #76B900 — exact NVIDIA green else - C_GREEN=$'\033[38;5;148m' # closest 256-color on dark backgrounds + C_GREEN=$'\033[38;5;148m' # closest 256-color on dark backgrounds fi C_BOLD=$'\033[1m' C_DIM=$'\033[2m' @@ -36,12 +36,16 @@ fi info() { printf "${C_GREEN}[uninstall]${C_RESET} %s\n" "$*"; } warn() { printf "${C_YELLOW}[uninstall]${C_RESET} %s\n" "$*"; } -fail() { printf "${C_RED}[uninstall]${C_RESET} %s\n" "$*" >&2; exit 1; } -ok() { printf " ${C_GREEN}✓${C_RESET} %s\n" "$*"; } +fail() { + printf "${C_RED}[uninstall]${C_RESET} %s\n" "$*" >&2 + exit 1 +} +ok() { printf " ${C_GREEN}✓${C_RESET} %s\n" "$*"; } # spin "label" cmd [args...] — spinner wrapper, same as installer. spin() { - local msg="$1"; shift + local msg="$1" + shift if [[ ! -t 1 ]]; then info "$msg" @@ -49,7 +53,8 @@ spin() { return fi - local log; log=$(mktemp) + local log + log=$(mktemp) "$@" >"$log" 2>&1 & local pid=$! i=0 local frames=('⠋' '⠙' '⠹' '⠸' '⠼' '⠴' '⠦' '⠧' '⠇' '⠏') @@ -59,7 +64,8 @@ spin() { sleep 0.08 done - wait "$pid"; local status=$? + wait "$pid" + local status=$? if [[ $status -eq 0 ]]; then printf "\r ${C_GREEN}✓${C_RESET} %s\n" "$msg" else @@ -147,7 +153,7 @@ while [ $# -gt 0 ]; do DELETE_MODELS=true shift ;; - -h|--help) + -h | --help) usage exit 0 ;; @@ -184,15 +190,18 @@ confirm() { read -r reply || true fi case "$reply" in - y|Y|yes|YES) ;; - *) info "Aborted."; exit 0 ;; + y | Y | yes | YES) ;; + *) + info "Aborted." 
+ exit 0 + ;; esac } run_optional() { local description="$1" shift - if "$@" > /dev/null 2>&1; then + if "$@" >/dev/null 2>&1; then info "$description" else warn "$description skipped" @@ -243,7 +252,7 @@ stop_helper_services() { } stop_openshell_forward_processes() { - if ! command -v pgrep > /dev/null 2>&1; then + if ! command -v pgrep >/dev/null 2>&1; then warn "pgrep not found; skipping local OpenShell forward process cleanup." return 0 fi @@ -261,7 +270,7 @@ stop_openshell_forward_processes() { fi for pid in "${pids[@]}"; do - if kill "$pid" > /dev/null 2>&1 || kill -9 "$pid" > /dev/null 2>&1; then + if kill "$pid" >/dev/null 2>&1 || kill -9 "$pid" >/dev/null 2>&1; then info "Stopped OpenShell forward process $pid" else warn "Failed to stop OpenShell forward process $pid" @@ -270,7 +279,7 @@ stop_openshell_forward_processes() { } remove_openshell_resources() { - if ! command -v openshell > /dev/null 2>&1; then + if ! command -v openshell >/dev/null 2>&1; then warn "openshell not found; skipping gateway/provider/sandbox cleanup." return 0 fi @@ -285,9 +294,9 @@ remove_openshell_resources() { } remove_nemoclaw_cli() { - if command -v npm > /dev/null 2>&1; then - npm unlink -g nemoclaw > /dev/null 2>&1 || true - if npm uninstall -g --loglevel=error nemoclaw > /dev/null 2>&1; then + if command -v npm >/dev/null 2>&1; then + npm unlink -g nemoclaw >/dev/null 2>&1 || true + if npm uninstall -g --loglevel=error nemoclaw >/dev/null 2>&1; then info "Removed global nemoclaw npm package" else warn "Global nemoclaw npm package not found or already removed" @@ -314,12 +323,12 @@ remove_nemoclaw_state() { } remove_related_docker_containers() { - if ! command -v docker > /dev/null 2>&1; then + if ! command -v docker >/dev/null 2>&1; then warn "docker not found; skipping Docker container cleanup." return 0 fi - if ! docker info > /dev/null 2>&1; then + if ! docker info >/dev/null 2>&1; then warn "docker is not running; skipping Docker container cleanup." 
return 0 fi @@ -351,7 +360,7 @@ remove_related_docker_containers() { local removed_any=false local container_id for container_id in "${container_ids[@]}"; do - if docker rm -f "$container_id" > /dev/null 2>&1; then + if docker rm -f "$container_id" >/dev/null 2>&1; then info "Removed Docker container $container_id" removed_any=true else @@ -365,12 +374,12 @@ remove_related_docker_containers() { } remove_related_docker_images() { - if ! command -v docker > /dev/null 2>&1; then + if ! command -v docker >/dev/null 2>&1; then warn "docker not found; skipping Docker image cleanup." return 0 fi - if ! docker info > /dev/null 2>&1; then + if ! docker info >/dev/null 2>&1; then warn "docker is not running; skipping Docker image cleanup." return 0 fi @@ -402,7 +411,7 @@ remove_related_docker_images() { local removed_any=false local image_id for image_id in "${image_ids[@]}"; do - if docker rmi -f "$image_id" > /dev/null 2>&1; then + if docker rmi -f "$image_id" >/dev/null 2>&1; then info "Removed Docker image $image_id" removed_any=true else @@ -422,12 +431,12 @@ gateway_volume_candidates() { } remove_related_docker_volumes() { - if ! command -v docker > /dev/null 2>&1; then + if ! command -v docker >/dev/null 2>&1; then warn "docker not found; skipping Docker volume cleanup." return 0 fi - if ! docker info > /dev/null 2>&1; then + if ! docker info >/dev/null 2>&1; then warn "docker is not running; skipping Docker volume cleanup." return 0 fi @@ -446,8 +455,8 @@ remove_related_docker_volumes() { local removed_any=false for volume_name in "${volume_names[@]}"; do - if docker volume inspect "$volume_name" > /dev/null 2>&1; then - if docker volume rm -f "$volume_name" > /dev/null 2>&1; then + if docker volume inspect "$volume_name" >/dev/null 2>&1; then + if docker volume rm -f "$volume_name" >/dev/null 2>&1; then info "Removed Docker volume $volume_name" removed_any=true else @@ -467,14 +476,14 @@ remove_optional_ollama_models() { return 0 fi - if ! 
command -v ollama > /dev/null 2>&1; then + if ! command -v ollama >/dev/null 2>&1; then warn "ollama not found; skipping model cleanup." return 0 fi local model for model in "${OLLAMA_MODELS[@]}"; do - if ollama rm "$model" > /dev/null 2>&1; then + if ollama rm "$model" >/dev/null 2>&1; then info "Removed Ollama model '$model'" else warn "Ollama model '$model' not found or already removed" @@ -495,7 +504,7 @@ remove_openshell_binary() { local removed=false local current_path="" - if command -v openshell > /dev/null 2>&1; then + if command -v openshell >/dev/null 2>&1; then current_path="$(command -v openshell)" fi