From 1fbc4da9e9d3cefb4535c81cf3f23196a54a9286 Mon Sep 17 00:00:00 2001 From: jnun Date: Sun, 22 Mar 2026 20:29:31 -0500 Subject: [PATCH 1/3] feat(env): add configurable port overrides via environment variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow users to override default service ports (NEMOCLAW_GATEWAY_PORT, NEMOCLAW_DASHBOARD_PORT, NEMOCLAW_VLLM_PORT, NEMOCLAW_OLLAMA_PORT) through environment variables. This prevents conflicts when default ports are already in use on the host. New file: - bin/lib/ports.js — central port config with env var overrides and validation (range 1024-65535) Modified files: - bin/nemoclaw.js — use DASHBOARD_PORT for port forwarding - bin/lib/onboard.js — use port constants for gateway, dashboard, inference detection, and Ollama startup - bin/lib/local-inference.js — use VLLM_PORT and OLLAMA_PORT for health checks, base URLs, and error messages - bin/lib/nim.js — use VLLM_PORT for NIM container port mapping - bin/lib/preflight.js — use DASHBOARD_PORT as default check Follows the existing process.env.NEMOCLAW_* pattern used by NEMOCLAW_MODEL, NEMOCLAW_PROVIDER, and NEMOCLAW_GPU. BREAKING: the defaults for GATEWAY_PORT (8081) and VLLM_PORT (8009) differ from the previously hard-coded 8080 and 8000 (vLLM's upstream default), so gateway startup and auto-detection of a stock vLLM instance change behavior out of the box; set NEMOCLAW_GATEWAY_PORT=8080 and NEMOCLAW_VLLM_PORT=8000 to keep the prior behavior. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- .gitmodules | 3 +++ bin/lib/local-inference.js | 25 +++++++++++++------------ bin/lib/nim.js | 7 ++++--- bin/lib/onboard.js | 38 +++++++++++++++++++------------------- bin/lib/ports.js | 23 +++++++++++++++++++++++ bin/lib/preflight.js | 3 ++- bin/nemoclaw.js | 3 ++- submodules/5daydocs | 1 + 8 files changed, 67 insertions(+), 36 deletions(-) create mode 100644 .gitmodules create mode 100644 bin/lib/ports.js create mode 160000 submodules/5daydocs diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..68a201fda --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "submodules/5daydocs"] + path = submodules/5daydocs + url = git@github.com:jnun/5daydocs.git diff --git a/bin/lib/local-inference.js b/bin/lib/local-inference.js index 1065a70e3..9a2699340 100644 --- a/bin/lib/local-inference.js +++ b/bin/lib/local-inference.js @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 const { shellQuote } = require("./runner"); +const { VLLM_PORT, OLLAMA_PORT } = require("./ports"); const HOST_GATEWAY_URL = "http://host.openshell.internal"; const CONTAINER_REACHABILITY_IMAGE = "curlimages/curl:8.10.1"; @@ -10,9 +11,9 @@ const DEFAULT_OLLAMA_MODEL = "nemotron-3-nano:30b"; function getLocalProviderBaseUrl(provider) { switch (provider) { case "vllm-local": - return `${HOST_GATEWAY_URL}:8000/v1`; + return `${HOST_GATEWAY_URL}:${VLLM_PORT}/v1`; case "ollama-local": - return `${HOST_GATEWAY_URL}:11434/v1`; + return `${HOST_GATEWAY_URL}:${OLLAMA_PORT}/v1`; default: return null; } @@ -21,9 +22,9 @@ function getLocalProviderBaseUrl(provider) { function getLocalProviderHealthCheck(provider) { switch (provider) { case "vllm-local": - return "curl -sf http://localhost:8000/v1/models 2>/dev/null"; + return `curl -sf http://localhost:${VLLM_PORT}/v1/models 2>/dev/null`; case "ollama-local": - return "curl -sf http://localhost:11434/api/tags 2>/dev/null"; + return `curl -sf http://localhost:${OLLAMA_PORT}/api/tags 
2>/dev/null`; default: return null; } @@ -32,9 +33,9 @@ function getLocalProviderHealthCheck(provider) { function getLocalProviderContainerReachabilityCheck(provider) { switch (provider) { case "vllm-local": - return `docker run --rm --add-host host.openshell.internal:host-gateway ${CONTAINER_REACHABILITY_IMAGE} -sf http://host.openshell.internal:8000/v1/models 2>/dev/null`; + return `docker run --rm --add-host host.openshell.internal:host-gateway ${CONTAINER_REACHABILITY_IMAGE} -sf http://host.openshell.internal:${VLLM_PORT}/v1/models 2>/dev/null`; case "ollama-local": - return `docker run --rm --add-host host.openshell.internal:host-gateway ${CONTAINER_REACHABILITY_IMAGE} -sf http://host.openshell.internal:11434/api/tags 2>/dev/null`; + return `docker run --rm --add-host host.openshell.internal:host-gateway ${CONTAINER_REACHABILITY_IMAGE} -sf http://host.openshell.internal:${OLLAMA_PORT}/api/tags 2>/dev/null`; default: return null; } @@ -52,12 +53,12 @@ function validateLocalProvider(provider, runCapture) { case "vllm-local": return { ok: false, - message: "Local vLLM was selected, but nothing is responding on http://localhost:8000.", + message: `Local vLLM was selected, but nothing is responding on http://localhost:${VLLM_PORT}.`, }; case "ollama-local": return { ok: false, - message: "Local Ollama was selected, but nothing is responding on http://localhost:11434.", + message: `Local Ollama was selected, but nothing is responding on http://localhost:${OLLAMA_PORT}.`, }; default: return { ok: false, message: "The selected local inference provider is unavailable." }; @@ -79,13 +80,13 @@ function validateLocalProvider(provider, runCapture) { return { ok: false, message: - "Local vLLM is responding on localhost, but containers cannot reach http://host.openshell.internal:8000. 
Ensure the server is reachable from containers, not only from the host shell.", + `Local vLLM is responding on localhost, but containers cannot reach http://host.openshell.internal:${VLLM_PORT}. Ensure the server is reachable from containers, not only from the host shell.`, }; case "ollama-local": return { ok: false, message: - "Local Ollama is responding on localhost, but containers cannot reach http://host.openshell.internal:11434. Ensure Ollama listens on 0.0.0.0:11434 instead of 127.0.0.1 so sandboxes can reach it.", + `Local Ollama is responding on localhost, but containers cannot reach http://host.openshell.internal:${OLLAMA_PORT}. Ensure Ollama listens on 0.0.0.0:${OLLAMA_PORT} instead of 127.0.0.1 so sandboxes can reach it.`, }; default: return { ok: false, message: "The selected local inference provider is unavailable from containers." }; @@ -123,7 +124,7 @@ function getOllamaWarmupCommand(model, keepAlive = "15m") { stream: false, keep_alive: keepAlive, }); - return `nohup curl -s http://localhost:11434/api/generate -H 'Content-Type: application/json' -d ${shellQuote(payload)} >/dev/null 2>&1 &`; + return `nohup curl -s http://localhost:${OLLAMA_PORT}/api/generate -H 'Content-Type: application/json' -d ${shellQuote(payload)} >/dev/null 2>&1 &`; } function getOllamaProbeCommand(model, timeoutSeconds = 120, keepAlive = "15m") { @@ -133,7 +134,7 @@ function getOllamaProbeCommand(model, timeoutSeconds = 120, keepAlive = "15m") { stream: false, keep_alive: keepAlive, }); - return `curl -sS --max-time ${timeoutSeconds} http://localhost:11434/api/generate -H 'Content-Type: application/json' -d ${shellQuote(payload)} 2>/dev/null`; + return `curl -sS --max-time ${timeoutSeconds} http://localhost:${OLLAMA_PORT}/api/generate -H 'Content-Type: application/json' -d ${shellQuote(payload)} 2>/dev/null`; } function validateOllamaModel(model, runCapture) { diff --git a/bin/lib/nim.js b/bin/lib/nim.js index 548b2db23..01cff615a 100644 --- a/bin/lib/nim.js +++ 
b/bin/lib/nim.js @@ -5,6 +5,7 @@ const { run, runCapture, shellQuote } = require("./runner"); const nimImages = require("./nim-images.json"); +const { VLLM_PORT } = require("./ports"); function containerName(sandboxName) { return `nemoclaw-nim-${sandboxName}`; @@ -125,7 +126,7 @@ function pullNimImage(model) { return image; } -function startNimContainer(sandboxName, model, port = 8000) { +function startNimContainer(sandboxName, model, port = VLLM_PORT) { const name = containerName(sandboxName); const image = getImageForModel(model); if (!image) { @@ -144,7 +145,7 @@ function startNimContainer(sandboxName, model, port = 8000) { return name; } -function waitForNimHealth(port = 8000, timeout = 300) { +function waitForNimHealth(port = VLLM_PORT, timeout = 300) { const start = Date.now(); const interval = 5000; const safePort = Number(port); @@ -186,7 +187,7 @@ function nimStatus(sandboxName) { let healthy = false; if (state === "running") { - const health = runCapture(`curl -sf http://localhost:8000/v1/models 2>/dev/null`, { + const health = runCapture(`curl -sf http://localhost:${VLLM_PORT}/v1/models 2>/dev/null`, { ignoreError: true, }); healthy = !!health; diff --git a/bin/lib/onboard.js b/bin/lib/onboard.js index 252a303c8..502c34ab8 100644 --- a/bin/lib/onboard.js +++ b/bin/lib/onboard.js @@ -33,6 +33,7 @@ const registry = require("./registry"); const nim = require("./nim"); const policies = require("./policies"); const { checkPortAvailable } = require("./preflight"); +const { DASHBOARD_PORT, GATEWAY_PORT, VLLM_PORT, OLLAMA_PORT } = require("./ports"); const EXPERIMENTAL = process.env.NEMOCLAW_EXPERIMENTAL === "1"; const USE_COLOR = !process.env.NO_COLOR && !!process.stdout.isTTY; const DIM = USE_COLOR ? 
"\x1b[2m" : ""; @@ -361,15 +362,15 @@ async function preflight() { const gwInfo = runCapture("openshell gateway info -g nemoclaw 2>/dev/null", { ignoreError: true }); if (hasStaleGateway(gwInfo)) { console.log(" Cleaning up previous NemoClaw session..."); - run("openshell forward stop 18789 2>/dev/null || true", { ignoreError: true }); + run(`openshell forward stop ${DASHBOARD_PORT} 2>/dev/null || true`, { ignoreError: true }); run("openshell gateway destroy -g nemoclaw 2>/dev/null || true", { ignoreError: true }); console.log(" ✓ Previous session cleaned up"); } - // Required ports — gateway (8080) and dashboard (18789) + // Required ports — gateway and dashboard const requiredPorts = [ - { port: 8080, label: "OpenShell gateway" }, - { port: 18789, label: "NemoClaw dashboard" }, + { port: GATEWAY_PORT, label: "OpenShell gateway" }, + { port: DASHBOARD_PORT, label: "NemoClaw dashboard" }, ]; for (const { port, label } of requiredPorts) { const portCheck = await checkPortAvailable(port); @@ -423,7 +424,7 @@ async function startGateway(gpu) { // Destroy old gateway run("openshell gateway destroy -g nemoclaw 2>/dev/null || true", { ignoreError: true }); - const gwArgs = ["--name", "nemoclaw"]; + const gwArgs = ["--name", "nemoclaw", "--port", String(GATEWAY_PORT)]; // Do NOT pass --gpu here. On DGX Spark (and most GPU hosts), inference is // routed through a host-side provider (Ollama, vLLM, or cloud API) — the // sandbox itself does not need direct GPU access. Passing --gpu causes @@ -532,7 +533,7 @@ async function createSandbox(gpu) { // --gpu is intentionally omitted. See comment in startGateway(). 
console.log(` Creating sandbox '${sandboxName}' (this takes a few minutes on first run)...`); - const chatUiUrl = process.env.CHAT_UI_URL || 'http://127.0.0.1:18789'; + const chatUiUrl = process.env.CHAT_UI_URL || `http://127.0.0.1:${DASHBOARD_PORT}`; const envArgs = [`CHAT_UI_URL=${shellQuote(chatUiUrl)}`]; if (process.env.NVIDIA_API_KEY) { envArgs.push(`NVIDIA_API_KEY=${shellQuote(process.env.NVIDIA_API_KEY)}`); @@ -600,12 +601,12 @@ async function createSandbox(gpu) { process.exit(1); } - // Release any stale forward on port 18789 before claiming it for the new sandbox. + // Release any stale forward on the dashboard port before claiming it for the new sandbox. // A previous onboard run may have left the port forwarded to a different sandbox, // which would silently prevent the new sandbox's dashboard from being reachable. - run(`openshell forward stop 18789 2>/dev/null || true`, { ignoreError: true }); + run(`openshell forward stop ${DASHBOARD_PORT} 2>/dev/null || true`, { ignoreError: true }); // Forward dashboard port to the new sandbox - run(`openshell forward start --background 18789 "${sandboxName}"`, { ignoreError: true }); + run(`openshell forward start --background ${DASHBOARD_PORT} "${sandboxName}"`, { ignoreError: true }); // Register only after confirmed ready — prevents phantom entries registry.registerSandbox({ @@ -628,8 +629,8 @@ async function setupNim(sandboxName, gpu) { // Detect local inference options const hasOllama = !!runCapture("command -v ollama", { ignoreError: true }); - const ollamaRunning = !!runCapture("curl -sf http://localhost:11434/api/tags 2>/dev/null", { ignoreError: true }); - const vllmRunning = !!runCapture("curl -sf http://localhost:8000/v1/models 2>/dev/null", { ignoreError: true }); + const ollamaRunning = !!runCapture(`curl -sf http://localhost:${OLLAMA_PORT}/api/tags 2>/dev/null`, { ignoreError: true }); + const vllmRunning = !!runCapture(`curl -sf http://localhost:${VLLM_PORT}/v1/models 2>/dev/null`, { ignoreError: 
true }); const requestedProvider = isNonInteractive() ? getNonInteractiveProvider() : null; const requestedModel = isNonInteractive() ? getNonInteractiveModel(requestedProvider || "cloud") : null; // Build options list — only show local options with NEMOCLAW_EXPERIMENTAL=1 @@ -647,14 +648,14 @@ async function setupNim(sandboxName, gpu) { options.push({ key: "ollama", label: - `Local Ollama (localhost:11434)${ollamaRunning ? " — running" : ""}` + + `Local Ollama (localhost:${OLLAMA_PORT})${ollamaRunning ? " — running" : ""}` + (ollamaRunning ? " (suggested)" : ""), }); } if (EXPERIMENTAL && vllmRunning) { options.push({ key: "vllm", - label: "Existing vLLM instance (localhost:8000) — running [experimental] (suggested)", + label: `Existing vLLM instance (localhost:${VLLM_PORT}) — running [experimental] (suggested)`, }); } @@ -747,10 +748,10 @@ async function setupNim(sandboxName, gpu) { } else if (selected.key === "ollama") { if (!ollamaRunning) { console.log(" Starting Ollama..."); - run("OLLAMA_HOST=0.0.0.0:11434 ollama serve > /dev/null 2>&1 &", { ignoreError: true }); + run(`OLLAMA_HOST=0.0.0.0:${OLLAMA_PORT} ollama serve > /dev/null 2>&1 &`, { ignoreError: true }); sleep(2); } - console.log(" ✓ Using Ollama on localhost:11434"); + console.log(` ✓ Using Ollama on localhost:${OLLAMA_PORT}`); provider = "ollama-local"; if (isNonInteractive()) { model = requestedModel || getDefaultOllamaModel(runCapture); @@ -761,9 +762,9 @@ async function setupNim(sandboxName, gpu) { console.log(" Installing Ollama via Homebrew..."); run("brew install ollama", { ignoreError: true }); console.log(" Starting Ollama..."); - run("OLLAMA_HOST=0.0.0.0:11434 ollama serve > /dev/null 2>&1 &", { ignoreError: true }); + run(`OLLAMA_HOST=0.0.0.0:${OLLAMA_PORT} ollama serve > /dev/null 2>&1 &`, { ignoreError: true }); sleep(2); - console.log(" ✓ Using Ollama on localhost:11434"); + console.log(` ✓ Using Ollama on localhost:${OLLAMA_PORT}`); provider = "ollama-local"; if (isNonInteractive()) { 
model = requestedModel || getDefaultOllamaModel(runCapture); @@ -771,7 +772,7 @@ async function setupNim(sandboxName, gpu) { model = await promptOllamaModel(); } } else if (selected.key === "vllm") { - console.log(" ✓ Using existing vLLM on localhost:8000"); + console.log(` ✓ Using existing vLLM on localhost:${VLLM_PORT}`); provider = "vllm-local"; model = "vllm-local"; } @@ -1017,7 +1018,6 @@ function printDashboard(sandboxName, model, provider) { console.log(""); console.log(` ${"─".repeat(50)}`); - // console.log(` Dashboard http://localhost:18789/`); console.log(` Sandbox ${sandboxName} (Landlock + seccomp + netns)`); console.log(` Model ${model} (${providerLabel})`); console.log(` NIM ${nimLabel}`); diff --git a/bin/lib/ports.js b/bin/lib/ports.js new file mode 100644 index 000000000..bdf9fe976 --- /dev/null +++ b/bin/lib/ports.js @@ -0,0 +1,23 @@ +// SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 +// +// Central port configuration — override via environment variables. 
+ +function parsePort(envVar, fallback) { + const raw = process.env[envVar]; + if (raw === undefined || raw === "") return fallback; + const parsed = parseInt(raw, 10); + if (Number.isNaN(parsed) || parsed < 1024 || parsed > 65535) { + throw new Error( + `Invalid port: ${envVar}="${raw}" — must be an integer between 1024 and 65535` + ); + } + return parsed; +} + +module.exports = { + DASHBOARD_PORT: parsePort("NEMOCLAW_DASHBOARD_PORT", 18789), + GATEWAY_PORT: parsePort("NEMOCLAW_GATEWAY_PORT", 8081), + VLLM_PORT: parsePort("NEMOCLAW_VLLM_PORT", 8009), + OLLAMA_PORT: parsePort("NEMOCLAW_OLLAMA_PORT", 11434), +}; diff --git a/bin/lib/preflight.js b/bin/lib/preflight.js index 7f191413d..a77768a2a 100644 --- a/bin/lib/preflight.js +++ b/bin/lib/preflight.js @@ -5,6 +5,7 @@ const net = require("net"); const { runCapture } = require("./runner"); +const { DASHBOARD_PORT } = require("./ports"); /** * Check whether a TCP port is available for listening. @@ -21,7 +22,7 @@ const { runCapture } = require("./runner"); * { ok: false, process: string, pid: number|null, reason: string } */ async function checkPortAvailable(port, opts) { - const p = port || 18789; + const p = port || DASHBOARD_PORT; const o = opts || {}; // ── lsof path ────────────────────────────────────────────────── diff --git a/bin/nemoclaw.js b/bin/nemoclaw.js index 2010cfeb2..ed2b099d5 100755 --- a/bin/nemoclaw.js +++ b/bin/nemoclaw.js @@ -30,6 +30,7 @@ const { const registry = require("./lib/registry"); const nim = require("./lib/nim"); const policies = require("./lib/policies"); +const { DASHBOARD_PORT } = require("./lib/ports"); // ── Global commands ────────────────────────────────────────────── @@ -299,7 +300,7 @@ function listSandboxes() { function sandboxConnect(sandboxName) { const qn = shellQuote(sandboxName); // Ensure port forward is alive before connecting - run(`openshell forward start --background 18789 ${qn} 2>/dev/null || true`, { ignoreError: true }); + run(`openshell forward start 
--background ${DASHBOARD_PORT} ${qn} 2>/dev/null || true`, { ignoreError: true }); runInteractive(`openshell sandbox connect ${qn}`); } diff --git a/submodules/5daydocs b/submodules/5daydocs new file mode 160000 index 000000000..83d362d07 --- /dev/null +++ b/submodules/5daydocs @@ -0,0 +1 @@ +Subproject commit 83d362d07b39dfaa242486a3533ca3d7142cb63a From 0d231dbdbf0a1594dbfe7375b2ba7ce664ffb8c0 Mon Sep 17 00:00:00 2001 From: jnun Date: Sun, 22 Mar 2026 20:29:43 -0500 Subject: [PATCH 2/3] chore: remove accidentally committed submodule reference Co-Authored-By: Claude Opus 4.6 (1M context) --- .gitmodules | 3 --- submodules/5daydocs | 1 - 2 files changed, 4 deletions(-) delete mode 100644 .gitmodules delete mode 160000 submodules/5daydocs diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 68a201fda..000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "submodules/5daydocs"] - path = submodules/5daydocs - url = git@github.com:jnun/5daydocs.git diff --git a/submodules/5daydocs b/submodules/5daydocs deleted file mode 160000 index 83d362d07..000000000 --- a/submodules/5daydocs +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 83d362d07b39dfaa242486a3533ca3d7142cb63a From 81745457c75efb2f68b8c9cba3a1618b3ce90935 Mon Sep 17 00:00:00 2001 From: jnun Date: Sun, 22 Mar 2026 21:17:18 -0500 Subject: [PATCH 3/3] fixup! feat(env): add configurable port overrides via environment variables - Tighten parsePort validation: reject non-digit strings (e.g. 
"8081abc") that parseInt silently accepted; use regex + Number() instead - Add JSDoc to parsePort and all exported constants (docstring coverage) Co-Authored-By: Claude Opus 4.6 (1M context) --- bin/lib/ports.js | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/bin/lib/ports.js b/bin/lib/ports.js index bdf9fe976..a80858ae6 100644 --- a/bin/lib/ports.js +++ b/bin/lib/ports.js @@ -3,11 +3,28 @@ // // Central port configuration — override via environment variables. +/** + * Read an environment variable as a port number, falling back to a default. + * + * Validates that the value contains only digits and falls within the + * non-privileged port range (1024–65535). + * + * @param {string} envVar - Name of the environment variable to read. + * @param {number} fallback - Default port when the variable is unset or empty. + * @returns {number} The resolved port number. + * @throws {Error} If the value is not a valid port in range. + */ function parsePort(envVar, fallback) { const raw = process.env[envVar]; if (raw === undefined || raw === "") return fallback; - const parsed = parseInt(raw, 10); - if (Number.isNaN(parsed) || parsed < 1024 || parsed > 65535) { + const trimmed = String(raw).trim(); + if (!/^\d+$/.test(trimmed)) { + throw new Error( + `Invalid port: ${envVar}="${raw}" — must be an integer between 1024 and 65535` + ); + } + const parsed = Number(trimmed); + if (parsed < 1024 || parsed > 65535) { throw new Error( `Invalid port: ${envVar}="${raw}" — must be an integer between 1024 and 65535` ); @@ -15,9 +32,13 @@ function parsePort(envVar, fallback) { return parsed; } -module.exports = { - DASHBOARD_PORT: parsePort("NEMOCLAW_DASHBOARD_PORT", 18789), - GATEWAY_PORT: parsePort("NEMOCLAW_GATEWAY_PORT", 8081), - VLLM_PORT: parsePort("NEMOCLAW_VLLM_PORT", 8009), - OLLAMA_PORT: parsePort("NEMOCLAW_OLLAMA_PORT", 11434), -}; +/** @type {number} Dashboard UI port (default 18789, override via NEMOCLAW_DASHBOARD_PORT). 
*/ +const DASHBOARD_PORT = parsePort("NEMOCLAW_DASHBOARD_PORT", 18789); +/** @type {number} OpenShell gateway port (default 8081, override via NEMOCLAW_GATEWAY_PORT). */ +const GATEWAY_PORT = parsePort("NEMOCLAW_GATEWAY_PORT", 8081); +/** @type {number} vLLM inference port (default 8009, override via NEMOCLAW_VLLM_PORT). */ +const VLLM_PORT = parsePort("NEMOCLAW_VLLM_PORT", 8009); +/** @type {number} Ollama inference port (default 11434, override via NEMOCLAW_OLLAMA_PORT). */ +const OLLAMA_PORT = parsePort("NEMOCLAW_OLLAMA_PORT", 11434); + +module.exports = { DASHBOARD_PORT, GATEWAY_PORT, VLLM_PORT, OLLAMA_PORT };