Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions .github/workflows/pr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -90,3 +90,18 @@ jobs:

- name: Run sandbox E2E tests
run: docker run --rm -v "${{ github.workspace }}/test:/opt/test" nemoclaw-sandbox-test /opt/test/e2e-test.sh

test-e2e-ollama-proxy:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- name: Checkout
uses: actions/checkout@v4

- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: "22"

- name: Run Ollama auth proxy E2E tests
run: bash test/e2e-ollama-proxy.sh
9 changes: 6 additions & 3 deletions bin/lib/local-inference.js
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@ function getLocalProviderBaseUrl(provider) {
case "vllm-local":
return `${HOST_GATEWAY_URL}:8000/v1`;
case "ollama-local":
return `${HOST_GATEWAY_URL}:11434/v1`;
// Route through the auth proxy (11435), not Ollama directly (11434)
return `${HOST_GATEWAY_URL}:11435/v1`;
default:
return null;
}
Expand All @@ -34,7 +35,9 @@ function getLocalProviderContainerReachabilityCheck(provider) {
case "vllm-local":
return `docker run --rm --add-host host.openshell.internal:host-gateway ${CONTAINER_REACHABILITY_IMAGE} -sf http://host.openshell.internal:8000/v1/models 2>/dev/null`;
case "ollama-local":
return `docker run --rm --add-host host.openshell.internal:host-gateway ${CONTAINER_REACHABILITY_IMAGE} -sf http://host.openshell.internal:11434/api/tags 2>/dev/null`;
// Check the auth proxy port (11435), not Ollama directly (11434).
// The proxy is on 0.0.0.0 and reachable from containers; Ollama is on 127.0.0.1.
return `docker run --rm --add-host host.openshell.internal:host-gateway ${CONTAINER_REACHABILITY_IMAGE} -sf http://host.openshell.internal:11435/api/tags 2>/dev/null`;
default:
return null;
}
Expand Down Expand Up @@ -85,7 +88,7 @@ function validateLocalProvider(provider, runCapture) {
return {
ok: false,
message:
"Local Ollama is responding on localhost, but containers cannot reach http://host.openshell.internal:11434. Ensure Ollama listens on 0.0.0.0:11434 instead of 127.0.0.1 so sandboxes can reach it.",
"Local Ollama is responding on localhost, but containers cannot reach http://host.openshell.internal:11435. Ensure the Ollama auth proxy (scripts/ollama-auth-proxy.js) is running.",
};
default:
return { ok: false, message: "The selected local inference provider is unavailable from containers." };
Expand Down
60 changes: 48 additions & 12 deletions bin/lib/onboard.js
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ const path = require("path");
const { spawn, spawnSync } = require("child_process");
const { ROOT, SCRIPTS, run, runCapture, shellQuote } = require("./runner");
const {
HOST_GATEWAY_URL,
getDefaultOllamaModel,
getLocalProviderBaseUrl,
getOllamaModelOptions,
Expand Down Expand Up @@ -275,6 +276,36 @@ function sleep(seconds) {
require("child_process").spawnSync("sleep", [String(seconds)]);
}

// ── Ollama auth proxy ─────────────────────────────────────────────
// Ollama has no built-in auth and must not listen on 0.0.0.0 (PSIRT
// bug 6002780). We bind Ollama to 127.0.0.1 and front it with a
// token-authenticated proxy on 0.0.0.0:11435 so the OpenShell gateway
// (running in a container) can still reach it.

let ollamaProxyToken = null;

function startOllamaAuthProxy() {
// Kill any stale proxy from a previous onboard run so the new token takes effect
run('lsof -ti :11435 | xargs kill 2>/dev/null || true', { ignoreError: true });
const crypto = require("crypto");
ollamaProxyToken = crypto.randomBytes(24).toString("hex");
run(
`OLLAMA_PROXY_TOKEN=${shellQuote(ollamaProxyToken)} ` +
`node "${SCRIPTS}/ollama-auth-proxy.js" > /dev/null 2>&1 &`,
{ ignoreError: true },
);
sleep(1);
Comment on lines +287 to +297
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Don't rotate the proxy token unless the new proxy actually owns :11435.

This helper always generates a new token and backgrounds a new proxy, but it never checks whether :11435 is already occupied or whether the child bound successfully. Since Step 1 only cleans up the OpenShell gateway, rerunning onboarding with Ollama can leave the old proxy serving the old token while ollama-local gets updated to the new one, which breaks later requests from the sandbox.

Suggested hardening
 function startOllamaAuthProxy() {
   const crypto = require("crypto");
-  ollamaProxyToken = crypto.randomBytes(24).toString("hex");
+  if (runCapture("curl -sf http://127.0.0.1:11435/api/tags 2>/dev/null", { ignoreError: true })) {
+    console.error("  Ollama auth proxy is already running on port 11435. Stop it before rerunning onboard.");
+    process.exit(1);
+  }
+  const token = crypto.randomBytes(24).toString("hex");
   run(
-    `OLLAMA_PROXY_TOKEN=${shellQuote(ollamaProxyToken)} ` +
+    `OLLAMA_PROXY_TOKEN=${shellQuote(token)} ` +
     `node "${SCRIPTS}/ollama-auth-proxy.js" > /dev/null 2>&1 &`,
     { ignoreError: true },
   );
   sleep(1);
+  if (!runCapture("curl -sf http://127.0.0.1:11435/api/tags 2>/dev/null", { ignoreError: true })) {
+    console.error("  Ollama auth proxy failed to start on port 11435.");
+    process.exit(1);
+  }
+  ollamaProxyToken = token;
 }
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@bin/lib/onboard.js` around lines 287 - 295, startOllamaAuthProxy currently
unconditionally rotates ollamaProxyToken and backgrounds a new
ollama-auth-proxy.js, which can leave the old proxy on :11435 serving the
previous token; change the flow so you first probe localhost:11435 to see if
it's already bound by a running proxy (or attempt a handshake to verify it
serves the current token), and only generate a new ollamaProxyToken and
background a new proxy when either the port is free or you can confirm the newly
spawned process successfully binds/serves :11435; after starting the child
process (the one that runs ollama-auth-proxy.js) verify ownership by attempting
a connection/handshake to :11435 and only replace ollamaProxyToken if that
verification succeeds, otherwise kill the spawned process and keep the existing
token/daemon running.

// Verify proxy is actually listening before proceeding
const probe = runCapture("curl -sf --connect-timeout 2 http://127.0.0.1:11435/api/tags 2>/dev/null", { ignoreError: true });
if (!probe) {
console.error(" Warning: Ollama auth proxy did not start on :11435");
}
}

function getOllamaProxyToken() {
return ollamaProxyToken;
}

function waitForSandboxReady(sandboxName, attempts = 10, delaySeconds = 2) {
for (let i = 0; i < attempts; i += 1) {
const exists = runCapture(`openshell sandbox get "${sandboxName}" 2>/dev/null`, { ignoreError: true });
Expand Down Expand Up @@ -746,11 +777,12 @@ async function setupNim(sandboxName, gpu) {
}
} else if (selected.key === "ollama") {
if (!ollamaRunning) {
console.log(" Starting Ollama...");
run("OLLAMA_HOST=0.0.0.0:11434 ollama serve > /dev/null 2>&1 &", { ignoreError: true });
console.log(" Starting Ollama (localhost only)...");
run("OLLAMA_HOST=127.0.0.1:11434 ollama serve > /dev/null 2>&1 &", { ignoreError: true });
sleep(2);
}
console.log(" ✓ Using Ollama on localhost:11434");
startOllamaAuthProxy();
console.log(" ✓ Using Ollama on localhost:11434 (proxy on :11435)");
provider = "ollama-local";
if (isNonInteractive()) {
model = requestedModel || getDefaultOllamaModel(runCapture);
Expand All @@ -760,10 +792,11 @@ async function setupNim(sandboxName, gpu) {
} else if (selected.key === "install-ollama") {
console.log(" Installing Ollama via Homebrew...");
run("brew install ollama", { ignoreError: true });
console.log(" Starting Ollama...");
run("OLLAMA_HOST=0.0.0.0:11434 ollama serve > /dev/null 2>&1 &", { ignoreError: true });
sleep(2);
console.log(" ✓ Using Ollama on localhost:11434");
console.log(" Starting Ollama (localhost only)...");
run("OLLAMA_HOST=127.0.0.1:11434 ollama serve > /dev/null 2>&1 &", { ignoreError: true });
sleep(2);
startOllamaAuthProxy();
console.log(" ✓ Using Ollama on localhost:11434 (proxy on :11435)");
provider = "ollama-local";
if (isNonInteractive()) {
model = requestedModel || getDefaultOllamaModel(runCapture);
Expand Down Expand Up @@ -844,15 +877,18 @@ async function setupInference(sandboxName, model, provider) {
console.error(" On macOS, local inference also depends on OpenShell host routing support.");
process.exit(1);
}
const baseUrl = getLocalProviderBaseUrl(provider);
// Use the auth proxy URL (port 11435) instead of direct Ollama (11434).
// The proxy validates a per-instance Bearer token before forwarding.
const proxyToken = getOllamaProxyToken() || "ollama";
const proxyBaseUrl = `${HOST_GATEWAY_URL}:11435/v1`;
run(
`OPENAI_API_KEY=ollama ` +
`OPENAI_API_KEY=${shellQuote(proxyToken)} ` +
`openshell provider create --name ollama-local --type openai ` +
`--credential "OPENAI_API_KEY" ` +
`--config "OPENAI_BASE_URL=${baseUrl}" 2>&1 || ` +
`OPENAI_API_KEY=ollama ` +
`--config "OPENAI_BASE_URL=${proxyBaseUrl}" 2>&1 || ` +
`OPENAI_API_KEY=${shellQuote(proxyToken)} ` +
`openshell provider update ollama-local --credential "OPENAI_API_KEY" ` +
`--config "OPENAI_BASE_URL=${baseUrl}" 2>&1 || true`,
`--config "OPENAI_BASE_URL=${proxyBaseUrl}" 2>&1 || true`,
{ ignoreError: true }
);
run(
Expand Down
73 changes: 73 additions & 0 deletions scripts/ollama-auth-proxy.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
#!/usr/bin/env node
// SPDX-FileCopyrightText: Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

/**
 * Authenticated reverse proxy for Ollama.
 *
 * Ollama has no built-in authentication. This proxy sits in front of it,
 * validating a Bearer token before forwarding requests. Ollama binds to
 * 127.0.0.1 (localhost only) while the proxy listens on 0.0.0.0 so the
 * OpenShell gateway (running in a container) can reach it.
 *
 * Env:
 *   OLLAMA_PROXY_TOKEN  — required, the Bearer token to validate
 *   OLLAMA_PROXY_PORT   — listen port (default: 11435)
 *   OLLAMA_BACKEND_PORT — Ollama port on localhost (default: 11434)
 */

const crypto = require("crypto");
const http = require("http");

const TOKEN = process.env.OLLAMA_PROXY_TOKEN;
if (!TOKEN) {
  console.error("OLLAMA_PROXY_TOKEN required");
  process.exit(1);
}

const LISTEN_PORT = Number.parseInt(process.env.OLLAMA_PROXY_PORT || "11435", 10);
const BACKEND_PORT = Number.parseInt(process.env.OLLAMA_BACKEND_PORT || "11434", 10);

// Precompute the expected header once; all comparisons are done on Buffers.
const EXPECTED_AUTH = Buffer.from(`Bearer ${TOKEN}`);

/**
 * Constant-time check of the Authorization header against the expected token.
 *
 * crypto.timingSafeEqual THROWS when the two buffers have different byte
 * lengths, so we must compare BYTE lengths first — comparing JS string
 * lengths is not sufficient: a multi-byte UTF-8 character makes string
 * length and byte length diverge, and the resulting RangeError inside the
 * request handler would crash the proxy (a trivially triggerable DoS).
 *
 * @param {string|undefined} auth - raw Authorization header value
 * @returns {boolean} true iff the header exactly matches `Bearer <TOKEN>`
 */
function isAuthorized(auth) {
  if (typeof auth !== "string") return false;
  const provided = Buffer.from(auth);
  if (provided.length !== EXPECTED_AUTH.length) return false;
  return crypto.timingSafeEqual(provided, EXPECTED_AUTH);
}

const server = http.createServer((clientReq, clientRes) => {
  // Allow unauthenticated health checks (model list only, not inference).
  const isHealthCheck = clientReq.method === "GET" && clientReq.url === "/api/tags";
  if (!isHealthCheck && !isAuthorized(clientReq.headers.authorization)) {
    clientRes.writeHead(401, { "Content-Type": "text/plain" });
    clientRes.end("Unauthorized");
    return;
  }

  // Strip the auth header (and host) before forwarding to Ollama.
  const headers = { ...clientReq.headers };
  delete headers.authorization;
  delete headers.host;

  const proxyReq = http.request(
    {
      hostname: "127.0.0.1",
      port: BACKEND_PORT,
      path: clientReq.url,
      method: clientReq.method,
      headers,
    },
    (proxyRes) => {
      clientRes.writeHead(proxyRes.statusCode, proxyRes.headers);
      proxyRes.pipe(clientRes);
    },
  );

  proxyReq.on("error", (err) => {
    // If the backend dies mid-stream, headers are already sent; calling
    // writeHead again would throw ERR_HTTP_HEADERS_SENT and crash the proxy.
    if (!clientRes.headersSent) {
      clientRes.writeHead(502, { "Content-Type": "text/plain" });
      clientRes.end(`Ollama backend error: ${err.message}`);
    } else {
      clientRes.destroy();
    }
  });

  // If the client disconnects early, tear down the upstream request so we
  // don't leak half-open connections to Ollama.
  clientReq.on("error", () => proxyReq.destroy());

  clientReq.pipe(proxyReq);
});

server.listen(LISTEN_PORT, "0.0.0.0", () => {
  console.log(` Ollama auth proxy listening on 0.0.0.0:${LISTEN_PORT} → 127.0.0.1:${BACKEND_PORT}`);
});
14 changes: 9 additions & 5 deletions scripts/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -153,17 +153,21 @@ if [ "$(uname -s)" = "Darwin" ]; then
brew install ollama 2>/dev/null || warn "Ollama install failed (brew required). Install manually: https://ollama.com"
fi
if command -v ollama > /dev/null 2>&1; then
# Start Ollama service if not running
# Start Ollama on localhost only (not 0.0.0.0 — no auth, PSIRT bug 6002780)
if ! check_local_provider_health "ollama-local"; then
info "Starting Ollama service..."
OLLAMA_HOST=0.0.0.0:11434 ollama serve > /dev/null 2>&1 &
info "Starting Ollama service (localhost only)..."
OLLAMA_HOST=127.0.0.1:11434 ollama serve > /dev/null 2>&1 &
sleep 2
fi
OLLAMA_LOCAL_BASE_URL="$(get_local_provider_base_url "ollama-local")"
# Start auth proxy so containers can reach Ollama through a token gate
OLLAMA_PROXY_TOKEN="$(head -c 24 /dev/urandom | xxd -p)"
OLLAMA_PROXY_TOKEN="$OLLAMA_PROXY_TOKEN" node "$SCRIPT_DIR/ollama-auth-proxy.js" > /dev/null 2>&1 &
sleep 1
OLLAMA_LOCAL_BASE_URL="http://host.openshell.internal:11435/v1"
upsert_provider \
"ollama-local" \
"openai" \
"OPENAI_API_KEY=ollama" \
"OPENAI_API_KEY=$OLLAMA_PROXY_TOKEN" \
"OPENAI_BASE_URL=$OLLAMA_LOCAL_BASE_URL"
fi
fi
Expand Down
Loading
Loading