diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..0b1d3e6ea0 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,8 @@ +# Vendored upstream Helm chart for kubernetes-sigs/agent-sandbox. +# Refreshed via deploy/helm/charts/agent-sandbox/vendor.sh — do not hand-edit. +# Marking as generated so GitHub collapses the diff in PRs and excludes it +# from language stats; bumps are still reviewable by reading vendor.sh's +# version arg. +deploy/helm/charts/agent-sandbox/crds/** linguist-generated=true +deploy/helm/charts/agent-sandbox/templates/** linguist-generated=true +deploy/helm/charts/agent-sandbox-*.tgz binary linguist-generated=true diff --git a/.github/workflows/release-studio-sandbox.yaml b/.github/workflows/release-studio-sandbox.yaml new file mode 100644 index 0000000000..dd83791b63 --- /dev/null +++ b/.github/workflows/release-studio-sandbox.yaml @@ -0,0 +1,78 @@ +name: Release Studio Sandbox Image + +on: + push: + branches: [main] + paths: + - "packages/sandbox/**" + workflow_dispatch: + +env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }}/mesh-sandbox + +jobs: + build-push: + name: Build & Push mesh-sandbox image + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Bun + uses: oven-sh/setup-bun@v2 + with: + bun-version: "1.3.5" + + - name: Install dependencies + run: bun install + + # The Dockerfile copies daemon/dist/daemon.js into the image, so the + # bundle has to exist before `docker build` runs. 
+ - name: Build daemon bundle + run: bun run --cwd=packages/sandbox build + + - name: Read sandbox version + id: version + run: | + VERSION=$(bun -e "console.log(require('./packages/sandbox/package.json').version)") + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Extract metadata (tags, labels) + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=raw,value=${{ steps.version.outputs.version }} + type=raw,value=latest + type=sha,format=short + + - name: Build and push Docker image + uses: docker/build-push-action@v5 + with: + context: ./packages/sandbox + file: ./packages/sandbox/image/Dockerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + platforms: linux/amd64,linux/arm64 diff --git a/apps/mesh/src/api/routes/decopilot/built-in-tools/index.ts b/apps/mesh/src/api/routes/decopilot/built-in-tools/index.ts index 6240f7c42f..01c01617c4 100644 --- a/apps/mesh/src/api/routes/decopilot/built-in-tools/index.ts +++ b/apps/mesh/src/api/routes/decopilot/built-in-tools/index.ts @@ -132,7 +132,7 @@ async function buildAllTools( // VM file tools — same six LLM-visible tools across runners (schemas in // vm-tools/schemas.ts). Dispatch resolves through `getRunnerByKind` so // the entry's recorded runnerKind drives the routing, regardless of the - // current MESH_SANDBOX_RUNNER env value. When no entry exists, fall back + // current STUDIO_SANDBOX_RUNNER env value. When no entry exists, fall back // to the QuickJS `sandbox` tool — VM_START must run first for file tools. 
const vmNeedsApproval = toolNeedsApproval(toolApprovalLevel, false, approvalOpts) !== false; diff --git a/apps/mesh/src/api/routes/decopilot/stream-core.ts b/apps/mesh/src/api/routes/decopilot/stream-core.ts index be61c5e3ac..1dd74b1941 100644 --- a/apps/mesh/src/api/routes/decopilot/stream-core.ts +++ b/apps/mesh/src/api/routes/decopilot/stream-core.ts @@ -476,7 +476,7 @@ async function streamCoreInner( { vmId: string; previewUrl: string; - runnerKind?: "docker" | "freestyle"; + runnerKind?: "docker" | "freestyle" | "agent-sandbox"; } > >; @@ -490,7 +490,8 @@ async function streamCoreInner( ? { runnerKind: (activeVmEntry.runnerKind ?? "freestyle") as | "docker" - | "freestyle", + | "freestyle" + | "agent-sandbox", vmId: activeVmEntry.vmId, } : null; diff --git a/apps/mesh/src/cli/build-child-env.ts b/apps/mesh/src/cli/build-child-env.ts index b8b6796422..af1deaffb7 100644 --- a/apps/mesh/src/cli/build-child-env.ts +++ b/apps/mesh/src/cli/build-child-env.ts @@ -77,7 +77,7 @@ export function buildChildEnv( FIRECRAWL_API_KEY: settings.firecrawlApiKey, // Sandbox runner: read from env by resolveRunnerKindFromEnv() in workers - MESH_SANDBOX_RUNNER: process.env.MESH_SANDBOX_RUNNER, + STUDIO_SANDBOX_RUNNER: process.env.STUDIO_SANDBOX_RUNNER, FREESTYLE_API_KEY: process.env.FREESTYLE_API_KEY, // Browserless diff --git a/apps/mesh/src/index.ts b/apps/mesh/src/index.ts index 5dbbb47081..37c9bed67c 100644 --- a/apps/mesh/src/index.ts +++ b/apps/mesh/src/index.ts @@ -78,6 +78,35 @@ function withSecurityHeaders(res: Response): Response { // Closed early in gracefulShutdown so the port frees before the Hono drain. let ingressServers: import("node:net").Server[] = []; +// Sandbox preview reverse-proxy (agent-sandbox only). The base domain is parsed at +// boot from STUDIO_SANDBOX_PREVIEW_URL_PATTERN; null disables the proxy and +// preview-host requests fall through to the normal mesh routing (which 404s +// because nothing matches). 
The Bun-level WS handler is registered +// unconditionally — when previewBaseDomain is null, no upgrade path runs it. +const { + parsePreviewBaseDomain, + tryHandlePreviewHttp, + tryUpgradePreviewWs, + previewWebSocketHandler, + isPreviewWsData, +} = await import("./sandbox/preview-proxy"); +const { getOrInitSharedRunner: getOrInitRunnerForPreview } = await import( + "./sandbox/lifecycle" +); +const previewBaseDomain = parsePreviewBaseDomain( + process.env.STUDIO_SANDBOX_PREVIEW_URL_PATTERN, +); +const previewProxyDeps = { + baseDomain: previewBaseDomain ?? "", + getRunner: async () => { + const runner = await getOrInitRunnerForPreview(); + if (!runner || runner.kind !== "agent-sandbox") return null; + // The agent-sandbox runner is the only one that exposes proxyPreviewRequest / + // resolvePreviewUpstreamUrl; cast is safe after the kind check. + return runner as unknown as import("@decocms/sandbox/runner/agent-sandbox").AgentSandboxRunner; + }, +}; + // Docker-only boot/dev wiring. Both hooks (boot sweep + local ingress) are // intimate with Docker-specific primitives (labels, host-port mappings); // other runners manage their own VM/ingress lifecycle. @@ -140,12 +169,48 @@ const server = Bun.serve({ hostname: "0.0.0.0", // Listen on all network interfaces (required for K8s) reusePort, fetch: async (request, server) => { + // Sandbox preview proxy: matched by Host header. Runs *before* assets + // and the Hono app so a `.preview.` request never hits + // mesh's static-file handler (which would 404 on the dev server's + // bundle paths). WS upgrades short-circuit Bun.serve's fetch by + // returning undefined; HTTP returns a Response. + if (previewBaseDomain) { + // Bun's Server type defaults T=undefined for upgrade(); cast widens + // to our PreviewWsData carrier so the WS handler can stash it. Bun + // doesn't enforce data-type consistency at runtime, only via generics. 
+ const upgradeRes = await tryUpgradePreviewWs( + request, + server as unknown as Parameters[1], + previewProxyDeps, + ); + if (upgradeRes === undefined) return; // upgraded + if (upgradeRes) return upgradeRes; // pre-upgrade error + const httpRes = await tryHandlePreviewHttp(request, previewProxyDeps); + if (httpRes) return httpRes; + } + // Try assets first (static files or dev proxy), then API // Pass server as env so Hono's getConnInfo can access requestIP const assetRes = await handleAssets(request); if (assetRes) return withSecurityHeaders(assetRes); return app.fetch(request, { server }); }, + // Multiplexed WebSocket handler. `ws.data.kind` discriminates which + // upgrader stashed the payload — preview is the only producer today; new + // upgraders should add a tagged `kind` and a branch here. + websocket: { + open(ws) { + if (isPreviewWsData(ws.data)) previewWebSocketHandler.open(ws); + }, + message(ws, message) { + if (isPreviewWsData(ws.data)) { + previewWebSocketHandler.message(ws, message); + } + }, + close(ws) { + if (isPreviewWsData(ws.data)) previewWebSocketHandler.close(ws); + }, + }, development: settings.nodeEnv !== "production", }); diff --git a/apps/mesh/src/sandbox/lifecycle.ts b/apps/mesh/src/sandbox/lifecycle.ts index 7c4138458a..24068b7dc1 100644 --- a/apps/mesh/src/sandbox/lifecycle.ts +++ b/apps/mesh/src/sandbox/lifecycle.ts @@ -1,6 +1,6 @@ /** * Runner singletons, one per kind. VM_DELETE dispatches on the entry's - * recorded runnerKind (not env), so a pod that flipped MESH_SANDBOX_RUNNER + * recorded runnerKind (not env), so a pod that flipped STUDIO_SANDBOX_RUNNER * between start and stop still tears down the right kind of VM. * Boot/shutdown sweeps are Docker-only — other runners' sandboxes outlive * mesh by design, so a generic sweep would nuke active user VMs. 
@@ -14,18 +14,57 @@ import { type RunnerKind, type SandboxRunner, } from "@decocms/sandbox/runner"; +import { getDb } from "@/database"; +import type { Kysely } from "kysely"; +import { meter } from "@/observability"; +import type { Database as DatabaseSchema } from "@/storage/types"; import { KyselySandboxRunnerStateStore } from "@/storage/sandbox-runner-state"; const runners: Partial> = {}; +// In-flight instantiate() promises, memoized per kind. Two concurrent +// callers on a cold mesh would otherwise both miss the resolved-runner +// cache and both call instantiate(); memoizing the promise (and only +// promoting to `runners` once it resolves) collapses them to a single +// build. Cleared on failure so a retry can take a fresh swing. +const inflight: Partial>> = {}; + +function resolveOnce( + kind: RunnerKind, + build: () => Promise, +): Promise { + const cached = runners[kind]; + if (cached) return Promise.resolve(cached); + const pending = inflight[kind]; + if (pending) return pending; + const promise = build() + .then((runner) => { + runners[kind] = runner; + return runner; + }) + .finally(() => { + delete inflight[kind]; + }); + inflight[kind] = promise; + return promise; +} + +// Set in prod (k8s/docker behind ingress) so the runner skips the local +// 127.0.0.1 port-forward path and emits a URL the user's browser can +// actually reach. Empty/unset = local forwarder fallback (dev). +function readPreviewUrlPattern(): string | undefined { + const raw = process.env.STUDIO_SANDBOX_PREVIEW_URL_PATTERN; + return raw && raw.trim() !== "" ? 
raw : undefined; +} async function instantiate( kind: RunnerKind, - ctx: MeshContext, + db: Kysely, ): Promise { - const stateStore = new KyselySandboxRunnerStateStore(ctx.db); + const stateStore = new KyselySandboxRunnerStateStore(db); + const previewUrlPattern = readPreviewUrlPattern(); switch (kind) { case "docker": - return new DockerSandboxRunner({ stateStore }); + return new DockerSandboxRunner({ stateStore, previewUrlPattern }); case "freestyle": { // Dynamic import — freestyle SDK is an optionalDependency so // docker-only deploys don't need it installed. @@ -34,6 +73,22 @@ async function instantiate( ); return new FreestyleSandboxRunner({ stateStore }); } + case "agent-sandbox": { + // Dynamic import — @kubernetes/client-node is heavy and only needed + // when STUDIO_SANDBOX_RUNNER=agent-sandbox. Docker/Freestyle deploys never + // load it. + const { AgentSandboxRunner } = await import( + "@decocms/sandbox/runner/agent-sandbox" + ); + // `meter` is reassigned by initObservability() after sdk.start(); read + // it at runner construction (post-init) so we get the real instruments + // not the no-op evaluated at module load. + return new AgentSandboxRunner({ + stateStore, + previewUrlPattern, + meter, + }); + } default: { const exhaustive: never = kind; throw new Error(`Unknown runner kind: ${String(exhaustive)}`); @@ -46,15 +101,24 @@ export function getSharedRunner(ctx: MeshContext): Promise { } /** VM_DELETE uses this so teardown follows the entry's recorded runnerKind. 
*/ -export async function getRunnerByKind( +export function getRunnerByKind( ctx: MeshContext, kind: RunnerKind, ): Promise { - const cached = runners[kind]; - if (cached) return cached; - const runner = await instantiate(kind, ctx); - runners[kind] = runner; - return runner; + return resolveOnce(kind, () => instantiate(kind, ctx.db)); +} + +/** + * Eager runner accessor for paths that need the runner before any user + * request — preview-host proxying at the Bun.serve layer is the only caller + * today. Reads the runner kind from env and constructs without a + * MeshContext (the state store only needs a Kysely instance). Returns null + * when no runner kind is configured. + */ +export async function getOrInitSharedRunner(): Promise { + const kind = tryResolveRunnerKindFromEnv(); + if (!kind) return null; + return resolveOnce(kind, () => instantiate(kind, getDb().db)); } /** diff --git a/apps/mesh/src/sandbox/preview-proxy.test.ts b/apps/mesh/src/sandbox/preview-proxy.test.ts new file mode 100644 index 0000000000..3803c6c71f --- /dev/null +++ b/apps/mesh/src/sandbox/preview-proxy.test.ts @@ -0,0 +1,298 @@ +import { describe, expect, it } from "bun:test"; +import { + extractHandleFromHost, + parsePreviewBaseDomain, + tryHandlePreviewHttp, + tryUpgradePreviewWs, +} from "./preview-proxy"; + +/** + * Inline mirror of `applyPreviewPattern` from + * `packages/sandbox/server/runner/shared/preview-url.ts` — kept here as a + * fixture so the round-trip test below has no cross-package coupling. If the + * real implementation drifts, the round-trip test will fail and force this + * mirror to update too. 
+ */ +function applyPreviewPatternFixture(pattern: string, handle: string): string { + const base = pattern.replace(/\/+$/, ""); + if (base.includes("{handle}")) { + return `${base.replace("{handle}", handle)}/`; + } + try { + const u = new URL(base); + u.hostname = `${handle}.${u.hostname}`; + return `${u.toString()}/`; + } catch { + return `${base}/${handle}/`; + } +} + +describe("parsePreviewBaseDomain", () => { + it("extracts the base from {handle}-templated patterns", () => { + expect(parsePreviewBaseDomain("https://{handle}.preview.decocms.com")).toBe( + "preview.decocms.com", + ); + }); + + it("extracts from the bare-pattern form (no template)", () => { + expect(parsePreviewBaseDomain("https://preview.example.com")).toBe( + "preview.example.com", + ); + }); + + it("returns null for empty/unset patterns", () => { + expect(parsePreviewBaseDomain(null)).toBeNull(); + expect(parsePreviewBaseDomain(undefined)).toBeNull(); + expect(parsePreviewBaseDomain("")).toBeNull(); + expect(parsePreviewBaseDomain(" ")).toBeNull(); + }); + + it("returns null for malformed URLs", () => { + expect(parsePreviewBaseDomain("not-a-url")).toBeNull(); + }); + + it("returns null when the templated form has no base", () => { + // `{handle}.localhost` — strip leading subdomain leaves "localhost", + // which is technically valid, but `{handle}` alone (no dot) isn't. 
+ expect(parsePreviewBaseDomain("https://{handle}")).toBeNull(); + }); +}); + +describe("extractHandleFromHost", () => { + const base = "preview.decocms.com"; + + it("extracts studio-sb- handles from the matching subdomain", () => { + expect( + extractHandleFromHost("studio-sb-abc123.preview.decocms.com", base), + ).toBe("studio-sb-abc123"); + }); + + it("ignores port suffix in Host header", () => { + expect( + extractHandleFromHost("studio-sb-abc.preview.decocms.com:8080", base), + ).toBe("studio-sb-abc"); + }); + + it("is case-insensitive on host + base", () => { + expect( + extractHandleFromHost("Studio-Sb-ABC.Preview.DecocMs.com", base), + ).toBe("studio-sb-abc"); + }); + + it("returns null when the handle prefix is missing", () => { + expect( + extractHandleFromHost("randomthing.preview.decocms.com", base), + ).toBeNull(); + }); + + it("returns null when the base domain doesn't match", () => { + expect( + extractHandleFromHost("studio-sb-abc.preview.example.org", base), + ).toBeNull(); + }); + + it("rejects nested subdomains", () => { + // foo.studio-sb-abc.preview.decocms.com → strip suffix yields + // "foo.studio-sb-abc" which has a dot → null. + expect( + extractHandleFromHost("foo.studio-sb-abc.preview.decocms.com", base), + ).toBeNull(); + }); + + it("returns null for missing host or base", () => { + expect(extractHandleFromHost(null, base)).toBeNull(); + expect(extractHandleFromHost(undefined, base)).toBeNull(); + expect( + extractHandleFromHost("studio-sb-abc.preview.decocms.com", ""), + ).toBeNull(); + }); +}); + +describe("applyPreviewPattern <-> parse/extract round-trip", () => { + // Walks the contract that applyPreviewPattern (runner) and + // parsePreviewBaseDomain + extractHandleFromHost (preview proxy) are + // inverses. If either side ever supports a pattern shape the other doesn't + // recognize, this test catches the mismatch before it silently misroutes + // production traffic. 
+ const handle = "studio-sb-abc123"; + + const patterns = [ + "https://{handle}.preview.decocms.com", + "https://preview.example.com", + "https://{handle}.preview.example.com/", + "https://stage.example.com", + ]; + + for (const pattern of patterns) { + it(`round-trips: ${pattern}`, () => { + const previewUrl = applyPreviewPatternFixture(pattern, handle); + const url = new URL(previewUrl); + const baseDomain = parsePreviewBaseDomain(pattern); + expect(baseDomain).not.toBeNull(); + const recovered = extractHandleFromHost(url.host, baseDomain!); + expect(recovered).toBe(handle); + }); + } +}); + +describe("tryHandlePreviewHttp", () => { + const baseDomain = "preview.example.com"; + + it("returns null when the host doesn't match a preview URL", async () => { + const req = new Request("https://api.example.com/foo", { + headers: { host: "api.example.com" }, + }); + const res = await tryHandlePreviewHttp(req, { + baseDomain, + getRunner: async () => null, + }); + expect(res).toBeNull(); + }); + + it("returns 503 when the runner isn't configured for K8s", async () => { + const req = new Request("https://studio-sb-abc.preview.example.com/", { + headers: { host: "studio-sb-abc.preview.example.com" }, + }); + const res = await tryHandlePreviewHttp(req, { + baseDomain, + getRunner: async () => null, + }); + expect(res).not.toBeNull(); + expect(res!.status).toBe(503); + }); + + it("delegates to runner.proxyPreviewRequest with the parsed handle", async () => { + let received: { handle: string; req: Request } | null = null; + const fakeRunner = { + proxyPreviewRequest: async (handle: string, req: Request) => { + received = { handle, req }; + return new Response("ok", { status: 200 }); + }, + }; + const req = new Request( + "https://studio-sb-deadbeef.preview.example.com/foo", + { + headers: { host: "studio-sb-deadbeef.preview.example.com" }, + }, + ); + const res = await tryHandlePreviewHttp(req, { + baseDomain, + // biome-ignore lint/suspicious/noExplicitAny: structural 
duck-type + getRunner: async () => fakeRunner as any, + }); + expect(res).not.toBeNull(); + expect(res!.status).toBe(200); + expect(received).not.toBeNull(); + expect(received!.handle).toBe("studio-sb-deadbeef"); + }); +}); + +describe("tryUpgradePreviewWs", () => { + const baseDomain = "preview.example.com"; + const previewHost = "studio-sb-abc123.preview.example.com"; + + function wsRequest(path: string, host: string = previewHost): Request { + return new Request(`https://${host}${path}`, { + headers: { + host, + upgrade: "websocket", + connection: "upgrade", + "sec-websocket-key": "x3JJHMbDL1EzLkh9GBhXDw==", + "sec-websocket-version": "13", + }, + }); + } + + it("returns null when not a WS upgrade", async () => { + const req = new Request(`https://${previewHost}/`, { + headers: { host: previewHost }, + }); + const res = await tryUpgradePreviewWs( + req, + { upgrade: () => true }, + { baseDomain, getRunner: async () => null }, + ); + expect(res).toBeNull(); + }); + + it("returns null when host doesn't match a preview", async () => { + const req = wsRequest("/", "api.example.com"); + const res = await tryUpgradePreviewWs( + req, + { upgrade: () => true }, + { baseDomain, getRunner: async () => null }, + ); + expect(res).toBeNull(); + }); + + it("returns 503 when the runner isn't ready", async () => { + const req = wsRequest("/"); + const res = await tryUpgradePreviewWs( + req, + { upgrade: () => true }, + { baseDomain, getRunner: async () => null }, + ); + expect(res).not.toBeNull(); + expect((res as Response).status).toBe(503); + }); + + it("returns 404 when sandbox lookup misses", async () => { + const fakeRunner = { + resolvePreviewUpstreamUrl: async () => null, + }; + const req = wsRequest("/"); + const res = await tryUpgradePreviewWs( + req, + { upgrade: () => true }, + { + baseDomain, + // biome-ignore lint/suspicious/noExplicitAny: structural duck-type + getRunner: async () => fakeRunner as any, + }, + ); + expect(res).not.toBeNull(); + expect((res as 
Response).status).toBe(404); + }); + + it("rejects /_decopilot_vm/* paths even on WS", async () => { + const fakeRunner = { + resolvePreviewUpstreamUrl: async () => "http://x:9000", + }; + const req = wsRequest("/_decopilot_vm/bash"); + const res = await tryUpgradePreviewWs( + req, + { upgrade: () => true }, + { + baseDomain, + // biome-ignore lint/suspicious/noExplicitAny: structural duck-type + getRunner: async () => fakeRunner as any, + }, + ); + expect(res).not.toBeNull(); + expect((res as Response).status).toBe(404); + }); + + it("calls server.upgrade and returns undefined when upgrade succeeds", async () => { + const fakeRunner = { + resolvePreviewUpstreamUrl: async () => "http://upstream:9000", + }; + let upgradeArgs: { req: Request; data: unknown } | null = null; + const server = { + upgrade: (req: Request, opts?: { data?: unknown }) => { + upgradeArgs = { req, data: opts?.data }; + return true; + }, + }; + const req = wsRequest("/__vite-hmr"); + const res = await tryUpgradePreviewWs(req, server, { + baseDomain, + // biome-ignore lint/suspicious/noExplicitAny: structural duck-type + getRunner: async () => fakeRunner as any, + }); + expect(res).toBeUndefined(); + expect(upgradeArgs).not.toBeNull(); + const data = upgradeArgs!.data as { upstreamUrl: string; kind: string }; + expect(data.kind).toBe("preview"); + expect(data.upstreamUrl).toBe("ws://upstream:9000/__vite-hmr"); + }); +}); diff --git a/apps/mesh/src/sandbox/preview-proxy.ts b/apps/mesh/src/sandbox/preview-proxy.ts new file mode 100644 index 0000000000..43d544c815 --- /dev/null +++ b/apps/mesh/src/sandbox/preview-proxy.ts @@ -0,0 +1,340 @@ +/** + * Sandbox preview reverse-proxy. + * + * Inbound requests to `.preview.` are routed to the + * matching sandbox's daemon at port 9000. Mesh stays in the request path + * for the first ship; long-term plan is per-claim HTTPRoute objects (see + * the K8s sandbox plan), but this keeps DNS + RBAC simple while we ship. 
+ * + * Why preview must terminate on port 9000 and never on the in-pod dev port + * (3000): the daemon's reverse proxy strips CSP/X-Frame headers and injects + * the HMR bootstrap that vite needs to function inside the studio iframe. + * Routing browsers straight at the dev port breaks SSE + iframe embedding. + * + * Auth model: preview URLs are open-by-handle, the same way Vercel preview + * URLs are. The handle is the secret. /_decopilot_vm/* is rejected here + * (defense-in-depth — the daemon's bearer-token check rejects it too) so + * the admin surface stays uncallable from preview hosts. + */ + +import { + HANDLE_PREFIX, + type AgentSandboxRunner, +} from "@decocms/sandbox/runner/agent-sandbox"; + +/** + * Cap on frames buffered between client upgrade and upstream WS open. Vite + * HMR sends ~1 frame per file event, so 256 covers a normal cold start with + * room to spare while preventing a slow/blackholed upstream from exhausting + * mesh memory. + */ +const MAX_PENDING_FRAMES = 256; + +/** + * Parses the base preview hostname (e.g. `preview.decocms.com`) out of the + * `STUDIO_SANDBOX_PREVIEW_URL_PATTERN` value. The pattern has the form + * `https://{handle}.preview.example.com` (or `https://{handle}.`), + * matching what the K8s runner's `applyPreviewPattern` produces. Returns + * null when the pattern is empty/missing/malformed — preview proxying is + * disabled in that case. + */ +export function parsePreviewBaseDomain( + pattern: string | null | undefined, +): string | null { + if (!pattern || pattern.trim() === "") return null; + // Substituting a placeholder before parsing handles the `{handle}` form. + // For the non-templated form we still get a valid URL whose hostname is + // the base. + const probe = pattern.includes("{handle}") + ? 
pattern.replace("{handle}", "__handle__") + : pattern; + let url: URL; + try { + url = new URL(probe); + } catch { + return null; + } + // `__handle__.preview.example.com` → strip the leading subdomain to get the + // base. If there's no leading subdomain segment, the pattern was bad. + const host = url.hostname; + if (pattern.includes("{handle}")) { + const dot = host.indexOf("."); + if (dot <= 0 || dot === host.length - 1) return null; + return host.slice(dot + 1); + } + // Bare-pattern form (no `{handle}`): `https://preview.example.com` — the + // hostname *is* the base. The runner's applyPreviewPattern in this case + // emits `https://.preview.example.com`. + return host; +} + +/** + * Pulls the sandbox handle out of a request Host header. Returns null when + * the host doesn't match `.` or the handle doesn't carry + * the K8s runner's `studio-sb-` prefix (anything else means the request isn't + * for a mesh sandbox preview and should fall through to the rest of the + * mesh API). + */ +export function extractHandleFromHost( + host: string | null | undefined, + baseDomain: string, +): string | null { + if (!host || !baseDomain) return null; + const colon = host.indexOf(":"); + const cleanHost = (colon >= 0 ? host.slice(0, colon) : host).toLowerCase(); + const cleanBase = baseDomain.toLowerCase().replace(/^\.+|\.+$/g, ""); + const suffix = `.${cleanBase}`; + if (!cleanHost.endsWith(suffix)) return null; + const handle = cleanHost.slice(0, cleanHost.length - suffix.length); + // Reject empty / nested subdomains: `foo.bar.preview.example.com` would be + // `foo.bar`, which is not a valid handle. + if (!handle || handle.includes(".")) return null; + if (!handle.startsWith(HANDLE_PREFIX)) return null; + return handle; +} + +export interface PreviewProxyDeps { + /** + * Lazy runner accessor. Returns null when the mesh isn't configured for + * the agent-sandbox runner — the caller treats null as "not a preview + * deployment" and falls through. 
+ */ + getRunner: () => Promise; + baseDomain: string; +} + +/** + * Returns a Response if the request was a preview request (handled here), + * otherwise null (caller should fall through to its normal routing). + * + * 503 is returned when the runner isn't ready yet — preview traffic hit the + * mesh before any sandbox tool initialized the runner. The browser will + * retry; by then the runner should be up. + */ +export async function tryHandlePreviewHttp( + request: Request, + deps: PreviewProxyDeps, +): Promise { + const handle = extractHandleFromHost( + request.headers.get("host"), + deps.baseDomain, + ); + if (!handle) return null; + + const runner = await deps.getRunner(); + if (!runner) { + return errorResponse(503, "preview proxy not configured"); + } + return runner.proxyPreviewRequest(handle, request); +} + +// Cross-origin error envelope. Studio runs under its own origin and reads +// these via fetch (EventSource probeMissing, SSE error frames); without ACAO +// the browser hides the status and devtools surfaces an opaque CORS failure. +function errorResponse(status: number, message: string): Response { + return new Response(JSON.stringify({ error: message }), { + status, + headers: { + "content-type": "application/json", + "access-control-allow-origin": "*", + }, + }); +} + +/** + * WebSocket upgrade payload — Bun's `server.upgrade()` stashes this under + * `ws.data` for the websocket handler to use. Keeping the upstream URL + + * subprotocols here means the handler doesn't need to re-parse the host. + */ +export interface PreviewWsData { + kind: "preview"; + upstreamUrl: string; + upstreamProtocols: string[]; + /** Buffer messages received before the upstream WS finishes opening. 
*/ + pending: Array; + upstream: WebSocket | null; + closed: boolean; +} + +export function isPreviewWsData(data: unknown): data is PreviewWsData { + return ( + typeof data === "object" && + data !== null && + (data as { kind?: unknown }).kind === "preview" + ); +} + +/** + * Bun-specific upgrade interceptor: consumed by the top-level Bun.serve + * fetch handler. Returns: + * - undefined when the request was upgraded (Bun.serve treats this as + * "the response will come from the WS handler later") + * - a Response when the request matched preview but couldn't be upgraded + * (404/502/503), letting the caller return it directly + * - null when the request isn't a preview WS request (caller falls through) + * + * Only handles `Upgrade: websocket` requests. Plain HTTP/SSE goes through + * `tryHandlePreviewHttp` instead. + */ +export async function tryUpgradePreviewWs( + request: Request, + server: BunServerLike, + deps: PreviewProxyDeps, +): Promise { + if ((request.headers.get("upgrade") ?? "").toLowerCase() !== "websocket") { + return null; + } + const handle = extractHandleFromHost( + request.headers.get("host"), + deps.baseDomain, + ); + if (!handle) return null; + + const runner = await deps.getRunner(); + if (!runner) { + return errorResponse(503, "preview proxy not configured"); + } + + const upstreamHttp = await runner.resolvePreviewUpstreamUrl(handle); + if (!upstreamHttp) { + return errorResponse(404, "sandbox not found"); + } + + const reqUrl = new URL(request.url); + if (reqUrl.pathname.startsWith("/_decopilot_vm")) { + return errorResponse(404, "not found"); + } + + const upstreamUrl = `${upstreamHttp.replace(/^http/, "ws")}${reqUrl.pathname}${reqUrl.search}`; + const protocolHeader = request.headers.get("sec-websocket-protocol"); + const upstreamProtocols = protocolHeader + ? 
protocolHeader + .split(",") + .map((s) => s.trim()) + .filter(Boolean) + : []; + + const data: PreviewWsData = { + kind: "preview", + upstreamUrl, + upstreamProtocols, + pending: [], + upstream: null, + closed: false, + }; + + const upgraded = server.upgrade(request, { data }); + if (!upgraded) { + return errorResponse(426, "upgrade failed"); + } + return undefined; +} + +/** + * Idempotent shutdown for one side of the preview WS bridge. Marks the + * connection as closed (so other event listeners stop forwarding), then + * closes both client and upstream sockets — `try/catch` around each because + * Bun + the WebSocket constructor both throw on close-after-close. + */ +function closePreviewBridge( + ws: PreviewServerWebSocket, + data: PreviewWsData, + code: number, + reason: string, +): void { + if (data.closed) return; + data.closed = true; + try { + ws.close(code, reason); + } catch {} + try { + data.upstream?.close(); + } catch {} +} + +/** + * Bun WebSocket handler for the upgraded preview connection. Pumps frames + * between the browser side (`ws`) and the upstream daemon (`ws.data.upstream`) + * in both directions. Buffers inbound frames received before the upstream + * dial completes — Bun delivers messages on `ws` immediately after upgrade, + * and the upstream WebSocket handshake takes a non-zero number of ticks. + */ +export const previewWebSocketHandler = { + open(ws: PreviewServerWebSocket) { + const data = ws.data; + if (!isPreviewWsData(data)) return; + let upstream: WebSocket; + try { + upstream = + data.upstreamProtocols.length > 0 + ? new WebSocket(data.upstreamUrl, data.upstreamProtocols) + : new WebSocket(data.upstreamUrl); + } catch (err) { + console.warn( + `[preview-ws] failed to dial upstream ${data.upstreamUrl}: ${err instanceof Error ? 
err.message : String(err)}`, + ); + closePreviewBridge(ws, data, 1011, "upstream connect failed"); + return; + } + upstream.binaryType = "arraybuffer"; + data.upstream = upstream; + + upstream.addEventListener("open", () => { + while (data.pending.length > 0) { + const msg = data.pending.shift(); + if (msg !== undefined) upstream.send(msg); + } + }); + upstream.addEventListener("message", (ev: MessageEvent) => { + if (data.closed) return; + ws.send(ev.data as string | Uint8Array | ArrayBuffer); + }); + upstream.addEventListener("close", (ev: CloseEvent) => { + closePreviewBridge(ws, data, ev.code || 1000, ev.reason || ""); + }); + upstream.addEventListener("error", () => { + closePreviewBridge(ws, data, 1011, "upstream error"); + }); + }, + message( + ws: PreviewServerWebSocket, + message: string | Uint8Array | ArrayBuffer, + ) { + const data = ws.data; + if (!isPreviewWsData(data)) return; + const upstream = data.upstream; + if (upstream && upstream.readyState === WebSocket.OPEN) { + upstream.send(message); + return; + } + // Cap the pre-handshake buffer. A blackholed upstream + a chatty client + // (e.g. vite HMR firing while the daemon is still booting) would otherwise + // grow this without bound. 1011 = "internal error" per RFC 6455. + if (data.pending.length >= MAX_PENDING_FRAMES) { + closePreviewBridge(ws, data, 1011, "preview ws backlog overflow"); + return; + } + data.pending.push(message); + }, + close(ws: PreviewServerWebSocket) { + const data = ws.data; + if (!isPreviewWsData(data)) return; + closePreviewBridge(ws, data, 1000, ""); + }, +}; + +// Minimal structural types to avoid taking a hard dependency on `bun-types` +// in this module. The real Bun.ServerWebSocket / Bun.Server are wider but +// we only touch these members. 
+export interface PreviewServerWebSocket { + data: PreviewWsData | unknown; + send(data: string | Uint8Array | ArrayBuffer): number; + close(code?: number, reason?: string): void; +} + +export interface BunServerLike { + upgrade( + request: Request, + options?: { data?: unknown; headers?: HeadersInit }, + ): boolean; +} diff --git a/apps/mesh/src/tools/vm/start.test.ts b/apps/mesh/src/tools/vm/start.test.ts index 7258bbb332..52fa226db0 100644 --- a/apps/mesh/src/tools/vm/start.test.ts +++ b/apps/mesh/src/tools/vm/start.test.ts @@ -9,9 +9,9 @@ import type { } from "@decocms/sandbox/runner"; import { composeSandboxRef } from "@decocms/sandbox/runner"; -// Pin runner kind — the dev env flips MESH_SANDBOX_RUNNER and VM_START +// Pin runner kind — the dev env flips STUDIO_SANDBOX_RUNNER and VM_START // reads it at handler time. -process.env.MESH_SANDBOX_RUNNER = "freestyle"; +process.env.STUDIO_SANDBOX_RUNNER = "freestyle"; // Mock runner BEFORE importing VM_START — handler is runner-agnostic // and we don't want to pull the real freestyle SDK. 
@@ -517,8 +517,8 @@ describe("VM_START", () => { }); it("skips freestyle teardown on runner flip — freestyle idles out on its own", async () => { - const original = process.env.MESH_SANDBOX_RUNNER; - process.env.MESH_SANDBOX_RUNNER = "docker"; + const original = process.env.STUDIO_SANDBOX_RUNNER; + process.env.STUDIO_SANDBOX_RUNNER = "docker"; try { const staleEntry: VmMapEntry = { vmId: "mh3fx1hmxzdz1h1agx4m", @@ -543,8 +543,8 @@ describe("VM_START", () => { expect(result.runnerKind).toBe("docker"); expect(result.isNewVm).toBe(true); } finally { - if (original === undefined) delete process.env.MESH_SANDBOX_RUNNER; - else process.env.MESH_SANDBOX_RUNNER = original; + if (original === undefined) delete process.env.STUDIO_SANDBOX_RUNNER; + else process.env.STUDIO_SANDBOX_RUNNER = original; } }); diff --git a/apps/mesh/src/tools/vm/start.ts b/apps/mesh/src/tools/vm/start.ts index a786ca644a..331865dc46 100644 --- a/apps/mesh/src/tools/vm/start.ts +++ b/apps/mesh/src/tools/vm/start.ts @@ -62,7 +62,7 @@ export const VM_START = defineTool({ vmId: z.string(), branch: z.string(), isNewVm: z.boolean(), - runnerKind: z.enum(["docker", "freestyle"]), + runnerKind: z.enum(["docker", "freestyle", "agent-sandbox"]), }), handler: async (input, ctx) => { @@ -230,6 +230,7 @@ async function provisionSandbox( displayName: `${githubRepo.owner}/${githubRepo.name}`, }, workload, + tenant: { orgId, userId }, }, ); diff --git a/apps/mesh/src/tools/vm/stop.test.ts b/apps/mesh/src/tools/vm/stop.test.ts index 1543083845..36b5284ca4 100644 --- a/apps/mesh/src/tools/vm/stop.test.ts +++ b/apps/mesh/src/tools/vm/stop.test.ts @@ -201,11 +201,11 @@ describe("VM_DELETE", () => { }); // Regression guard for the invariant called out in stop.ts:1–5: a pod that - // flipped MESH_SANDBOX_RUNNER between start and stop must still tear down + // flipped STUDIO_SANDBOX_RUNNER between start and stop must still tear down // the runner that the entry was created against. 
- it("dispatches on the entry's runnerKind even when MESH_SANDBOX_RUNNER env disagrees", async () => { - const original = process.env.MESH_SANDBOX_RUNNER; - process.env.MESH_SANDBOX_RUNNER = "freestyle"; + it("dispatches on the entry's runnerKind even when STUDIO_SANDBOX_RUNNER env disagrees", async () => { + const original = process.env.STUDIO_SANDBOX_RUNNER; + process.env.STUDIO_SANDBOX_RUNNER = "freestyle"; try { const metadata: Metadata = { vmMap: { "user-1": { [BRANCH]: DOCKER_ENTRY } }, @@ -218,8 +218,8 @@ describe("VM_DELETE", () => { expect(mockDelete).toHaveBeenCalledWith(DOCKER_ENTRY.vmId); expect(lastRequestedKind.value).toBe("docker"); } finally { - if (original === undefined) delete process.env.MESH_SANDBOX_RUNNER; - else process.env.MESH_SANDBOX_RUNNER = original; + if (original === undefined) delete process.env.STUDIO_SANDBOX_RUNNER; + else process.env.STUDIO_SANDBOX_RUNNER = original; } }); diff --git a/apps/mesh/src/tools/vm/stop.ts b/apps/mesh/src/tools/vm/stop.ts index 1175384e87..0640d665af 100644 --- a/apps/mesh/src/tools/vm/stop.ts +++ b/apps/mesh/src/tools/vm/stop.ts @@ -1,6 +1,6 @@ /** * VM_DELETE. Dispatches on the entry's persisted `runnerKind` (not env), - * so a pod that flipped MESH_SANDBOX_RUNNER between start and stop still + * so a pod that flipped STUDIO_SANDBOX_RUNNER between start and stop still * tears down the right kind of VM. 
*/ diff --git a/apps/mesh/src/web/components/vm/env/env.tsx b/apps/mesh/src/web/components/vm/env/env.tsx index a8bd5cbc58..136191f648 100644 --- a/apps/mesh/src/web/components/vm/env/env.tsx +++ b/apps/mesh/src/web/components/vm/env/env.tsx @@ -63,7 +63,7 @@ interface VmData { vmId: string; branch: string; isNewVm: boolean; - runnerKind?: "docker" | "freestyle"; + runnerKind?: "docker" | "freestyle" | "agent-sandbox"; } type ViewStatus = diff --git a/apps/mesh/src/web/components/vm/hooks/use-vm-start.ts b/apps/mesh/src/web/components/vm/hooks/use-vm-start.ts index a80465d33a..45b91a2825 100644 --- a/apps/mesh/src/web/components/vm/hooks/use-vm-start.ts +++ b/apps/mesh/src/web/components/vm/hooks/use-vm-start.ts @@ -34,7 +34,7 @@ export interface VmStartResult { vmId: string; branch: string; isNewVm: boolean; - runnerKind?: "docker" | "freestyle"; + runnerKind?: "docker" | "freestyle" | "agent-sandbox"; } const inflightStarts = new Map>(); diff --git a/bun.lock b/bun.lock index 945c2eeaf0..9a1981bb8f 100644 --- a/bun.lock +++ b/bun.lock @@ -53,7 +53,7 @@ }, "apps/mesh": { "name": "decocms", - "version": "2.276.0", + "version": "2.281.2", "bin": { "deco": "./dist/server/cli.js", }, @@ -272,7 +272,7 @@ }, "packages/runtime": { "name": "@decocms/runtime", - "version": "1.5.0", + "version": "1.6.0", "dependencies": { "@ai-sdk/provider": "^3.0.0", "@cloudflare/workers-types": "^4.20250617.0", @@ -295,6 +295,10 @@ "packages/sandbox": { "name": "@decocms/sandbox", "version": "0.0.1", + "dependencies": { + "@kubernetes/client-node": "^1.4.0", + "@opentelemetry/api": "^1.9.0", + }, "devDependencies": { "@types/bun": "latest", "typescript": "^5.8.3", @@ -495,33 +499,33 @@ "@aws-crypto/util": ["@aws-crypto/util@5.2.0", "", { "dependencies": { "@aws-sdk/types": "^3.222.0", "@smithy/util-utf8": "^2.0.0", "tslib": "^2.6.2" } }, "sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ=="], - "@aws-sdk/client-s3": 
["@aws-sdk/client-s3@3.1037.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.974.5", "@aws-sdk/credential-provider-node": "^3.972.36", "@aws-sdk/middleware-bucket-endpoint": "^3.972.10", "@aws-sdk/middleware-expect-continue": "^3.972.10", "@aws-sdk/middleware-flexible-checksums": "^3.974.13", "@aws-sdk/middleware-host-header": "^3.972.10", "@aws-sdk/middleware-location-constraint": "^3.972.10", "@aws-sdk/middleware-logger": "^3.972.10", "@aws-sdk/middleware-recursion-detection": "^3.972.11", "@aws-sdk/middleware-sdk-s3": "^3.972.34", "@aws-sdk/middleware-ssec": "^3.972.10", "@aws-sdk/middleware-user-agent": "^3.972.35", "@aws-sdk/region-config-resolver": "^3.972.13", "@aws-sdk/signature-v4-multi-region": "^3.996.22", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-endpoints": "^3.996.8", "@aws-sdk/util-user-agent-browser": "^3.972.10", "@aws-sdk/util-user-agent-node": "^3.973.21", "@smithy/config-resolver": "^4.4.17", "@smithy/core": "^3.23.17", "@smithy/eventstream-serde-browser": "^4.2.14", "@smithy/eventstream-serde-config-resolver": "^4.3.14", "@smithy/eventstream-serde-node": "^4.2.14", "@smithy/fetch-http-handler": "^5.3.17", "@smithy/hash-blob-browser": "^4.2.15", "@smithy/hash-node": "^4.2.14", "@smithy/hash-stream-node": "^4.2.14", "@smithy/invalid-dependency": "^4.2.14", "@smithy/md5-js": "^4.2.14", "@smithy/middleware-content-length": "^4.2.14", "@smithy/middleware-endpoint": "^4.4.32", "@smithy/middleware-retry": "^4.5.5", "@smithy/middleware-serde": "^4.2.20", "@smithy/middleware-stack": "^4.2.14", "@smithy/node-config-provider": "^4.3.14", "@smithy/node-http-handler": "^4.6.1", "@smithy/protocol-http": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/url-parser": "^4.2.14", "@smithy/util-base64": "^4.3.2", "@smithy/util-body-length-browser": "^4.2.2", "@smithy/util-body-length-node": "^4.2.3", 
"@smithy/util-defaults-mode-browser": "^4.3.49", "@smithy/util-defaults-mode-node": "^4.2.54", "@smithy/util-endpoints": "^3.4.2", "@smithy/util-middleware": "^4.2.14", "@smithy/util-retry": "^4.3.4", "@smithy/util-stream": "^4.5.25", "@smithy/util-utf8": "^4.2.2", "@smithy/util-waiter": "^4.2.16", "tslib": "^2.6.2" } }, "sha512-DBmA1jAW8ST6C4srBxeL1/RLIir/d8WOm4s4mi59mGp6mBktHM59Kwb7GuURaCO60cotuce5zr0sKpMLPcBQyA=="], + "@aws-sdk/client-s3": ["@aws-sdk/client-s3@3.1038.0", "", { "dependencies": { "@aws-crypto/sha1-browser": "5.2.0", "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.974.6", "@aws-sdk/credential-provider-node": "^3.972.37", "@aws-sdk/middleware-bucket-endpoint": "^3.972.10", "@aws-sdk/middleware-expect-continue": "^3.972.10", "@aws-sdk/middleware-flexible-checksums": "^3.974.14", "@aws-sdk/middleware-host-header": "^3.972.10", "@aws-sdk/middleware-location-constraint": "^3.972.10", "@aws-sdk/middleware-logger": "^3.972.10", "@aws-sdk/middleware-recursion-detection": "^3.972.11", "@aws-sdk/middleware-sdk-s3": "^3.972.35", "@aws-sdk/middleware-ssec": "^3.972.10", "@aws-sdk/middleware-user-agent": "^3.972.36", "@aws-sdk/region-config-resolver": "^3.972.13", "@aws-sdk/signature-v4-multi-region": "^3.996.23", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-endpoints": "^3.996.8", "@aws-sdk/util-user-agent-browser": "^3.972.10", "@aws-sdk/util-user-agent-node": "^3.973.22", "@smithy/config-resolver": "^4.4.17", "@smithy/core": "^3.23.17", "@smithy/eventstream-serde-browser": "^4.2.14", "@smithy/eventstream-serde-config-resolver": "^4.3.14", "@smithy/eventstream-serde-node": "^4.2.14", "@smithy/fetch-http-handler": "^5.3.17", "@smithy/hash-blob-browser": "^4.2.15", "@smithy/hash-node": "^4.2.14", "@smithy/hash-stream-node": "^4.2.14", "@smithy/invalid-dependency": "^4.2.14", "@smithy/md5-js": "^4.2.14", "@smithy/middleware-content-length": "^4.2.14", "@smithy/middleware-endpoint": "^4.4.32", 
"@smithy/middleware-retry": "^4.5.6", "@smithy/middleware-serde": "^4.2.20", "@smithy/middleware-stack": "^4.2.14", "@smithy/node-config-provider": "^4.3.14", "@smithy/node-http-handler": "^4.6.1", "@smithy/protocol-http": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/url-parser": "^4.2.14", "@smithy/util-base64": "^4.3.2", "@smithy/util-body-length-browser": "^4.2.2", "@smithy/util-body-length-node": "^4.2.3", "@smithy/util-defaults-mode-browser": "^4.3.49", "@smithy/util-defaults-mode-node": "^4.2.54", "@smithy/util-endpoints": "^3.4.2", "@smithy/util-middleware": "^4.2.14", "@smithy/util-retry": "^4.3.5", "@smithy/util-stream": "^4.5.25", "@smithy/util-utf8": "^4.2.2", "@smithy/util-waiter": "^4.3.0", "tslib": "^2.6.2" } }, "sha512-k60qm50bWkaqNfCJe1z28WaqgpztE0wbWVMZw6ZJcTOGfrWFhsJeLCEqtkH8w00iEozKx9GQwdQXz4G0sMGdKA=="], - "@aws-sdk/core": ["@aws-sdk/core@3.974.5", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@aws-sdk/xml-builder": "^3.972.19", "@smithy/core": "^3.23.17", "@smithy/node-config-provider": "^4.3.14", "@smithy/property-provider": "^4.2.14", "@smithy/protocol-http": "^5.3.14", "@smithy/signature-v4": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/util-base64": "^4.3.2", "@smithy/util-middleware": "^4.2.14", "@smithy/util-retry": "^4.3.4", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-lMPlYlYfQdNZhlkJgnkmESwrY+hNh3PljmZ+37oAqLNdJ6rnILAwFSyc6B3bJeDOtMORNnMQIej0aTRuOlDyhQ=="], + "@aws-sdk/core": ["@aws-sdk/core@3.974.6", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@aws-sdk/xml-builder": "^3.972.20", "@smithy/core": "^3.23.17", "@smithy/node-config-provider": "^4.3.14", "@smithy/property-provider": "^4.2.14", "@smithy/protocol-http": "^5.3.14", "@smithy/signature-v4": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/util-base64": "^4.3.2", "@smithy/util-middleware": "^4.2.14", "@smithy/util-retry": 
"^4.3.5", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-8Vu7zGxu+39ChR/s5J7nXBw3a2kMHAi0OfKT8ohgTVjX0qYed/8mIfdBb638oBmKrWCwwKjYAM5J/4gMJ8nAJA=="], "@aws-sdk/crc64-nvme": ["@aws-sdk/crc64-nvme@3.972.7", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-QUagVVBbC8gODCF6e1aV0mE2TXWB9Opz4k8EJFdNrujUVQm5R4AjJa1mpOqzwOuROBzqJU9zawzig7M96L8Ejg=="], - "@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.31", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-X/yGB73LmDW/6MdDJGCDzZBUXnM3ys4vs9l+5ZTJmiEswDdP1OjeoAFlFjVGS9o4KB2wZWQ9KOfdVNSSK6Ep3w=="], + "@aws-sdk/credential-provider-env": ["@aws-sdk/credential-provider-env@3.972.32", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-7vA4GHg8NSmQxquJHSBcSM3RgB4ZaaRi6u4+zGFKOmOH6aqlgr2Sda46clkZDYzlirgfY96w15Zj0jh6PT48ng=="], - "@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.33", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/types": "^3.973.8", "@smithy/fetch-http-handler": "^5.3.17", "@smithy/node-http-handler": "^4.6.1", "@smithy/property-provider": "^4.2.14", "@smithy/protocol-http": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/util-stream": "^4.5.25", "tslib": "^2.6.2" } }, "sha512-c0ZF+lwoWVvX5iCaGKL5T/4DnIw88CGqxA0BcBs3U86mIp5EZYPVg+KSPkMXOyokmADvNewiMUfSG2uFwjRp0g=="], + "@aws-sdk/credential-provider-http": ["@aws-sdk/credential-provider-http@3.972.34", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/types": "^3.973.8", "@smithy/fetch-http-handler": "^5.3.17", "@smithy/node-http-handler": "^4.6.1", "@smithy/property-provider": "^4.2.14", "@smithy/protocol-http": "^5.3.14", 
"@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/util-stream": "^4.5.25", "tslib": "^2.6.2" } }, "sha512-vBrhWujFCLp1u8ptJRWYlipMutzPptb8pDQ00rKVH9q67T7rGd3VTWIj63aKrlLuY6qSsw1Rt5F/D/7wnNgryA=="], - "@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.35", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/credential-provider-env": "^3.972.31", "@aws-sdk/credential-provider-http": "^3.972.33", "@aws-sdk/credential-provider-login": "^3.972.35", "@aws-sdk/credential-provider-process": "^3.972.31", "@aws-sdk/credential-provider-sso": "^3.972.35", "@aws-sdk/credential-provider-web-identity": "^3.972.35", "@aws-sdk/nested-clients": "^3.997.3", "@aws-sdk/types": "^3.973.8", "@smithy/credential-provider-imds": "^4.2.14", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-jsU4u/cRkKFLKQS0k918FQ27fzXLG5ENiLWQMYE6581zLeI2hWh04ptlrvZMB3wJT/5d+vSzJk74X1CMFr4y8Q=="], + "@aws-sdk/credential-provider-ini": ["@aws-sdk/credential-provider-ini@3.972.36", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/credential-provider-env": "^3.972.32", "@aws-sdk/credential-provider-http": "^3.972.34", "@aws-sdk/credential-provider-login": "^3.972.36", "@aws-sdk/credential-provider-process": "^3.972.32", "@aws-sdk/credential-provider-sso": "^3.972.36", "@aws-sdk/credential-provider-web-identity": "^3.972.36", "@aws-sdk/nested-clients": "^3.997.4", "@aws-sdk/types": "^3.973.8", "@smithy/credential-provider-imds": "^4.2.14", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-FBHyCmV8EB0gUvh1d+CZm87zt2PrdC7OyWexLRoH3I5zWSOUGa+9t58Y5jbxRfwUp3AWpHAFvKY6YzgR845sVA=="], - "@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.35", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/nested-clients": "^3.997.3", 
"@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/protocol-http": "^5.3.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-5oa3j0cA50jPqgNhZ9XdJVopuzUf1klRb28/2MfLYWWiPi9DRVvbrBWT+DidbHTT36520VuXZJahQwR+YgSjrg=="], + "@aws-sdk/credential-provider-login": ["@aws-sdk/credential-provider-login@3.972.36", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/nested-clients": "^3.997.4", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/protocol-http": "^5.3.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-IFap01lJKxQc0C/OHmZwZQr/cKq0DhrcmKedRrdnnl42D+P0SImnnnWQjv07uIPqpEdtqmkPXb9TiPYTU+prxQ=="], - "@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.36", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.31", "@aws-sdk/credential-provider-http": "^3.972.33", "@aws-sdk/credential-provider-ini": "^3.972.35", "@aws-sdk/credential-provider-process": "^3.972.31", "@aws-sdk/credential-provider-sso": "^3.972.35", "@aws-sdk/credential-provider-web-identity": "^3.972.35", "@aws-sdk/types": "^3.973.8", "@smithy/credential-provider-imds": "^4.2.14", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-4nT2T8Z7vH8KE9EdjEsuIlHpZSlcaK2PrKbQBjuUGU46BCCzF3WvP0u0Uiosni3Ykmmn4rWLVawoOCLotUtCbg=="], + "@aws-sdk/credential-provider-node": ["@aws-sdk/credential-provider-node@3.972.37", "", { "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.32", "@aws-sdk/credential-provider-http": "^3.972.34", "@aws-sdk/credential-provider-ini": "^3.972.36", "@aws-sdk/credential-provider-process": "^3.972.32", "@aws-sdk/credential-provider-sso": "^3.972.36", "@aws-sdk/credential-provider-web-identity": "^3.972.36", "@aws-sdk/types": "^3.973.8", "@smithy/credential-provider-imds": 
"^4.2.14", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-/WFixFAAiw8WpmjZcI0l4t3DerXLmVinOIfuotmRZnu2qmsFPoqqmstASz0z8bi1pGdFXzeLzf6bwucM3mZcUQ=="], - "@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.31", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-eKeT4MXumpBJsrDLCYcSzIkFPVTFn/es7It2oogp2OhU/ic7P/+xzFpQx9ZhwtXS57Mc5S42BPWi7lHmvs/nYg=="], + "@aws-sdk/credential-provider-process": ["@aws-sdk/credential-provider-process@3.972.32", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-uZp4tlGbpczV8QxmtIwOpSkcyGtBRR8/T4BAumRKfAt1nwCig3FSCZvrKl6ARDIDVRYn5p2oRcAsfFR01EgMGA=="], - "@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.35", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/nested-clients": "^3.997.3", "@aws-sdk/token-providers": "3.1036.0", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-bCuBdfnj0KGDMdLp6utMTLiJcFN2ek9EgZinxQZZSc3FxjJ/HSqeqab2cjbnoNfy8RM6suDCsRkmVY1izp9I+A=="], + "@aws-sdk/credential-provider-sso": ["@aws-sdk/credential-provider-sso@3.972.36", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/nested-clients": "^3.997.4", "@aws-sdk/token-providers": "3.1038.0", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, 
"sha512-DsLr0UHMyKzRJKe2bjlwU8q1cfoXg8TIJKV/xwvnalAemiZLOZunFzj/whGnFDZIBVLdnbLiwv5SvRf1+CSwkg=="], - "@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.35", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/nested-clients": "^3.997.3", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-swW6Bwvl8lanyEMtZOWE/oR6yqcRQH4HTQZUVsnDVgoXvRjRywpYpLv2BWwjUFyjPrqsdX6FeTkf4tMSe/qFTQ=="], + "@aws-sdk/credential-provider-web-identity": ["@aws-sdk/credential-provider-web-identity@3.972.36", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/nested-clients": "^3.997.4", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-uzrURO7frJhHQVVNR5zBJcCYeMYflmXcWBK1+MiBym2Dfjh6nXATrMixrmGZi+97Q7ETZ+y/4lUwAy0Nfnznjw=="], "@aws-sdk/middleware-bucket-endpoint": ["@aws-sdk/middleware-bucket-endpoint@3.972.10", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-arn-parser": "^3.972.3", "@smithy/node-config-provider": "^4.3.14", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "@smithy/util-config-provider": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-Vbc2frZH7wXlMNd+ZZSXUEs/l1Sv8Jj4zUnIfwrYF5lwaLdXHZ9xx4U3rjUcaye3HRhFVc+E5DbBxpRAbB16BA=="], "@aws-sdk/middleware-expect-continue": ["@aws-sdk/middleware-expect-continue@3.972.10", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-2Yn0f1Qiq/DjxYR3wfI3LokXnjOhFM7Ssn4LTdFDIxRMCE6I32MAsVnhPX1cUZsuVA9tiZtwwhlSLAtFGxAZlQ=="], - "@aws-sdk/middleware-flexible-checksums": ["@aws-sdk/middleware-flexible-checksums@3.974.13", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@aws-crypto/crc32c": "5.2.0", 
"@aws-crypto/util": "5.2.0", "@aws-sdk/core": "^3.974.5", "@aws-sdk/crc64-nvme": "^3.972.7", "@aws-sdk/types": "^3.973.8", "@smithy/is-array-buffer": "^4.2.2", "@smithy/node-config-provider": "^4.3.14", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "@smithy/util-middleware": "^4.2.14", "@smithy/util-stream": "^4.5.25", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-b6QUe2hQX9XsnCzp6mtzVaERhganDKeb8lmGL6pVhr7rRVH9S9keDFW7uKytuuqmcY5943FixoGqn/QL+sbUBA=="], + "@aws-sdk/middleware-flexible-checksums": ["@aws-sdk/middleware-flexible-checksums@3.974.14", "", { "dependencies": { "@aws-crypto/crc32": "5.2.0", "@aws-crypto/crc32c": "5.2.0", "@aws-crypto/util": "5.2.0", "@aws-sdk/core": "^3.974.6", "@aws-sdk/crc64-nvme": "^3.972.7", "@aws-sdk/types": "^3.973.8", "@smithy/is-array-buffer": "^4.2.2", "@smithy/node-config-provider": "^4.3.14", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "@smithy/util-middleware": "^4.2.14", "@smithy/util-stream": "^4.5.25", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-mhTO3amGzYv/DQNbbqZo6UkHquBHlEEVRZwXmjeRqLmy1l9z3xCiFzglPL7n9JpVc2DZc9kjaraAn3JQrueZbw=="], "@aws-sdk/middleware-host-header": ["@aws-sdk/middleware-host-header@3.972.10", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-IJSsIMeVQ8MMCPbuh1AbltkFhLBLXn7aejzfX5YKT/VLDHn++Dcz8886tXckE+wQssyPUhaXrJhdakO2VilRhg=="], @@ -531,21 +535,21 @@ "@aws-sdk/middleware-recursion-detection": ["@aws-sdk/middleware-recursion-detection@3.972.11", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@aws/lambda-invoke-store": "^0.2.2", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-+zz6f79Kj9V5qFK2P+D8Ehjnw4AhphAlCAsPjUqEcInA9umtSSKMrHbSagEeOIsDNuvVrH98bjRHcyQukTrhaQ=="], - "@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.34", "", { "dependencies": { "@aws-sdk/core": 
"^3.974.5", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-arn-parser": "^3.972.3", "@smithy/core": "^3.23.17", "@smithy/node-config-provider": "^4.3.14", "@smithy/protocol-http": "^5.3.14", "@smithy/signature-v4": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/util-config-provider": "^4.2.2", "@smithy/util-middleware": "^4.2.14", "@smithy/util-stream": "^4.5.25", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-/UL96JKjsjdodcRRMKl99tLQvK6Oi9ptLC9iU1yiTF/ruaDX0mtBBtnLNZDxIZRJOCVOtB49ed1YaTadqygk8Q=="], + "@aws-sdk/middleware-sdk-s3": ["@aws-sdk/middleware-sdk-s3@3.972.35", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-arn-parser": "^3.972.3", "@smithy/core": "^3.23.17", "@smithy/node-config-provider": "^4.3.14", "@smithy/protocol-http": "^5.3.14", "@smithy/signature-v4": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/util-config-provider": "^4.2.2", "@smithy/util-middleware": "^4.2.14", "@smithy/util-stream": "^4.5.25", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-lLppaNTAz+wNgLdi4FtHzrlwrGF0ODTnBWHBaFg85SKs0eJ+M+tP5ifrA8f/0lNd+Ak3MC1NGC6RavV3ny4HTg=="], "@aws-sdk/middleware-ssec": ["@aws-sdk/middleware-ssec@3.972.10", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-Gli9A0u8EVVb+5bFDGS/QbSVg28w/wpEidg1ggVcSj65BDTdGR6punsOcVjqdiu1i42WHWo51MCvARPIIz9juw=="], - "@aws-sdk/middleware-user-agent": ["@aws-sdk/middleware-user-agent@3.972.35", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-endpoints": "^3.996.8", "@smithy/core": "^3.23.17", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "@smithy/util-retry": "^4.3.4", "tslib": "^2.6.2" } }, "sha512-hOFWNOjVmOocpRlrU04nYxjMOeoe0Obu5AXEuhB8zblMCPl3cG1hdluQCZERRKFyhMQjwZnDbhSHjoMUjetFGw=="], + "@aws-sdk/middleware-user-agent": 
["@aws-sdk/middleware-user-agent@3.972.36", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-endpoints": "^3.996.8", "@smithy/core": "^3.23.17", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "@smithy/util-retry": "^4.3.5", "tslib": "^2.6.2" } }, "sha512-O2beToxguBvrZFFZ+fFgPbbae8MvyIBjQ6lImee4APHEXXNAD5ZJ2ayLF1mb7rsKw86TM81y5czg82bZncjSjg=="], - "@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.997.3", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.974.5", "@aws-sdk/middleware-host-header": "^3.972.10", "@aws-sdk/middleware-logger": "^3.972.10", "@aws-sdk/middleware-recursion-detection": "^3.972.11", "@aws-sdk/middleware-user-agent": "^3.972.35", "@aws-sdk/region-config-resolver": "^3.972.13", "@aws-sdk/signature-v4-multi-region": "^3.996.22", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-endpoints": "^3.996.8", "@aws-sdk/util-user-agent-browser": "^3.972.10", "@aws-sdk/util-user-agent-node": "^3.973.21", "@smithy/config-resolver": "^4.4.17", "@smithy/core": "^3.23.17", "@smithy/fetch-http-handler": "^5.3.17", "@smithy/hash-node": "^4.2.14", "@smithy/invalid-dependency": "^4.2.14", "@smithy/middleware-content-length": "^4.2.14", "@smithy/middleware-endpoint": "^4.4.32", "@smithy/middleware-retry": "^4.5.5", "@smithy/middleware-serde": "^4.2.20", "@smithy/middleware-stack": "^4.2.14", "@smithy/node-config-provider": "^4.3.14", "@smithy/node-http-handler": "^4.6.1", "@smithy/protocol-http": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/url-parser": "^4.2.14", "@smithy/util-base64": "^4.3.2", "@smithy/util-body-length-browser": "^4.2.2", "@smithy/util-body-length-node": "^4.2.3", "@smithy/util-defaults-mode-browser": "^4.3.49", "@smithy/util-defaults-mode-node": "^4.2.54", "@smithy/util-endpoints": "^3.4.2", "@smithy/util-middleware": "^4.2.14", "@smithy/util-retry": "^4.3.4", 
"@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-SivE6GP228IVgfsrr2c/vqTg95X0Qj39Yw4uIrcddpkUzIltNMoNOR62leHOLhODfjv9K8X2mPTwS69A5kT0nQ=="], + "@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.997.4", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.974.6", "@aws-sdk/middleware-host-header": "^3.972.10", "@aws-sdk/middleware-logger": "^3.972.10", "@aws-sdk/middleware-recursion-detection": "^3.972.11", "@aws-sdk/middleware-user-agent": "^3.972.36", "@aws-sdk/region-config-resolver": "^3.972.13", "@aws-sdk/signature-v4-multi-region": "^3.996.23", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-endpoints": "^3.996.8", "@aws-sdk/util-user-agent-browser": "^3.972.10", "@aws-sdk/util-user-agent-node": "^3.973.22", "@smithy/config-resolver": "^4.4.17", "@smithy/core": "^3.23.17", "@smithy/fetch-http-handler": "^5.3.17", "@smithy/hash-node": "^4.2.14", "@smithy/invalid-dependency": "^4.2.14", "@smithy/middleware-content-length": "^4.2.14", "@smithy/middleware-endpoint": "^4.4.32", "@smithy/middleware-retry": "^4.5.6", "@smithy/middleware-serde": "^4.2.20", "@smithy/middleware-stack": "^4.2.14", "@smithy/node-config-provider": "^4.3.14", "@smithy/node-http-handler": "^4.6.1", "@smithy/protocol-http": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/url-parser": "^4.2.14", "@smithy/util-base64": "^4.3.2", "@smithy/util-body-length-browser": "^4.2.2", "@smithy/util-body-length-node": "^4.2.3", "@smithy/util-defaults-mode-browser": "^4.3.49", "@smithy/util-defaults-mode-node": "^4.2.54", "@smithy/util-endpoints": "^3.4.2", "@smithy/util-middleware": "^4.2.14", "@smithy/util-retry": "^4.3.5", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-4Sf+WY1lMJzXlw5MiyCMe/UzdILCwvuaHThbqMXS6dfh9gZy3No360I42RXquOI/ULUOhWy2HCyU0Fp20fQGPQ=="], "@aws-sdk/region-config-resolver": ["@aws-sdk/region-config-resolver@3.972.13", "", { "dependencies": { 
"@aws-sdk/types": "^3.973.8", "@smithy/config-resolver": "^4.4.17", "@smithy/node-config-provider": "^4.3.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-CvJ2ZIjK/jVD/lbOpowBVElJyC1YxLTIJ13yM0AEo0t2v7swOzGjSA6lJGH+DwZXQhcjUjoYwc8bVYCX5MDr1A=="], - "@aws-sdk/s3-request-presigner": ["@aws-sdk/s3-request-presigner@3.1037.0", "", { "dependencies": { "@aws-sdk/signature-v4-multi-region": "^3.996.22", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-format-url": "^3.972.10", "@smithy/middleware-endpoint": "^4.4.32", "@smithy/protocol-http": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-rZQS8DxrqPYXzOvaoysf6L4fHmgFbndZz3GfUMhlHG1iWmcQqH7v0AGhpjyNBY3cYAX8+CAkOkD4VUrntnHNbQ=="], + "@aws-sdk/s3-request-presigner": ["@aws-sdk/s3-request-presigner@3.1038.0", "", { "dependencies": { "@aws-sdk/signature-v4-multi-region": "^3.996.23", "@aws-sdk/types": "^3.973.8", "@aws-sdk/util-format-url": "^3.972.10", "@smithy/middleware-endpoint": "^4.4.32", "@smithy/protocol-http": "^5.3.14", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-2PNCm+2Mx8v2GKRREKMS3PavahzRhmMMJjuJxUpLneQV4w3oMs2bpme62oU6l+hip1pyeyPimWHeabjhaURocw=="], - "@aws-sdk/signature-v4-multi-region": ["@aws-sdk/signature-v4-multi-region@3.996.22", "", { "dependencies": { "@aws-sdk/middleware-sdk-s3": "^3.972.34", "@aws-sdk/types": "^3.973.8", "@smithy/protocol-http": "^5.3.14", "@smithy/signature-v4": "^5.3.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-/rXhMXteD+BqhFd0nYprAgcZ/KtU+963uftPqd3tiFcFfooHZINXUGtOmo2SQjRVauCTNqIEzkwuSETdZFqTTA=="], + "@aws-sdk/signature-v4-multi-region": ["@aws-sdk/signature-v4-multi-region@3.996.23", "", { "dependencies": { "@aws-sdk/middleware-sdk-s3": "^3.972.35", "@aws-sdk/types": "^3.973.8", "@smithy/protocol-http": "^5.3.14", "@smithy/signature-v4": "^5.3.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, 
"sha512-wBbys3Y53Ikly556vyADurKpYQHXS7Jjaskbz+Ga9PZCz7PB/9f3VdKbDlz7dqIzn+xwz7L/a6TR4iXcOi8IRw=="], - "@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.1036.0", "", { "dependencies": { "@aws-sdk/core": "^3.974.5", "@aws-sdk/nested-clients": "^3.997.3", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-aNSJ6jjDYayxN9ZA1JpycVScX93Lx03kKZ1EXt3DGOTahcWVLJj3oLAlop0xKP+vP2Ga2t49p1tEaMkTbCCaZA=="], + "@aws-sdk/token-providers": ["@aws-sdk/token-providers@3.1038.0", "", { "dependencies": { "@aws-sdk/core": "^3.974.6", "@aws-sdk/nested-clients": "^3.997.4", "@aws-sdk/types": "^3.973.8", "@smithy/property-provider": "^4.2.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-Qniru+9oGGb/HNK/gGZWbV3jsD0k71ngE7qMQ/x6gYNYLd2EOwHCS6E2E6jfkaqO4i0d+nNKmfRy8bNcshKdGQ=="], "@aws-sdk/types": ["@aws-sdk/types@3.973.8", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-gjlAdtHMbtR9X5iIhVUvbVcy55KnznpC6bkDUWW9z915bi0ckdUr5cjf16Kp6xq0bP5HBD2xzgbL9F9Quv5vUw=="], @@ -559,9 +563,9 @@ "@aws-sdk/util-user-agent-browser": ["@aws-sdk/util-user-agent-browser@3.972.10", "", { "dependencies": { "@aws-sdk/types": "^3.973.8", "@smithy/types": "^4.14.1", "bowser": "^2.11.0", "tslib": "^2.6.2" } }, "sha512-FAzqXvfEssGdSIz8ejatan0bOdx1qefBWKF/gWmVBXIP1HkS7v/wjjaqrAGGKvyihrXTXW00/2/1nTJtxpXz7g=="], - "@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.973.21", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.35", "@aws-sdk/types": "^3.973.8", "@smithy/node-config-provider": "^4.3.14", "@smithy/types": "^4.14.1", "@smithy/util-config-provider": "^4.2.2", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-Av4UHTcAWgdvbN0IP9pbtf4Qa1+6LtJqQdZWj5pLn5J67w0pnJJAZZ+7JPPcj2KN3378zD2JDM9DwJKEyvyMTQ=="], + 
"@aws-sdk/util-user-agent-node": ["@aws-sdk/util-user-agent-node@3.973.22", "", { "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.36", "@aws-sdk/types": "^3.973.8", "@smithy/node-config-provider": "^4.3.14", "@smithy/types": "^4.14.1", "@smithy/util-config-provider": "^4.2.2", "tslib": "^2.6.2" }, "peerDependencies": { "aws-crt": ">=1.0.0" }, "optionalPeers": ["aws-crt"] }, "sha512-YTYqTmOUrwbm1h99Ee4y/mVYpFRl0oSO/amtP5cc1BZZWdaAVWs9zj3TkyRHWvR9aI/ZS8m3mS6awXtYUlWyaw=="], - "@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.19", "", { "dependencies": { "@smithy/types": "^4.14.1", "fast-xml-parser": "5.7.1", "tslib": "^2.6.2" } }, "sha512-Cw8IOMdBUEIl8ZlhRC3Dc/E64D5B5/8JhV6vhPLiPfJwcRC84S6F8aBOIi/N4vR9ZyA4I5Cc0Ateb/9EHaJXeQ=="], + "@aws-sdk/xml-builder": ["@aws-sdk/xml-builder@3.972.20", "", { "dependencies": { "@nodable/entities": "2.1.0", "@smithy/types": "^4.14.1", "fast-xml-parser": "5.7.2", "tslib": "^2.6.2" } }, "sha512-MDcUfroaMAnDAHn29vN781t0wudR8zjfgg+r3s5otx8TJXFWg01NZB7HvHkBbOf7UUmKEwIZf5kHxiaVUgwjlQ=="], "@aws/lambda-invoke-store": ["@aws/lambda-invoke-store@0.2.4", "", {}, "sha512-iY8yvjE0y651BixKNPgmv1WrQc+GZ142sb0z4gYnChDDY2YqI4P/jsSopBWrKfAt7LOJAkOXt7rC/hms+WclQQ=="], @@ -881,13 +885,13 @@ "@inkjs/ui": ["@inkjs/ui@2.0.0", "", { "dependencies": { "chalk": "^5.3.0", "cli-spinners": "^3.0.0", "deepmerge": "^4.3.1", "figures": "^6.1.0" }, "peerDependencies": { "ink": ">=5" } }, "sha512-5+8fJmwtF9UvikzLfph9sA+LS+l37Ij/szQltkuXLOAXwNkBX9innfzh4pLGXIB59vKEQUtc6D4qGvhD7h3pAg=="], - "@instantdb/core": ["@instantdb/core@1.0.17", "", { "dependencies": { "@instantdb/version": "1.0.17", "mutative": "^1.0.10", "uuid": "^11.1.0" } }, "sha512-merdcc4g91ZLTIRH6juI2jWODRlZuLbGssba6Xv+L3Xxg9tR9dtabr+nXEgUbPNZPORe6L0cEP5pKEGzU/FYuQ=="], + "@instantdb/core": ["@instantdb/core@1.0.20", "", { "dependencies": { "@instantdb/version": "1.0.20", "mutative": "^1.0.10", "uuid": "^11.1.0" } }, 
"sha512-bh/VZftslcvYQzM70Ik4HC3kyUouDO8NQknxTdhogqBYHQHv1JV/Aj08l5aaajmNQD+ZiIwHP2SRqUO1NbSgTA=="], - "@instantdb/react": ["@instantdb/react@1.0.17", "", { "dependencies": { "@instantdb/core": "1.0.17", "@instantdb/react-common": "1.0.17", "@instantdb/version": "1.0.17", "eventsource": "^4.0.0" }, "peerDependencies": { "react": ">=16" } }, "sha512-fWZ/TfAd3/4iUS5HvjslF1a5tz73Q4HdwUak+GW2sEOTdrYYDRFivqRFNQdm5w50uyfhDZh07eNxz0EGwmMMdA=="], + "@instantdb/react": ["@instantdb/react@1.0.20", "", { "dependencies": { "@instantdb/core": "1.0.20", "@instantdb/react-common": "1.0.20", "@instantdb/version": "1.0.20", "eventsource": "^4.0.0" }, "peerDependencies": { "react": ">=16" } }, "sha512-KnK8uniMgCPLenybLAri5NI1r2p+TrT3doyxMExbVvlUITb/HHIaqtMuS7uUkOowOMMAdkstSf5FiV8SRcHkLw=="], - "@instantdb/react-common": ["@instantdb/react-common@1.0.17", "", { "dependencies": { "@instantdb/core": "1.0.17", "@instantdb/version": "1.0.17" }, "peerDependencies": { "react": ">=16" } }, "sha512-knB7jq8YV7YQv84FFeuUmayXTFB0e0aJdYPCYsUZFfsEhGKMWEKDr9XwZ59DoHbzZXRb+Nqfpln00tb7l6pHOA=="], + "@instantdb/react-common": ["@instantdb/react-common@1.0.20", "", { "dependencies": { "@instantdb/core": "1.0.20", "@instantdb/version": "1.0.20" }, "peerDependencies": { "react": ">=16" } }, "sha512-AmZQtF/JPEkJ8CcLsPFl4KH6Gb7oi/K7NCW4ESM2FYnbtHHZUKdq0XAUw2jSVcJ1hTW6FJNdxldvS30RfSj71g=="], - "@instantdb/version": ["@instantdb/version@1.0.17", "", {}, "sha512-9MXsyNYmwH7kUBZLFpMmv5fJgFjKQHZXfDJvEJsRl+ssumnroILAI3fvHWF44yPiR5eZvYxKLHvPE0IttovyKw=="], + "@instantdb/version": ["@instantdb/version@1.0.20", "", {}, "sha512-WuoEBXO01Bnhnk+uCbWliugnas0ny6c9Xx+dSdC5/P9fb3TGinVltfWxu1YxwnbMz3IN2jlMOmqNCdlFfrc8/g=="], "@isaacs/fs-minipass": ["@isaacs/fs-minipass@4.0.1", "", { "dependencies": { "minipass": "^7.0.4" } }, "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w=="], @@ -909,6 +913,12 @@ "@jsdevtools/ono": ["@jsdevtools/ono@7.1.3", "", {}, 
"sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg=="], + "@jsep-plugin/assignment": ["@jsep-plugin/assignment@1.3.0", "", { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, "sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ=="], + + "@jsep-plugin/regex": ["@jsep-plugin/regex@1.0.4", "", { "peerDependencies": { "jsep": "^0.4.0||^1.0.0" } }, "sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg=="], + + "@kubernetes/client-node": ["@kubernetes/client-node@1.4.0", "", { "dependencies": { "@types/js-yaml": "^4.0.1", "@types/node": "^24.0.0", "@types/node-fetch": "^2.6.13", "@types/stream-buffers": "^3.0.3", "form-data": "^4.0.0", "hpagent": "^1.2.0", "isomorphic-ws": "^5.0.0", "js-yaml": "^4.1.0", "jsonpath-plus": "^10.3.0", "node-fetch": "^2.7.0", "openid-client": "^6.1.3", "rfc4648": "^1.3.0", "socks-proxy-agent": "^8.0.4", "stream-buffers": "^3.0.2", "tar-fs": "^3.0.9", "ws": "^8.18.2" } }, "sha512-Zge3YvF7DJi264dU1b3wb/GmzR99JhUpqTvp+VGHfwZT+g7EOOYNScDJNZwXy9cszyIGPIs0VHr+kk8e95qqrA=="], + "@levischuck/tiny-cbor": ["@levischuck/tiny-cbor@0.2.11", "", {}, "sha512-llBRm4dT4Z89aRsm6u2oEZ8tfwL/2l6BwpZ7JcyieouniDECM5AqNgr/y08zalEIvW3RSK4upYyybDcmjXqAow=="], "@mapbox/node-pre-gyp": ["@mapbox/node-pre-gyp@2.0.3", "", { "dependencies": { "consola": "^3.2.3", "detect-libc": "^2.0.0", "https-proxy-agent": "^7.0.5", "node-fetch": "^2.6.7", "nopt": "^8.0.0", "semver": "^7.5.3", "tar": "^7.4.0" }, "bin": { "node-pre-gyp": "bin/node-pre-gyp" } }, "sha512-uwPAhccfFJlsfCxMYTwOdVfOz3xqyj8xYL3zJj8f0pb30tLohnnFPhLuqp4/qoEz8sNxe4SESZedcBojRefIzg=="], @@ -917,7 +927,7 @@ "@mdx-js/mdx": ["@mdx-js/mdx@3.1.1", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdx": "^2.0.0", "acorn": "^8.0.0", "collapse-white-space": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", 
"estree-util-scope": "^1.0.0", "estree-walker": "^3.0.0", "hast-util-to-jsx-runtime": "^2.0.0", "markdown-extensions": "^2.0.0", "recma-build-jsx": "^1.0.0", "recma-jsx": "^1.0.0", "recma-stringify": "^1.0.0", "rehype-recma": "^1.0.0", "remark-mdx": "^3.0.0", "remark-parse": "^11.0.0", "remark-rehype": "^11.0.0", "source-map": "^0.7.0", "unified": "^11.0.0", "unist-util-position-from-estree": "^2.0.0", "unist-util-stringify-position": "^4.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ=="], - "@modelcontextprotocol/ext-apps": ["@modelcontextprotocol/ext-apps@1.7.0", "", { "dependencies": { "@standard-schema/spec": "^1.1.0" }, "peerDependencies": { "@modelcontextprotocol/sdk": "^1.29.0", "react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0", "zod": "^3.25.0 || ^4.0.0" }, "optionalPeers": ["react", "react-dom"] }, "sha512-gs8rYVx6a8pyCvSpXq7TyVLTERCC94JLrcmJgBs0+3p4jp3iQdJPu1IU+2ovVdFZ1sW8JgmvTkRnxAlIizKINg=="], + "@modelcontextprotocol/ext-apps": ["@modelcontextprotocol/ext-apps@1.7.1", "", { "dependencies": { "@standard-schema/spec": "^1.1.0" }, "peerDependencies": { "@modelcontextprotocol/sdk": "^1.29.0", "react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0", "zod": "^3.25.0 || ^4.0.0" }, "optionalPeers": ["react", "react-dom"] }, "sha512-J3WdG1A4JSSKnSWKyU+895dBVYBV2Utgtf7fUsUK45mlkETm53a/1DR6Pm3hUGKqLLQthZLmpxOg8VPzJi/lyg=="], "@modelcontextprotocol/sdk": ["@modelcontextprotocol/sdk@1.27.1", "", { "dependencies": { "@hono/node-server": "^1.19.9", "ajv": "^8.17.1", "ajv-formats": "^3.0.1", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "eventsource-parser": "^3.0.0", "express": "^5.2.1", "express-rate-limit": "^8.2.1", "hono": "^4.11.4", "jose": "^6.1.3", "json-schema-typed": "^8.0.2", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.25 
|| ^4.0", "zod-to-json-schema": "^3.25.1" }, "peerDependencies": { "@cfworker/json-schema": "^4.1.1" }, "optionalPeers": ["@cfworker/json-schema"] }, "sha512-sr6GbP+4edBwFndLbM60gf07z0FQ79gaExpnsjMGePXqFcSSb7t6iscpjk9DhFhwd+mTEQrzNafGP8/iGGFYaA=="], @@ -931,6 +941,8 @@ "@noble/hashes": ["@noble/hashes@2.2.0", "", {}, "sha512-IYqDGiTXab6FniAgnSdZwgWbomxpy9FtYvLKs7wCUs2a8RkITG+DFGO1DM9cr+E3/RgADRpFjrKVaJ1z6sjtEg=="], + "@nodable/entities": ["@nodable/entities@2.1.0", "", {}, "sha512-nyT7T3nbMyBI/lvr6L5TyWbFJAI9FTgVRakNoBqCD+PmID8DzFrrNdLLtHMwMszOtqZa8PAOV24ZqDnQrhQINA=="], + "@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="], "@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="], @@ -1133,15 +1145,15 @@ "@poppinss/exception": ["@poppinss/exception@1.2.3", "", {}, "sha512-dCED+QRChTVatE9ibtoaxc+WkdzOSjYTKi/+uacHWIsfodVfpsueo3+DKpgU5Px8qXjgmXkSvhXvSCz3fnP9lw=="], - "@posthog/core": ["@posthog/core@1.27.6", "", { "dependencies": { "@posthog/types": "1.372.2" } }, "sha512-FjvgPdORywAjgjtgkZJ2/x9ED52jtOJym/RVldY4Oa7wzmlY49rxZm8gvOlocEnjP90bSbj3ko7qVjXNhftFvA=="], + "@posthog/core": ["@posthog/core@1.27.7", "", { "dependencies": { "@posthog/types": "1.372.3" } }, "sha512-6rzOZajUkhuezgPeF+ReMMly0D9oiwIZtMQrsJtZcS/mwi5OtvuYgxeaohgP9PKOhkK1c7cvGskX0Y2YUtBYCw=="], - "@posthog/types": ["@posthog/types@1.372.2", "", {}, "sha512-dx+WImdDg2NDqaDowTmW+BMNalUfPKngR+g1Iom8ULSav+fGacxexv6fSOl0uSVBwYZsDFe7qNUu0NB/rwGjEw=="], + "@posthog/types": ["@posthog/types@1.372.3", "", {}, "sha512-4mkXC9AhsquJnvogWtWsCi+ReODj/jbK0d3fkwCNLLTOpaiAF125FJ6OJyRFax2u+dEKXAPA/dCTGx1S2WF0nw=="], "@protobufjs/aspromise": ["@protobufjs/aspromise@1.1.2", "", {}, 
"sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ=="], "@protobufjs/base64": ["@protobufjs/base64@1.1.2", "", {}, "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg=="], - "@protobufjs/codegen": ["@protobufjs/codegen@2.0.4", "", {}, "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg=="], + "@protobufjs/codegen": ["@protobufjs/codegen@2.0.5", "", {}, "sha512-zgXFLzW3Ap33e6d0Wlj4MGIm6Ce8O89n/apUaGNB/jx+hw+ruWEp7EwGUshdLKVRCxZW12fp9r40E1mQrf/34g=="], "@protobufjs/eventemitter": ["@protobufjs/eventemitter@1.1.0", "", {}, "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q=="], @@ -1149,13 +1161,13 @@ "@protobufjs/float": ["@protobufjs/float@1.0.2", "", {}, "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ=="], - "@protobufjs/inquire": ["@protobufjs/inquire@1.1.0", "", {}, "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q=="], + "@protobufjs/inquire": ["@protobufjs/inquire@1.1.1", "", {}, "sha512-mnzgDV26ueAvk7rsbt9L7bE0SuAoqyuys/sMMrmVcN5x9VsxpcG3rqAUSgDyLp0UZlmNfIbQ4fHfCtreVBk8Ew=="], "@protobufjs/path": ["@protobufjs/path@1.1.2", "", {}, "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA=="], "@protobufjs/pool": ["@protobufjs/pool@1.1.0", "", {}, "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw=="], - "@protobufjs/utf8": ["@protobufjs/utf8@1.1.0", "", {}, "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw=="], + "@protobufjs/utf8": ["@protobufjs/utf8@1.1.1", "", {}, "sha512-oOAWABowe8EAbMyWKM0tYDKi8Yaox52D+HWZhAIJqQXbqe0xI/GV7FhLWqlEKreMkfDjshR5FKgi3mnle0h6Eg=="], "@radix-ui/number": ["@radix-ui/number@1.1.1", "", {}, 
"sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g=="], @@ -1441,7 +1453,7 @@ "@smithy/middleware-endpoint": ["@smithy/middleware-endpoint@4.4.32", "", { "dependencies": { "@smithy/core": "^3.23.17", "@smithy/middleware-serde": "^4.2.20", "@smithy/node-config-provider": "^4.3.14", "@smithy/shared-ini-file-loader": "^4.4.9", "@smithy/types": "^4.14.1", "@smithy/url-parser": "^4.2.14", "@smithy/util-middleware": "^4.2.14", "tslib": "^2.6.2" } }, "sha512-ZZkgyjnJppiZbIm6Qbx92pbXYi1uzenIvGhBSCDlc7NwuAkiqSgS75j1czAD25ZLs2FjMjYy1q7gyRVWG6JA0Q=="], - "@smithy/middleware-retry": ["@smithy/middleware-retry@4.5.5", "", { "dependencies": { "@smithy/core": "^3.23.17", "@smithy/node-config-provider": "^4.3.14", "@smithy/protocol-http": "^5.3.14", "@smithy/service-error-classification": "^4.3.0", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/util-middleware": "^4.2.14", "@smithy/util-retry": "^4.3.4", "@smithy/uuid": "^1.1.2", "tslib": "^2.6.2" } }, "sha512-wnYOpB5vATFKWrY2Z9Alb0KhjZI6AbzU6Fbz3Hq2GnURdRYWB4q+qWivQtSTwXcmWUA3MZ6krfwL6Cq5MAbxsA=="], + "@smithy/middleware-retry": ["@smithy/middleware-retry@4.5.6", "", { "dependencies": { "@smithy/core": "^3.23.17", "@smithy/node-config-provider": "^4.3.14", "@smithy/protocol-http": "^5.3.14", "@smithy/service-error-classification": "^4.3.1", "@smithy/smithy-client": "^4.12.13", "@smithy/types": "^4.14.1", "@smithy/util-middleware": "^4.2.14", "@smithy/util-retry": "^4.3.5", "@smithy/uuid": "^1.1.2", "tslib": "^2.6.2" } }, "sha512-5zhmo2AkstmM/RMKYP0NHfmuYWBR+/umlmSuALgajLxf0X0rLE6d17MfzTxpzkILWVhwvCJkCyPH0AfMlbaucQ=="], "@smithy/middleware-serde": ["@smithy/middleware-serde@4.2.20", "", { "dependencies": { "@smithy/core": "^3.23.17", "@smithy/protocol-http": "^5.3.14", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-Lx9JMO9vArPtiChE3wbEZ5akMIDQpWQtlu90lhACQmNOXcGXRbaDywMHDzuDZ2OkZzP+9wQfZi3YJT9F67zTQQ=="], @@ -1459,7 +1471,7 @@ 
"@smithy/querystring-parser": ["@smithy/querystring-parser@4.2.14", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-hr+YyqBD23GVvRxGGrcc/oOeNlK3PzT5Fu4dzrDXxzS1LpFiuL2PQQqKPs87M79aW7ziMs+nvB3qdw77SqE7Lw=="], - "@smithy/service-error-classification": ["@smithy/service-error-classification@4.3.0", "", { "dependencies": { "@smithy/types": "^4.14.1" } }, "sha512-9jKsBYQRPR0xBLgc2415RsA5PIcP2sis4oBdN9s0D13cg1B1284mNTjx9Yc+BEERXzuPm5ObktI96OxsKh8E9A=="], + "@smithy/service-error-classification": ["@smithy/service-error-classification@4.3.1", "", { "dependencies": { "@smithy/types": "^4.14.1" } }, "sha512-aUQuDGh760ts/8MU+APjIZhlLPKhIIfqyzZaJikLEIMrdxFvxuLYD0WxWzaYWpmLbQlXDe9p7EWM3HsBe0K6Gw=="], "@smithy/shared-ini-file-loader": ["@smithy/shared-ini-file-loader@4.4.9", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-495/V2I15SHgedSJoDPD23JuSfKAp726ZI1V0wtjB07Wh7q/0tri/0e0DLefZCHgxZonrGKt/OCTpAtP1wE1kQ=="], @@ -1491,7 +1503,7 @@ "@smithy/util-middleware": ["@smithy/util-middleware@4.2.14", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-1Su2vj9RYNDEv/V+2E+jXkkwGsgR7dc4sfHn9Z7ruzQHJIEni9zzw5CauvRXlFJfmgcqYP8fWa0dkh2Q2YaQyw=="], - "@smithy/util-retry": ["@smithy/util-retry@4.3.4", "", { "dependencies": { "@smithy/service-error-classification": "^4.3.0", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-FY1UQQ1VFmMwiYp1GVS4MeaGD5O0blLNYK0xCRHU+mJgeoH/hSY8Ld8sJWKQ6uznkh14HveRGQJncgPyNl9J+A=="], + "@smithy/util-retry": ["@smithy/util-retry@4.3.5", "", { "dependencies": { "@smithy/service-error-classification": "^4.3.1", "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-h1IJsbgMDA+jaTjrco/JsyfWOgHRJBv8myB1y4AEI2fjIzD6ktZ7pFAyTw+gwN9GKIAygvC6db0mq0j8N2rFOg=="], "@smithy/util-stream": ["@smithy/util-stream@4.5.25", "", { "dependencies": { "@smithy/fetch-http-handler": "^5.3.17", "@smithy/node-http-handler": "^4.6.1", "@smithy/types": "^4.14.1", 
"@smithy/util-base64": "^4.3.2", "@smithy/util-buffer-from": "^4.2.2", "@smithy/util-hex-encoding": "^4.2.2", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-/PFpG4k8Ze8Ei+mMKj3oiPICYekthuzePZMgZbCqMiXIHHf4n2aZ4Ps0aSRShycFTGuj/J6XldmC0x0DwednIA=="], @@ -1499,7 +1511,7 @@ "@smithy/util-utf8": ["@smithy/util-utf8@4.2.2", "", { "dependencies": { "@smithy/util-buffer-from": "^4.2.2", "tslib": "^2.6.2" } }, "sha512-75MeYpjdWRe8M5E3AW0O4Cx3UadweS+cwdXjwYGBW5h/gxxnbeZ877sLPX/ZJA9GVTlL/qG0dXP29JWFCD1Ayw=="], - "@smithy/util-waiter": ["@smithy/util-waiter@4.2.16", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-GtclrKoZ3Lt7jPQ7aTIYKfjY92OgceScftVnkTsG8e1KV8rkvZgN+ny6YSRhd9hxB8rZtwVbmln7NTvE5O3GmQ=="], + "@smithy/util-waiter": ["@smithy/util-waiter@4.3.0", "", { "dependencies": { "@smithy/types": "^4.14.1", "tslib": "^2.6.2" } }, "sha512-JyjYmLAfS+pdxF92o4yLgEoy0zhayKTw73FU1aofLWwLcJw7iSqIY2exGmMTrl/lmZugP5p/zxdFSippJDfKWA=="], "@smithy/uuid": ["@smithy/uuid@1.1.2", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-O/IEdcCUKkubz60tFbGA7ceITTAJsty+lBjNoorP4Z6XRqaFb/OjQjZODophEcuq68nKm6/0r+6/lLQ+XVpk8g=="], @@ -1667,6 +1679,8 @@ "@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="], + "@types/js-yaml": ["@types/js-yaml@4.0.9", "", {}, "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg=="], + "@types/json-schema": ["@types/json-schema@7.0.15", "", {}, "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA=="], "@types/linkify-it": ["@types/linkify-it@5.0.0", "", {}, "sha512-sVDA58zAw4eWAffKOaQH5/5j3XeayukzDk+ewSsnv3p4yJEZHCCzMDiZM8e0OUrRvmpGZ85jf4yDHkHsgBNr9Q=="], @@ -1689,6 +1703,8 @@ "@types/node": ["@types/node@24.12.2", "", { "dependencies": { "undici-types": "~7.16.0" } }, 
"sha512-A1sre26ke7HDIuY/M23nd9gfB+nrmhtYyMINbjI1zHJxYteKR6qSMX56FsmjMcDb3SMcjJg5BiRRgOCC/yBD0g=="], + "@types/node-fetch": ["@types/node-fetch@2.6.13", "", { "dependencies": { "@types/node": "*", "form-data": "^4.0.4" } }, "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw=="], + "@types/pg": ["@types/pg@8.20.0", "", { "dependencies": { "@types/node": "*", "pg-protocol": "*", "pg-types": "^2.2.0" } }, "sha512-bEPFOaMAHTEP1EzpvHTbmwR8UsFyHSKsRisLIHVMXnpNefSbGA1bD6CVy+qKjGSqmZqNqBDV2azOBo8TgkcVow=="], "@types/react": ["@types/react@19.2.14", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w=="], @@ -1697,6 +1713,8 @@ "@types/react-syntax-highlighter": ["@types/react-syntax-highlighter@15.5.13", "", { "dependencies": { "@types/react": "*" } }, "sha512-uLGJ87j6Sz8UaBAooU0T6lWJ0dBmjZgN1PZTrj05TNql2/XpC6+4HhMT5syIdFUUt+FASfCeLLv4kBygNU+8qA=="], + "@types/stream-buffers": ["@types/stream-buffers@3.0.8", "", { "dependencies": { "@types/node": "*" } }, "sha512-J+7VaHKNvlNPJPEJXX/fKa9DZtR/xPMwuIbe+yNOwp1YB+ApUOBv2aUpEoBJEi8nJgbgs1x8e73ttg0r1rSUdw=="], + "@types/trusted-types": ["@types/trusted-types@2.0.7", "", {}, "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw=="], "@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="], @@ -1783,16 +1801,32 @@ "async-sema": ["async-sema@3.1.1", "", {}, "sha512-tLRNUXati5MFePdAk8dw7Qt7DpxPB60ofAgn8WRhW6a2rcimZnYBP9oxHiv0OHy+Wz7kPMG+t4LGdt31+4EmGg=="], + "asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="], + "auto-bind": ["auto-bind@5.0.1", "", {}, "sha512-ooviqdwwgfIfNmDwo94wlshcdzfO64XV0Cg6oDsDYBJfITDz1EngD2z7DkbvCWn+XIMsIqW27sEVF6qcpJrRcg=="], "axobject-query": 
["axobject-query@4.1.0", "", {}, "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ=="], + "b4a": ["b4a@1.8.0", "", { "peerDependencies": { "react-native-b4a": "*" }, "optionalPeers": ["react-native-b4a"] }, "sha512-qRuSmNSkGQaHwNbM7J78Wwy+ghLEYF1zNrSeMxj4Kgw6y33O3mXcQ6Ie9fRvfU/YnxWkOchPXbaLb73TkIsfdg=="], + "babel-plugin-react-compiler": ["babel-plugin-react-compiler@1.0.0", "", { "dependencies": { "@babel/types": "^7.26.0" } }, "sha512-Ixm8tFfoKKIPYdCCKYTsqv+Fd4IJ0DQqMyEimo+pxUOMUR9cVPlwTrFt9Avu+3cb6Zp3mAzl+t1MrG2fxxKsxw=="], "bail": ["bail@2.0.2", "", {}, "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw=="], "balanced-match": ["balanced-match@4.0.4", "", {}, "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA=="], + "bare-events": ["bare-events@2.8.2", "", { "peerDependencies": { "bare-abort-controller": "*" }, "optionalPeers": ["bare-abort-controller"] }, "sha512-riJjyv1/mHLIPX4RwiK+oW9/4c3TEUeORHKefKAKnZ5kyslbN+HXowtbaVEqt4IMUB7OXlfixcs6gsFeo/jhiQ=="], + + "bare-fs": ["bare-fs@4.7.1", "", { "dependencies": { "bare-events": "^2.5.4", "bare-path": "^3.0.0", "bare-stream": "^2.6.4", "bare-url": "^2.2.2", "fast-fifo": "^1.3.2" }, "peerDependencies": { "bare-buffer": "*" }, "optionalPeers": ["bare-buffer"] }, "sha512-WDRsyVN52eAx/lBamKD6uyw8H4228h/x0sGGGegOamM2cd7Pag88GfMQalobXI+HaEUxpCkbKQUDOQqt9wawRw=="], + + "bare-os": ["bare-os@3.9.0", "", {}, "sha512-JTjuZyNIDpw+GytMO4a6TK1VXdVKKJr6DRxEHasyuYyShV2deuiHJK/ahGZlebc+SG0/wJCB9XK8gprBGDFi/Q=="], + + "bare-path": ["bare-path@3.0.0", "", { "dependencies": { "bare-os": "^3.0.1" } }, "sha512-tyfW2cQcB5NN8Saijrhqn0Zh7AnFNsnczRcuWODH0eYAXBsJ5gVxAUuNr7tsHSC6IZ77cA0SitzT+s47kot8Mw=="], + + "bare-stream": ["bare-stream@2.13.0", "", { "dependencies": { "streamx": "^2.25.0", "teex": "^1.0.1" }, "peerDependencies": { "bare-abort-controller": "*", "bare-buffer": "*", "bare-events": "*" }, 
"optionalPeers": ["bare-abort-controller", "bare-buffer", "bare-events"] }, "sha512-3zAJRZMDFGjdn+RVnNpF9kuELw+0Fl3lpndM4NcEOhb9zwtSo/deETfuIwMSE5BXanA0FrN1qVjffGwAg2Y7EA=="], + + "bare-url": ["bare-url@2.4.2", "", { "dependencies": { "bare-path": "^3.0.0" } }, "sha512-/9a2j4ac6ckpmAHvod/ob7x439OAHst/drc2Clnq+reRYd/ovddwcF4LfoxHyNk5AuGBnPg+HqFjmE/Zpq6v0A=="], + "base-64": ["base-64@1.0.0", "", {}, "sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg=="], "baseline-browser-mapping": ["baseline-browser-mapping@2.10.23", "", { "bin": { "baseline-browser-mapping": "dist/cli.cjs" } }, "sha512-xwVXGqevyKPsiuQdLj+dZMVjidjJV508TBqexND5HrF89cGdCYCJFB3qhcxRHSeMctdCfbR1jrxBajhDy7o29g=="], @@ -1885,6 +1919,8 @@ "colorjs.io": ["colorjs.io@0.5.2", "", {}, "sha512-twmVoizEW7ylZSN32OgKdXRmo1qg+wT5/6C3xu5b9QsWzSFAhHLn2xd8ro0diCsKfCj1RdaTP/nrcW+vAoQPIw=="], + "combined-stream": ["combined-stream@1.0.8", "", { "dependencies": { "delayed-stream": "~1.0.0" } }, "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg=="], + "comlink": ["comlink@4.4.2", "", {}, "sha512-OxGdvBmJuNKSCMO4NTl1L47VRp6xn2wG4F/2hYzB6tiCb709otOxtEYCSvK80PtjODfXXZu8ds+Nw5kVCjqd2g=="], "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="], @@ -1981,6 +2017,8 @@ "degit": ["degit@2.8.4", "", { "bin": { "degit": "degit" } }, "sha512-vqYuzmSA5I50J882jd+AbAhQtgK6bdKUJIex1JNfEUPENCgYsxugzKVZlFyMwV4i06MmnV47/Iqi5Io86zf3Ng=="], + "delayed-stream": ["delayed-stream@1.0.0", "", {}, "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="], + "depd": ["depd@2.0.0", "", {}, "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="], "dequal": ["dequal@2.0.3", "", {}, 
"sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="], @@ -2041,6 +2079,8 @@ "encodeurl": ["encodeurl@2.0.0", "", {}, "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="], + "end-of-stream": ["end-of-stream@1.4.5", "", { "dependencies": { "once": "^1.4.0" } }, "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg=="], + "enhanced-resolve": ["enhanced-resolve@5.21.0", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.3.3" } }, "sha512-otxSQPw4lkOZWkHpB3zaEQs6gWYEsmX4xQF68ElXC/TWvGxGMSGOvoNbaLXm6/cS/fSfHtsEdw90y20PCd+sCA=="], "entities": ["entities@4.5.0", "", {}, "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="], @@ -2057,6 +2097,8 @@ "es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="], + "es-set-tostringtag": ["es-set-tostringtag@2.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "get-intrinsic": "^1.2.6", "has-tostringtag": "^1.0.2", "hasown": "^2.0.2" } }, "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA=="], + "es-toolkit": ["es-toolkit@1.46.0", "", {}, "sha512-IToJ6ct9OLl5zz6WsC/1vZEwfSZ7Myil+ygl5Tf30Xjn9AEkzNB4kqp2G7VUJKF1DtTx/ra5M5KLlXvzOg51BA=="], "esast-util-from-estree": ["esast-util-from-estree@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "devlop": "^1.0.0", "estree-util-visit": "^2.0.0", "unist-util-position-from-estree": "^2.0.0" } }, "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ=="], @@ -2089,6 +2131,8 @@ "eventemitter3": ["eventemitter3@4.0.7", "", {}, "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="], + "events-universal": ["events-universal@1.0.1", "", { 
"dependencies": { "bare-events": "^2.7.0" } }, "sha512-LUd5euvbMLpwOF8m6ivPCbhQeSiYVNb8Vs0fQ8QjXo0JTkEHpz8pxdQf0gStltaPpw0Cca8b39KxvK9cfKRiAw=="], + "eventsource": ["eventsource@3.0.7", "", { "dependencies": { "eventsource-parser": "^3.0.1" } }, "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="], "eventsource-parser": ["eventsource-parser@3.0.8", "", {}, "sha512-70QWGkr4snxr0OXLRWsFLeRBIRPuQOvt4s8QYjmUlmlkyTZkRqS7EDVRZtzU3TiyDbXSzaOeF0XUKy8PchzukQ=="], @@ -2103,6 +2147,8 @@ "fast-equals": ["fast-equals@5.4.0", "", {}, "sha512-jt2DW/aNFNwke7AUd+Z+e6pz39KO5rzdbbFCg2sGafS4mk13MI7Z8O5z9cADNn5lhGODIgLwug6TZO2ctf7kcw=="], + "fast-fifo": ["fast-fifo@1.3.2", "", {}, "sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ=="], + "fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="], "fast-uri": ["fast-uri@3.1.0", "", {}, "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA=="], @@ -2139,6 +2185,8 @@ "fontkitten": ["fontkitten@1.0.3", "", { "dependencies": { "tiny-inflate": "^1.0.3" } }, "sha512-Wp1zXWPVUPBmfoa3Cqc9ctaKuzKAV6uLstRqlR56kSjplf5uAce+qeyYym7F+PHbGTk+tCEdkCW6RD7DX/gBZw=="], + "form-data": ["form-data@4.0.5", "", { "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", "hasown": "^2.0.2", "mime-types": "^2.1.12" } }, "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w=="], + "format": ["format@0.2.2", "", {}, "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww=="], "formatly": ["formatly@0.3.0", "", { "dependencies": { "fd-package-json": "^2.0.0" }, "bin": { "formatly": 
"bin/index.mjs" } }, "sha512-9XNj/o4wrRFyhSMJOvsuyMwy8aUfBaZ1VrqHVfohyXf0Sw0e+yfKG+xZaY3arGCOMdwFsqObtzVOc1gU9KiT9w=="], @@ -2197,6 +2245,8 @@ "has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="], + "has-tostringtag": ["has-tostringtag@1.0.2", "", { "dependencies": { "has-symbols": "^1.0.3" } }, "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw=="], + "hasown": ["hasown@2.0.3", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-ej4AhfhfL2Q2zpMmLo7U1Uv9+PyhIZpgQLGT1F9miIGmiCJIoCgSmczFdrc97mWT4kVY72KA+WnnhJ5pghSvSg=="], "hast-util-from-html": ["hast-util-from-html@2.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "devlop": "^1.1.0", "hast-util-from-parse5": "^8.0.0", "parse5": "^7.0.0", "vfile": "^6.0.0", "vfile-message": "^4.0.0" } }, "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw=="], @@ -2231,6 +2281,8 @@ "hono": ["hono@4.12.15", "", {}, "sha512-qM0jDhFEaCBb4TxoW7f53Qrpv9RBiayUHo0S52JudprkhvpjIrGoU1mnnr29Fvd1U335ZFPZQY1wlkqgfGXyLg=="], + "hpagent": ["hpagent@1.2.0", "", {}, "sha512-A91dYTeIB6NoXG+PxTQpCCDDnfHsW9kc06Lvpu1TEe9gnd6ZFeiBoRO9JvzEv6xK7EX97/dUE8g/vBMTqTS3CA=="], + "html-escaper": ["html-escaper@3.0.3", "", {}, "sha512-RuMffC89BOWQoY0WKGpIhn5gX3iI54O6nRA0yC124NYVtzjmFWBIiFd8M0x+ZdX0P9R4lADg1mgP8C7PxGOWuQ=="], "html-to-text": ["html-to-text@9.0.5", "", { "dependencies": { "@selderee/plugin-htmlparser2": "^0.11.0", "deepmerge": "^4.3.1", "dom-serializer": "^2.0.0", "htmlparser2": "^8.0.2", "selderee": "^0.11.0" } }, "sha512-qY60FjREgVZL03vJU6IfMV4GDjGBIoOyvuFdpBDIX9yTlDw0TjxVBQp+P8NvpdIXNJvfWBTNul7fsAQJq2FNpg=="], @@ -2321,11 +2373,13 @@ "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + "isomorphic-ws": ["isomorphic-ws@5.0.0", "", { "peerDependencies": { "ws": "*" } }, 
"sha512-muId7Zzn9ywDsyXgTIafTry2sV3nySZeUDe6YedVd1Hvuuep5AsIlqK+XefWpYTyJG5e503F2xIuT2lcU6rCSw=="], + "jake": ["jake@10.9.4", "", { "dependencies": { "async": "^3.2.6", "filelist": "^1.0.4", "picocolors": "^1.1.1" }, "bin": { "jake": "bin/cli.js" } }, "sha512-wpHYzhxiVQL+IV05BLE2Xn34zW1S223hvjtqk0+gsPrwd/8JNLXJgZZM/iPFsYc1xyphF+6M6EvdE5E9MBGkDA=="], "jiti": ["jiti@2.6.1", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ=="], - "jose": ["jose@6.2.2", "", {}, "sha512-d7kPDd34KO/YnzaDOlikGpOurfF0ByC2sEV4cANCtdqLlTfBlw2p14O/5d/zv40gJPbIQxfES3nSx1/oYNyuZQ=="], + "jose": ["jose@6.2.3", "", {}, "sha512-YYVDInQKFJfR/xa3ojUTl8c2KoTwiL1R5Wg9YCydwH0x0B9grbzlg5HC7mMjCtUJjbQ/YnGEZIhI5tCgfTb4Hw=="], "joycon": ["joycon@3.1.1", "", {}, "sha512-34wB/Y7MW7bzjKRjUKTa46I2Z7eV62Rkhva+KkopW7Qvv/OSWBqvkSY7vusOPrNuZcUG3tApvdVgNB8POj3SPw=="], @@ -2333,6 +2387,8 @@ "js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], + "jsep": ["jsep@1.4.0", "", {}, "sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw=="], + "jsesc": ["jsesc@3.1.0", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="], "json-schema": ["json-schema@0.4.0", "", {}, "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA=="], @@ -2351,6 +2407,8 @@ "jsonfile": ["jsonfile@6.2.1", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-zwOTdL3rFQ/lRdBnntKVOX6k5cKJwEc1HdilT71BWEu7J41gXIB2MRp+vxduPSwZJPWBxEzv4yH1wYLJGUHX4Q=="], + "jsonpath-plus": ["jsonpath-plus@10.4.0", "", { "dependencies": { "@jsep-plugin/assignment": "^1.3.0", "@jsep-plugin/regex": 
"^1.0.4", "jsep": "^1.4.0" }, "bin": { "jsonpath": "bin/jsonpath-cli.js", "jsonpath-plus": "bin/jsonpath-cli.js" } }, "sha512-T92WWatJXmhBbKsgH/0hl+jxjdXrifi5IKeMY02DWggRxX0UElcbVzPlmgLTbvsPeW1PasQ6xE2Q75stkhGbsA=="], + "jsonpointer": ["jsonpointer@5.0.1", "", {}, "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ=="], "kleur": ["kleur@3.0.3", "", {}, "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w=="], @@ -2619,6 +2677,8 @@ "nth-check": ["nth-check@2.1.1", "", { "dependencies": { "boolbase": "^1.0.0" } }, "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w=="], + "oauth4webapi": ["oauth4webapi@3.8.6", "", {}, "sha512-iwemM91xz8nryHti2yTmg5fhyEMVOkOXwHNqbvcATjyajb5oQxCQzrNOA6uElRHuMhQQTKUyFKV9y/CNyg25BQ=="], + "object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="], "object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="], @@ -2637,6 +2697,8 @@ "oniguruma-to-es": ["oniguruma-to-es@4.3.6", "", { "dependencies": { "oniguruma-parser": "^0.12.2", "regex": "^6.1.0", "regex-recursion": "^6.0.2" } }, "sha512-csuQ9x3Yr0cEIs/Zgx/OEt9iBw9vqIunAPQkx19R/fiMq2oGVTgcMqO/V3Ybqefr1TBvosI6jU539ksaBULJyA=="], + "openid-client": ["openid-client@6.8.4", "", { "dependencies": { "jose": "^6.2.2", "oauth4webapi": "^3.8.5" } }, "sha512-QSw0BA08piujetEwfZsHoTrDpMEha7GDZDicQqVwX4u0ChCjefvjDB++TZ8BTg76UpwhzIQgdvvfgfl3HpCSAw=="], + "orderedmap": ["orderedmap@2.1.1", "", {}, "sha512-TvAWxi0nDe1j/rtMcWcIj94+Ffe6n7zhow33h40SKxmsmozs6dz/e+EajymfoFcHd7sxNn8yHM8839uixMOV6g=="], "oxc-resolver": ["oxc-resolver@11.19.1", "", { "optionalDependencies": { "@oxc-resolver/binding-android-arm-eabi": "11.19.1", "@oxc-resolver/binding-android-arm64": "11.19.1", 
"@oxc-resolver/binding-darwin-arm64": "11.19.1", "@oxc-resolver/binding-darwin-x64": "11.19.1", "@oxc-resolver/binding-freebsd-x64": "11.19.1", "@oxc-resolver/binding-linux-arm-gnueabihf": "11.19.1", "@oxc-resolver/binding-linux-arm-musleabihf": "11.19.1", "@oxc-resolver/binding-linux-arm64-gnu": "11.19.1", "@oxc-resolver/binding-linux-arm64-musl": "11.19.1", "@oxc-resolver/binding-linux-ppc64-gnu": "11.19.1", "@oxc-resolver/binding-linux-riscv64-gnu": "11.19.1", "@oxc-resolver/binding-linux-riscv64-musl": "11.19.1", "@oxc-resolver/binding-linux-s390x-gnu": "11.19.1", "@oxc-resolver/binding-linux-x64-gnu": "11.19.1", "@oxc-resolver/binding-linux-x64-musl": "11.19.1", "@oxc-resolver/binding-openharmony-arm64": "11.19.1", "@oxc-resolver/binding-wasm32-wasi": "11.19.1", "@oxc-resolver/binding-win32-arm64-msvc": "11.19.1", "@oxc-resolver/binding-win32-ia32-msvc": "11.19.1", "@oxc-resolver/binding-win32-x64-msvc": "11.19.1" } }, "sha512-qE/CIg/spwrTBFt5aKmwe3ifeDdLfA2NESN30E42X/lII5ClF8V7Wt6WIJhcGZjp0/Q+nQ+9vgxGk//xZNX2hg=="], @@ -2727,9 +2789,9 @@ "postgres-interval": ["postgres-interval@1.2.0", "", { "dependencies": { "xtend": "^4.0.0" } }, "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ=="], - "posthog-js": ["posthog-js@1.372.2", "", { "dependencies": { "@opentelemetry/api": "^1.9.0", "@opentelemetry/api-logs": "^0.208.0", "@opentelemetry/exporter-logs-otlp-http": "^0.208.0", "@opentelemetry/resources": "^2.2.0", "@opentelemetry/sdk-logs": "^0.208.0", "@posthog/core": "1.27.6", "@posthog/types": "1.372.2", "core-js": "^3.38.1", "dompurify": "^3.3.2", "fflate": "^0.4.8", "preact": "^10.28.2", "query-selector-shadow-dom": "^1.0.1", "web-vitals": "^5.1.0" } }, "sha512-FS+vKDXB1vghrVch3EDi3IRcoH5OnLQYxchHWi+8U4D4PzWQZnZLo5vyMRL1+ZUHNEZ2v599uX3UKhRZv2z6Cg=="], + "posthog-js": ["posthog-js@1.372.3", "", { "dependencies": { "@opentelemetry/api": "^1.9.0", "@opentelemetry/api-logs": "^0.208.0", 
"@opentelemetry/exporter-logs-otlp-http": "^0.208.0", "@opentelemetry/resources": "^2.2.0", "@opentelemetry/sdk-logs": "^0.208.0", "@posthog/core": "1.27.7", "@posthog/types": "1.372.3", "core-js": "^3.38.1", "dompurify": "^3.3.2", "fflate": "^0.4.8", "preact": "^10.28.2", "query-selector-shadow-dom": "^1.0.1", "web-vitals": "^5.1.0" } }, "sha512-CpKWMt6RkgY4lPpyvYzKcilKKB5VhL2gmS8HgibxmXZkEk/2rUxrEtRMScH8xi4n5WDaNSluCo87dh9yo9zArQ=="], - "posthog-node": ["posthog-node@5.30.5", "", { "dependencies": { "@posthog/core": "1.27.6" }, "peerDependencies": { "rxjs": "^7.0.0" }, "optionalPeers": ["rxjs"] }, "sha512-TlxyX+Yip2cChU1YLDWbCEVOlDEbTsO3f5ujMqR94dRLUeQw0wBCXMgjDcnxhVisdzXFTzcBoWvf0LXDWZqo8A=="], + "posthog-node": ["posthog-node@5.30.6", "", { "dependencies": { "@posthog/core": "1.27.7" }, "peerDependencies": { "rxjs": "^7.0.0" }, "optionalPeers": ["rxjs"] }, "sha512-deZuSiLkpdEipiywkww1FhQoKpVVFmJP6SAVQcZcMbugTLwJRYSGjgm+qV0Y91xghf2yP6Nr5Plfl52i9Qj15Q=="], "preact": ["preact@10.29.1", "", {}, "sha512-gQCLc/vWroE8lIpleXtdJhTFDogTdZG9AjMUpVkDf2iTCNwYNWA+u16dL41TqUDJO4gm2IgrcMv3uTpjd4Pwmg=="], @@ -2783,6 +2845,8 @@ "proxy-addr": ["proxy-addr@2.0.7", "", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="], + "pump": ["pump@3.0.4", "", { "dependencies": { "end-of-stream": "^1.1.0", "once": "^1.3.1" } }, "sha512-VS7sjc6KR7e1ukRFhQSY5LM2uBWAUPiOPa/A3mkKmiMwSmRFUITt0xuj+/lesgnCv+dPIEYlkzrcyXgquIHMcA=="], + "punycode.js": ["punycode.js@2.3.1", "", {}, "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA=="], "pvtsutils": ["pvtsutils@1.3.6", "", { "dependencies": { "tslib": "^2.8.1" } }, "sha512-PLgQXQ6H2FWCaeRak8vvk1GW462lMxB5s3Jm673N82zI4vqtVUPuZdffdZbPDFRoU8kAhItWFtPCWiPpp4/EDg=="], @@ -2923,6 +2987,8 @@ "reusify": ["reusify@1.1.0", "", {}, 
"sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], + "rfc4648": ["rfc4648@1.5.4", "", {}, "sha512-rRg/6Lb+IGfJqO05HZkN50UtY7K/JhxJag1kP23+zyMfrvoB0B7RWv06MbOzoc79RgCdNTiUaNsTT1AJZ7Z+cg=="], + "rollup": ["rollup@4.60.2", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.60.2", "@rollup/rollup-android-arm64": "4.60.2", "@rollup/rollup-darwin-arm64": "4.60.2", "@rollup/rollup-darwin-x64": "4.60.2", "@rollup/rollup-freebsd-arm64": "4.60.2", "@rollup/rollup-freebsd-x64": "4.60.2", "@rollup/rollup-linux-arm-gnueabihf": "4.60.2", "@rollup/rollup-linux-arm-musleabihf": "4.60.2", "@rollup/rollup-linux-arm64-gnu": "4.60.2", "@rollup/rollup-linux-arm64-musl": "4.60.2", "@rollup/rollup-linux-loong64-gnu": "4.60.2", "@rollup/rollup-linux-loong64-musl": "4.60.2", "@rollup/rollup-linux-ppc64-gnu": "4.60.2", "@rollup/rollup-linux-ppc64-musl": "4.60.2", "@rollup/rollup-linux-riscv64-gnu": "4.60.2", "@rollup/rollup-linux-riscv64-musl": "4.60.2", "@rollup/rollup-linux-s390x-gnu": "4.60.2", "@rollup/rollup-linux-x64-gnu": "4.60.2", "@rollup/rollup-linux-x64-musl": "4.60.2", "@rollup/rollup-openbsd-x64": "4.60.2", "@rollup/rollup-openharmony-arm64": "4.60.2", "@rollup/rollup-win32-arm64-msvc": "4.60.2", "@rollup/rollup-win32-ia32-msvc": "4.60.2", "@rollup/rollup-win32-x64-gnu": "4.60.2", "@rollup/rollup-win32-x64-msvc": "4.60.2", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-J9qZyW++QK/09NyN/zeO0dG/1GdGfyp9lV8ajHnRVLfo/uFsbji5mHnDgn/qYdUHyCkM2N+8VyspgZclfAh0eQ=="], "rope-sequence": ["rope-sequence@1.3.4", "", {}, "sha512-UT5EDe2cu2E/6O4igUr5PSFs23nvvukicWHx6GnOPlHAiiYbzNuCRQCuiUdHJQcqKalLKlrYJnjY0ySGsXNQXQ=="], @@ -3031,8 +3097,14 @@ "slice-ansi": ["slice-ansi@8.0.0", "", { "dependencies": { "ansi-styles": "^6.2.3", "is-fullwidth-code-point": "^5.1.0" } }, 
"sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg=="], + "smart-buffer": ["smart-buffer@4.2.0", "", {}, "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg=="], + "smol-toml": ["smol-toml@1.6.1", "", {}, "sha512-dWUG8F5sIIARXih1DTaQAX4SsiTXhInKf1buxdY9DIg4ZYPZK5nGM1VRIYmEbDbsHt7USo99xSLFu5Q1IqTmsg=="], + "socks": ["socks@2.8.7", "", { "dependencies": { "ip-address": "^10.0.1", "smart-buffer": "^4.2.0" } }, "sha512-HLpt+uLy/pxB+bum/9DzAgiKS8CX1EvbWxI4zlmgGCExImLdiad2iCwXT5Z4c9c3Eq8rP2318mPW2c+QbtjK8A=="], + + "socks-proxy-agent": ["socks-proxy-agent@8.0.5", "", { "dependencies": { "agent-base": "^7.1.2", "debug": "^4.3.4", "socks": "^2.8.3" } }, "sha512-HehCEsotFqbPW9sJ8WVYB6UbmIMv7kUUORIF2Nncq4VQvBfNBLibW9YZR5dlYCSUhwcD628pRllm7n+E+YTzJw=="], + "sonner": ["sonner@2.0.7", "", { "peerDependencies": { "react": "^18.0.0 || ^19.0.0 || ^19.0.0-rc", "react-dom": "^18.0.0 || ^19.0.0 || ^19.0.0-rc" } }, "sha512-W6ZN4p58k8aDKA4XPcx2hpIQXBRAgyiWVkYhT7CvK6D3iAu7xjvVyhQHg2/iaKJZ1XVJ4r7XuwGL+WGEK37i9w=="], "sorted-btree": ["sorted-btree@1.8.1", "", {}, "sha512-395+XIP+wqNn3USkFSrNz7G3Ss/MXlZEqesxvzCRFwL14h6e8LukDHdLBePn5pwbm5OQ9vGu8mDyz2lLDIqamQ=="], @@ -3051,6 +3123,10 @@ "statuses": ["statuses@2.0.2", "", {}, "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw=="], + "stream-buffers": ["stream-buffers@3.0.3", "", {}, "sha512-pqMqwQCso0PBJt2PQmDO0cFj0lyqmiwOMiMSkVtRokl7e+ZTRYgDHKnuZNbqjiJXgsg4nuqtD/zxuo9KqTp0Yw=="], + + "streamx": ["streamx@2.25.0", "", { "dependencies": { "events-universal": "^1.0.0", "fast-fifo": "^1.3.2", "text-decoder": "^1.1.0" } }, "sha512-0nQuG6jf1w+wddNEEXCF4nTg3LtufWINB5eFEN+5TNZW7KWJp6x87+JFL43vaAUPyCfH1wID+mNVyW6OHtFamg=="], + "string-width": ["string-width@8.2.1", "", { "dependencies": { "get-east-asian-width": "^1.5.0", "strip-ansi": "^7.1.2" } }, 
"sha512-IIaP0g3iy9Cyy18w3M9YcaDudujEAVHKt3a3QJg1+sr/oX96TbaGUubG0hJyCjCBThFH+tFpcIyoUHUn1ogaLA=="], "stringify-entities": ["stringify-entities@4.0.4", "", { "dependencies": { "character-entities-html4": "^2.0.0", "character-entities-legacy": "^3.0.0" } }, "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg=="], @@ -3095,8 +3171,16 @@ "tar": ["tar@7.5.13", "", { "dependencies": { "@isaacs/fs-minipass": "^4.0.0", "chownr": "^3.0.0", "minipass": "^7.1.2", "minizlib": "^3.1.0", "yallist": "^5.0.0" } }, "sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng=="], + "tar-fs": ["tar-fs@3.1.2", "", { "dependencies": { "pump": "^3.0.0", "tar-stream": "^3.1.5" }, "optionalDependencies": { "bare-fs": "^4.0.1", "bare-path": "^3.0.0" } }, "sha512-QGxxTxxyleAdyM3kpFs14ymbYmNFrfY+pHj7Z8FgtbZ7w2//VAgLMac7sT6nRpIHjppXO2AwwEOg0bPFVRcmXw=="], + + "tar-stream": ["tar-stream@3.1.8", "", { "dependencies": { "b4a": "^1.6.4", "bare-fs": "^4.5.5", "fast-fifo": "^1.2.0", "streamx": "^2.15.0" } }, "sha512-U6QpVRyCGHva435KoNWy9PRoi2IFYCgtEhq9nmrPPpbRacPs9IH4aJ3gbrFC8dPcXvdSZ4XXfXT5Fshbp2MtlQ=="], + + "teex": ["teex@1.0.1", "", { "dependencies": { "streamx": "^2.12.5" } }, "sha512-eYE6iEI62Ni1H8oIa7KlDU6uQBtqr4Eajni3wX7rpfXD8ysFx8z0+dri+KWEPWpBsxXfxu58x/0jvTVT1ekOSg=="], + "terminal-size": ["terminal-size@4.0.1", "", {}, "sha512-avMLDQpUI9I5XFrklECw1ZEUPJhqzcwSWsyyI8blhRLT+8N1jLJWLWWYQpB2q2xthq8xDvjZPISVh53T/+CLYQ=="], + "text-decoder": ["text-decoder@1.2.7", "", { "dependencies": { "b4a": "^1.6.4" } }, "sha512-vlLytXkeP4xvEq2otHeJfSQIRyWxo/oZGEbXrtEEF9Hnmrdly59sUbzZ/QgyWuLYHctCHxFF4tRQZNQ9k60ExQ=="], + "thenify": ["thenify@3.3.1", "", { "dependencies": { "any-promise": "^1.0.0" } }, "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw=="], "thenify-all": ["thenify-all@1.6.0", "", { "dependencies": { "thenify": ">= 3.1.0 < 4" } }, 
"sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA=="], @@ -3265,7 +3349,7 @@ "wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="], - "ws": ["ws@8.18.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw=="], + "ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], "xml": ["xml@1.0.1", "", {}, "sha512-huCv9IH9Tcf95zuYCsQraZtWnJvBtLVE0QHMOs8bWyZAFZNDcYjsPq1nEx8jKA9y+Beo9v+7OBPRisQTjinQMw=="], @@ -3343,6 +3427,8 @@ "@better-auth/passkey/@better-fetch/fetch": ["@better-fetch/fetch@1.1.21", "", {}, "sha512-/ImESw0sskqlVR94jB+5+Pxjf+xBwDZF/N5+y2/q4EqD7IARUTSpPfIo8uf39SYpCxyOCtbyYpUrZ3F/k0zT4A=="], + "@cloudflare/vite-plugin/ws": ["ws@8.18.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw=="], + "@cspotcode/source-map-support/@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.9", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.0.3", "@jridgewell/sourcemap-codec": "^1.4.10" } }, "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ=="], "@daveyplate/better-auth-ui/@better-fetch/fetch": ["@better-fetch/fetch@1.1.21", "", {}, "sha512-/ImESw0sskqlVR94jB+5+Pxjf+xBwDZF/N5+y2/q4EqD7IARUTSpPfIo8uf39SYpCxyOCtbyYpUrZ3F/k0zT4A=="], @@ -3673,6 +3759,8 @@ "filelist/minimatch": ["minimatch@5.1.9", "", { "dependencies": { "brace-expansion": "^2.0.1" } 
}, "sha512-7o1wEA2RyMP7Iu7GNba9vc0RWWGACJOCZBJX2GJWip0ikV+wcOsgVuY9uE8CPiyQhkGFSlhuSkZPavN7u1c2Fw=="], + "form-data/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], + "git-diff/chalk": ["chalk@2.4.2", "", { "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", "supports-color": "^5.3.0" } }, "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ=="], "git-diff/diff": ["diff@3.5.1", "", {}, "sha512-Z3u54A8qGyqFOSr2pk0ijYs8mOE9Qz8kTvtKeBI+upoG9j04Sq+oI7W8zAJiQybDcESET8/uIdHzs0p3k4fZlw=="], @@ -3693,8 +3781,6 @@ "ink/chalk": ["chalk@5.6.2", "", {}, "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA=="], - "ink/ws": ["ws@8.20.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA=="], - "kysely-codegen/dotenv": ["dotenv@16.6.1", "", {}, "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow=="], "kysely-pglite/chokidar": ["chokidar@3.6.0", "", { "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", "readdirp": "~3.6.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw=="], @@ -3707,6 +3793,8 @@ "micromatch/picomatch": ["picomatch@2.3.2", "", {}, "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA=="], + "miniflare/ws": ["ws@8.18.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, 
"sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw=="], + "monaco-editor/dompurify": ["dompurify@3.2.7", "", { "optionalDependencies": { "@types/trusted-types": "^2.0.7" } }, "sha512-WhL/YuveyGXJaerVlMYGWhvQswa7myDG17P7Vu65EWC05o8vfeNbvNf4d/BOvH99+ZW+LlQsc1GDKMa1vNK6dw=="], "monaco-editor/marked": ["marked@14.0.0", "", { "bin": { "marked": "bin/marked.js" } }, "sha512-uIj4+faQ+MgHgwUW1l2PsPglZLOLOT1uErt06dAPtx2kjteLAkbsd/0FiYg/MGS+i7ZKLb7w2WClxHkzOOuryQ=="], @@ -3967,6 +4055,8 @@ "filelist/minimatch/brace-expansion": ["brace-expansion@2.1.0", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-TN1kCZAgdgweJhWWpgKYrQaMNHcDULHkWwQIspdtjV4Y5aurRdZpjAqn6yX3FPqTA9ngHCc4hJxMAMgGfve85w=="], + "form-data/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], + "git-diff/chalk/ansi-styles": ["ansi-styles@3.2.1", "", { "dependencies": { "color-convert": "^1.9.0" } }, "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA=="], "git-diff/chalk/escape-string-regexp": ["escape-string-regexp@1.0.5", "", {}, "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="], diff --git a/deploy/helm/Chart.lock b/deploy/helm/Chart.lock index 25b832914d..327f1855cb 100644 --- a/deploy/helm/Chart.lock +++ b/deploy/helm/Chart.lock @@ -5,5 +5,8 @@ dependencies: - name: opentelemetry-collector repository: https://open-telemetry.github.io/opentelemetry-helm-charts version: 0.147.2 -digest: sha256:304c526584beae64332d2a29dc9c0b3b6eb753533516753e39d22d4c2c052004 -generated: "2026-04-08T11:33:29.690185-03:00" +- name: agent-sandbox + repository: file://./charts/agent-sandbox + version: 0.1.0 +digest: sha256:496ac6feb3655dbb973acf67117b2d3f03568d51178ef6048c86c93813b63436 +generated: "2026-04-28T17:44:22.721996-03:00" diff --git a/deploy/helm/Chart.yaml 
b/deploy/helm/Chart.yaml index 9ef2c22dd2..4d71400474 100644 --- a/deploy/helm/Chart.yaml +++ b/deploy/helm/Chart.yaml @@ -4,6 +4,7 @@ description: A Helm chart for deco Studio self-hosted deployment type: application version: 0.6.2 appVersion: "latest" +kubeVersion: ">=1.30.0-0" dependencies: - name: nats @@ -14,3 +15,10 @@ dependencies: version: "0.147.2" repository: "https://open-telemetry.github.io/opentelemetry-helm-charts" condition: otel.collector.enabled + # Local subchart — vendors kubernetes-sigs/agent-sandbox operator + CRDs for + # the mesh k8s sandbox runner. Upstream doesn't publish a Helm chart, so we + # pin and ship release YAML here. See charts/agent-sandbox/README.md. + - name: agent-sandbox + version: "0.1.0" + repository: "file://./charts/agent-sandbox" + condition: sandbox.agentSandbox.enabled diff --git a/deploy/helm/README.md b/deploy/helm/README.md index 32f0498615..ddc44b3d3a 100644 --- a/deploy/helm/README.md +++ b/deploy/helm/README.md @@ -1339,6 +1339,204 @@ kubectl top pods -l app.kubernetes.io/instance=deco-studio -n deco-studio - **Liveness**: Kills and recreates pods with problems - **Readiness**: Removes pods from Service when not ready +## Agent-sandbox runner + +Mesh ships with three sandbox runners (Docker, Freestyle, agent-sandbox) for +isolating user code execution. The agent-sandbox runner uses +[`kubernetes-sigs/agent-sandbox`](https://github.com/kubernetes-sigs/agent-sandbox) +as its control loop. For self-hosters on Kubernetes it's the most scalable +option; for single-node or dev setups the Docker runner is simpler and the +default. + +### Enable + +```bash +helm install deco-studio deploy/helm/ \ + --set sandbox.agentSandbox.enabled=true \ + --namespace deco-studio --create-namespace +``` + +Then set `STUDIO_SANDBOX_RUNNER=agent-sandbox` in the mesh server environment +(`configMap.meshConfig` or `env:` in values). 
With `enabled=true` the chart +installs, in order: + +- The vendored [`agent-sandbox`](./charts/agent-sandbox/README.md) subchart + — operator Deployment + four `v1alpha1` CRDs (`Sandbox`, `SandboxClaim`, + `SandboxTemplate`, `SandboxWarmPool`) in the `agent-sandbox-system` + namespace. +- A `SandboxTemplate` named `studio-sandbox` — the shared pod template every + `SandboxClaim` references. Image, pull policy, and resources come from + `sandbox.agentSandbox.*`. +- A `NetworkPolicy` that scopes sandbox-pod ingress/egress (see + [Security](#security)). +- Optionally a `SandboxWarmPool` (disabled unless + `sandbox.agentSandbox.warmPool.enabled=true`). + +With `enabled=false` (default) none of the above renders — `helm template` +emits zero sandbox-related resources. + +### Prereqs + +- A working Kubernetes cluster (EKS / GKE / AKS / kind / k3s — tested on + kind locally). The subchart bundles everything the operator needs; + there's no out-of-chart install step. +- Cluster capacity for sandbox pods. Defaults request `500m` CPU / `1Gi` + memory per sandbox and cap at `2` CPU / `4Gi` / `10Gi` ephemeral. Tune + via `sandbox.agentSandbox.resources.*`. +- For preview URLs (`*.preview.`), see + [Sandbox preview ingress](#sandbox-preview-ingress) below — this is the + standard path and uses the Gateway API + cert-manager. +- The legacy + `sandbox.agentSandbox.networkPolicy.previewGatewayNamespace` knob is only + needed for setups that route preview traffic *around* mesh, terminating + directly on the sandbox's port 3000. The standard path lands on port + 9000 via mesh, where the daemon's CSP/HMR rewrites apply. + +### Sandbox preview ingress + +When `sandbox.agentSandbox.previewGateway.enabled=true`, the chart renders +an Istio Gateway + HTTPRoute + cert-manager Certificate that send +`*.preview.` traffic to the mesh Service. Mesh recognises the +Host header and reverse-proxies to the matching sandbox's daemon at +port 9000 — including WebSocket upgrades, so vite HMR works. 
+ +Required values: + +```yaml +sandbox: + agentSandbox: + enabled: true + previewUrlPattern: "https://{handle}.preview.example.com" + previewGateway: + enabled: true + domain: "preview.example.com" + clusterIssuer: "cloudflare-dns01" # name of an existing ClusterIssuer + # Optional overrides: + # gatewayClassName: "istio" + # namespace: "istio-system" +``` + +Two things are *not* templated and have to be done by hand once per +cluster — the chart will not work end-to-end without these: + +#### 1. DNS — wildcard A/CNAME + +In your DNS provider (Cloudflare, Route53, etc.), add a wildcard record +pointing at the cluster's external load balancer: + +``` +*.preview.example.com → +``` + +For Cloudflare, set the record to **DNS only** (grey-cloud, not orange) +so cert-manager's DNS-01 challenge can update TXT records under the +zone. Cloudflare proxy mode (orange-cloud) blocks DNS-01. + +To find the LB hostname for an Istio Gateway: + +```bash +kubectl get svc -n istio-system # look for the LoadBalancer service +``` + +#### 2. cert-manager DNS-01 ClusterIssuer + +DNS-01 is the only solver that works for wildcard certs. The chart does +not template the ClusterIssuer because the API token is per-cluster +infrastructure. Example for Cloudflare: + +```yaml +# Apply to the cluster ONCE — outside the chart. 
+apiVersion: v1 +kind: Secret +metadata: + name: cloudflare-api-token + namespace: cert-manager +type: Opaque +stringData: + api-token: "" +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: cloudflare-dns01 +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: admin@example.com + privateKeySecretRef: + name: cloudflare-dns01-account-key + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cloudflare-api-token + key: api-token +``` + +Verify the cert provisions after `helm upgrade`: + +```bash +kubectl get certificate -n istio-system +kubectl describe certificate -sandbox-preview -n istio-system +``` + +cert-manager logs in `cert-manager` namespace are the place to look if +the cert hangs in `Pending` for more than a few minutes. + +### Local kind + +Pull policy defaults to `IfNotPresent` (prod). For local kind clusters +where the image is loaded via `kind load docker-image`, override: + +```bash +helm install deco-studio deploy/helm/ \ + --set sandbox.agentSandbox.enabled=true \ + --set sandbox.agentSandbox.image.pullPolicy=Never \ + --kube-context kind- \ + --namespace deco-studio --create-namespace +``` + +The repo also ships `deploy/k8s-sandbox/local/` for contributors developing +the runner itself — that path uses raw `kubectl apply` and is independent +of this Helm chart. + +### Bumping the upstream operator + +The `agent-sandbox` subchart pins `v0.4.2`. To update: + +```bash +./deploy/helm/charts/agent-sandbox/vendor.sh vX.Y.Z +# edit charts/agent-sandbox/Chart.yaml: +# appVersion: "X.Y.Z" +# version: bump subchart version (e.g. 0.1.0 -> 0.2.0) +helm dependency update deploy/helm/ +``` + +Helm never upgrades CRDs on `helm upgrade` (this is intentional upstream +behavior). After `vendor.sh` pulls a new CRD schema, apply it manually: + +```bash +kubectl apply -f deploy/helm/charts/agent-sandbox/crds/agent-sandbox-crds.yaml +``` + +Then `helm upgrade` as normal. 
See `charts/agent-sandbox/README.md` for +the full story. + +### Security + +The default `NetworkPolicy` (`sandbox.agentSandbox.networkPolicy.enabled=true`): + +- **Ingress**: allows mesh server pods (chart's own selector labels) to + reach port 9000 (daemon) on sandbox pods; optionally allows the + configured preview gateway namespace on port 3000 (dev server). +- **Egress**: permits DNS to CoreDNS + public internet on 80/443. Blocks + RFC1918, `169.254.0.0/16` (link-local + IMDSv2), and the EKS pod CIDR + (`100.64.0.0/10`) via `ipBlock.except`. + +On EKS, also set IMDSv2 hop-limit=1 on the node launch template — a +misconfigured egress rule alone won't protect the node IAM role if IMDS is +reachable by hop. + ## License This chart is part of the deco-studio project. diff --git a/deploy/helm/charts/agent-sandbox-0.1.0.tgz b/deploy/helm/charts/agent-sandbox-0.1.0.tgz new file mode 100644 index 0000000000..7a9c70188d Binary files /dev/null and b/deploy/helm/charts/agent-sandbox-0.1.0.tgz differ diff --git a/deploy/helm/charts/agent-sandbox/Chart.yaml b/deploy/helm/charts/agent-sandbox/Chart.yaml new file mode 100644 index 0000000000..469a83caca --- /dev/null +++ b/deploy/helm/charts/agent-sandbox/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +name: agent-sandbox +description: kubernetes-sigs/agent-sandbox operator + CRDs, vendored for mesh +type: application +# Subchart version is independent of upstream. Bump on any vendored change. +version: 0.1.0 +# Tracks the upstream release pinned by vendor.sh. 
+appVersion: "0.4.2" diff --git a/deploy/helm/charts/agent-sandbox/README.md b/deploy/helm/charts/agent-sandbox/README.md new file mode 100644 index 0000000000..16c92db046 --- /dev/null +++ b/deploy/helm/charts/agent-sandbox/README.md @@ -0,0 +1,68 @@ +# agent-sandbox subchart + +Local Helm subchart that vendors +[`kubernetes-sigs/agent-sandbox`](https://github.com/kubernetes-sigs/agent-sandbox) +so the parent `chart-deco-studio` can install the operator + CRDs for the +mesh k8s sandbox runner. Upstream does not publish a Helm chart as of +v0.4.2 — only raw `manifest.yaml` + `extensions.yaml` release assets — so +we vendor the YAML here and reference the subchart via `file://`. + +Version pin: **v0.4.2** (see `Chart.yaml` `appVersion`). + +## Layout + +``` +agent-sandbox/ +├── Chart.yaml +├── values.yaml # intentionally empty — no tunables +├── vendor.sh # re-fetch upstream YAML on version bump +├── crds/ +│ └── agent-sandbox-crds.yaml # all CustomResourceDefinition docs +└── templates/ + └── agent-sandbox-manifest.yaml # Deployment, RBAC, Namespace, Service, ServiceAccount +``` + +Upstream `extensions.yaml` and `manifest.yaml` both contain a mix of CRDs +and non-CRD resources (controller Deployment, RBAC, Namespace). `vendor.sh` +splits on `kind: CustomResourceDefinition` at document boundaries and +routes each doc to the right directory. + +## CRD upgrade caveat + +Helm installs files in `crds/` on first install but **never upgrades +them** (this is an intentional Helm design choice — CRD upgrades are +treated as a manual operation because schema changes can break existing +custom resources). That's acceptable for mesh because: + +- Upstream pin is tight (v0.4.2). +- CRD schema changes are rare between upstream patch releases. 
+ +To pick up upstream CRD schema changes after running `vendor.sh`: + +```bash +kubectl apply -f deploy/helm/charts/agent-sandbox/crds/agent-sandbox-crds.yaml +# then helm upgrade as normal +``` + +Uninstall + reinstall also works but drops existing SandboxClaims. + +## Bumping upstream version + +```bash +./vendor.sh v0.4.3 # re-fetches + re-splits +# edit Chart.yaml: appVersion -> "0.4.3" +# bump version: ... (subchart version; e.g. 0.1.0 -> 0.2.0) +helm dependency update ../../ # refresh parent Chart.lock +``` + +Check the upstream release notes for CRD schema changes — if the +`sandboxtemplates` or `sandboxwarmpools` CRD shape changes, the parent +chart's `templates/sandbox-template.yaml` and `templates/sandbox-warm-pool.yaml` +may need corresponding edits. + +## Why not an upstream Helm chart? + +Upstream hasn't published one. Filing a request with prior art pointing at +this subchart is worthwhile — if it lands, this vendored copy goes away +and the parent chart switches to `repository: oci://...` or a Helm repo. +Not a blocker for mesh. diff --git a/deploy/helm/charts/agent-sandbox/crds/agent-sandbox-crds.yaml b/deploy/helm/charts/agent-sandbox/crds/agent-sandbox-crds.yaml new file mode 100644 index 0000000000..07daf0a113 --- /dev/null +++ b/deploy/helm/charts/agent-sandbox/crds/agent-sandbox-crds.yaml @@ -0,0 +1,8286 @@ +# Vendored from kubernetes-sigs/agent-sandbox v0.4.2 via vendor.sh. +# Do not edit by hand — re-run vendor.sh to refresh. +# Contains: CustomResourceDefinition docs only. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: sandboxes.agents.x-k8s.io +spec: + group: agents.x-k8s.io + names: + kind: Sandbox + listKind: SandboxList + plural: sandboxes + shortNames: + - sandbox + singular: sandbox + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + podTemplate: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + 
type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + 
requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + 
type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey 
+ type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + 
type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + 
type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - 
name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + 
type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + 
successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + 
properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + 
properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + 
type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: 
+ host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: 
atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: 
string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostUsers: + type: boolean + hostname: + type: string + hostnameOverride: + type: string + imagePullSecrets: + items: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + 
x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: 
string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: 
true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + 
terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: 
int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + 
name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + resourceClaims: + items: + properties: + name: + type: string + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: 
string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxChangePolicy: + type: string + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: 
string + setHostnameAsFQDN: + type: boolean + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + 
shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + 
x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + 
x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + 
readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + 
required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podCertificate: + properties: + certificateChainPath: + type: string + credentialBundlePath: + type: string + keyPath: + type: string + keyType: + type: string + maxExpirationSeconds: + format: int32 + type: integer + signerName: + type: string + userAnnotations: + additionalProperties: + type: string + type: object + required: + - keyType + - signerName + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + workloadRef: + properties: + name: + type: string + podGroup: + type: string + podGroupReplicaKey: + type: string + required: + - name + - podGroup + type: object + required: + - containers + type: object + required: + - spec + type: object + replicas: + default: 
1 + format: int32 + maximum: 1 + minimum: 0 + type: integer + shutdownPolicy: + default: Retain + enum: + - Delete + - Retain + type: string + shutdownTime: + format: date-time + type: string + volumeClaimTemplates: + items: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + name: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + 
type: object + required: + - spec + type: object + type: array + required: + - podTemplate + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + podIPs: + items: + type: string + type: array + replicas: + format: int32 + minimum: 0 + type: integer + selector: + type: string + service: + type: string + serviceFQDN: + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: sandboxclaims.extensions.agents.x-k8s.io +spec: + group: extensions.agents.x-k8s.io + names: + kind: SandboxClaim + listKind: SandboxClaimList + plural: sandboxclaims + shortNames: + - sandboxclaim + singular: sandboxclaim + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + additionalPodMetadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + env: + items: + properties: + 
containerName: + type: string + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + lifecycle: + properties: + shutdownPolicy: + default: Retain + enum: + - Delete + - DeleteForeground + - Retain + type: string + shutdownTime: + format: date-time + type: string + type: object + sandboxTemplateRef: + properties: + name: + type: string + required: + - name + type: object + warmpool: + default: default + type: string + required: + - sandboxTemplateRef + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + maxLength: 32768 + type: string + observedGeneration: + format: int64 + minimum: 0 + type: integer + reason: + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + enum: + - "True" + - "False" + - Unknown + type: string + type: + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + sandbox: + properties: + name: + type: string + podIPs: + items: + type: string + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: sandboxtemplates.extensions.agents.x-k8s.io +spec: + group: extensions.agents.x-k8s.io + names: + kind: SandboxTemplate + listKind: SandboxTemplateList + plural: sandboxtemplates + shortNames: + - sandboxtemplate + singular: sandboxtemplate + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string 
+ metadata: + type: object + spec: + properties: + envVarsInjectionPolicy: + default: Disallowed + enum: + - Allowed + - Overrides + - Disallowed + type: string + networkPolicy: + properties: + egress: + items: + properties: + ports: + items: + properties: + endPort: + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + protocol: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + to: + items: + properties: + ipBlock: + properties: + cidr: + type: string + except: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - cidr + type: object + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: array + ingress: + items: + properties: + from: + items: + properties: + ipBlock: + properties: + cidr: + type: string + except: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - cidr + type: object + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + podSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + ports: + items: + properties: + endPort: + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + protocol: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: array + type: object + networkPolicyManagement: + default: Managed + enum: + - Managed + - Unmanaged + type: string + podTemplate: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - 
key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: 
array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + 
properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: 
atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + 
properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + 
path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array 
+ x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + 
additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + 
service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + 
properties: + name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: 
boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + 
- port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: 
object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: 
+ format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string 
+ required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + type: string + required: + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostUsers: + type: boolean + hostname: + type: string + hostnameOverride: + type: string + imagePullSecrets: + items: + properties: + name: + default: "" + type: string + type: object + 
x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + properties: + key: + type: string + optional: + default: false + type: boolean + path: + type: string + volumeName: + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: 
object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + stopSignal: + type: string + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: 
array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + 
type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + restartPolicyRules: + items: + properties: + action: + type: string + exitCodes: + properties: + operator: + type: string + values: + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + type: object + required: + - action + type: object + type: array + 
x-kubernetes-list-type: atomic + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: 
int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: atomic + os: + properties: + name: + type: string + required: + - name + type: object + overhead: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + x-kubernetes-list-type: atomic + 
resourceClaims: + items: + properties: + name: + type: string + resourceClaimName: + type: string + resourceClaimTemplateName: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + schedulingGates: + items: + properties: + name: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + securityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxChangePolicy: + type: string + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + 
format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + serviceAccountName: + type: string + setHostnameAsFQDN: + type: boolean + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + volumes: + items: + properties: + 
awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + 
properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: 
object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + 
signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podCertificate: + properties: + certificateChainPath: + type: string + credentialBundlePath: + type: string + keyPath: + type: string + keyType: + type: string + maxExpirationSeconds: + format: int32 + type: integer + signerName: + type: string + userAnnotations: + additionalProperties: + type: string + type: object + required: + - keyType + - signerName + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: 
integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + 
fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + workloadRef: + properties: + name: + type: string + podGroup: + type: string + podGroupReplicaKey: + type: string + required: + - name + - podGroup + type: object + required: + - containers + type: object + required: + - spec + type: object + required: + - podTemplate + type: object + status: + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: sandboxwarmpools.extensions.agents.x-k8s.io +spec: + group: extensions.agents.x-k8s.io + names: + kind: SandboxWarmPool + listKind: SandboxWarmPoolList + plural: sandboxwarmpools + shortNames: + - swp + singular: sandboxwarmpool + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.readyReplicas + name: Ready + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + replicas: + format: int32 + minimum: 0 + type: integer + sandboxTemplateRef: + properties: + name: + type: string + required: + - name + type: object + updateStrategy: + properties: + type: + default: OnReplenish + enum: + - Recreate + - OnReplenish + type: string + type: object + required: + - replicas + - sandboxTemplateRef + type: object + status: + properties: + readyReplicas: + format: int32 + type: integer + replicas: + format: int32 + type: integer + selector: + type: string + type: object + required: + - spec + type: object + served: true + storage: 
true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/deploy/helm/charts/agent-sandbox/templates/agent-sandbox-manifest.yaml b/deploy/helm/charts/agent-sandbox/templates/agent-sandbox-manifest.yaml new file mode 100644 index 0000000000..453305dc58 --- /dev/null +++ b/deploy/helm/charts/agent-sandbox/templates/agent-sandbox-manifest.yaml @@ -0,0 +1,293 @@ +# Vendored from kubernetes-sigs/agent-sandbox v0.4.2 via vendor.sh. +# Do not edit by hand — re-run vendor.sh to refresh. +# Contains: controller Deployments, RBAC, Namespace, Service, ServiceAccount. +# +# LOCAL EDIT — preserve when re-running vendor.sh: +# PodSecurity admission labels added to the Namespace below. `baseline` +# is enforced (operator controller pod runs without an explicit +# securityContext; `restricted` would block it until that's patched). +# `restricted` is set as warn/audit so violations from sandbox pods or +# the controller surface in audit logs without rejecting admission. +# When the operator's pod spec hardens to `restricted`, flip enforce. 
+kind: Namespace +apiVersion: v1 +metadata: + name: agent-sandbox-system + labels: + pod-security.kubernetes.io/enforce: baseline + pod-security.kubernetes.io/enforce-version: latest + pod-security.kubernetes.io/warn: restricted + pod-security.kubernetes.io/warn-version: latest + pod-security.kubernetes.io/audit: restricted + pod-security.kubernetes.io/audit-version: latest + +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: agent-sandbox-controller + namespace: agent-sandbox-system + labels: + app: agent-sandbox-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: agent-sandbox-controller +subjects: +- kind: ServiceAccount + name: agent-sandbox-controller + namespace: agent-sandbox-system +roleRef: + kind: ClusterRole + name: agent-sandbox-controller + apiGroup: rbac.authorization.k8s.io + +--- + +kind: Service +apiVersion: v1 +metadata: + name: agent-sandbox-controller + namespace: agent-sandbox-system + labels: + app: agent-sandbox-controller +spec: + selector: + app: agent-sandbox-controller + ports: + - name: metrics + port: 8080 + targetPort: metrics + protocol: TCP + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: agent-sandbox-controller + namespace: agent-sandbox-system + labels: + app: agent-sandbox-controller +spec: + replicas: 1 + selector: + matchLabels: + app: agent-sandbox-controller + template: + metadata: + labels: + app: agent-sandbox-controller + spec: + serviceAccountName: agent-sandbox-controller + containers: + - name: agent-sandbox-controller + image: registry.k8s.io/agent-sandbox/agent-sandbox-controller:v0.4.2 + args: + - --leader-elect=true + ports: + - name: metrics + containerPort: 8080 + protocol: TCP + - name: healthz + containerPort: 8081 + protocol: TCP +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: agent-sandbox-controller +rules: +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - services 
+ verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - agents.x-k8s.io + resources: + - sandboxes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - agents.x-k8s.io + resources: + - sandboxes/finalizers + - sandboxes/status + verbs: + - get + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - events.k8s.io + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: agent-sandbox-controller-extensions +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - agents.x-k8s.io + resources: + - sandboxes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - extensions.agents.x-k8s.io + resources: + - sandboxclaims + - sandboxtemplates + - sandboxwarmpools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - extensions.agents.x-k8s.io + resources: + - sandboxclaims/finalizers + - sandboxclaims/status + - sandboxtemplates/finalizers + - sandboxtemplates/status + - sandboxwarmpools/finalizers + - sandboxwarmpools/status + verbs: + - get + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: agent-sandbox-controller + namespace: agent-sandbox-system + labels: + app: agent-sandbox-controller +spec: + replicas: 
1 + selector: + matchLabels: + app: agent-sandbox-controller + template: + metadata: + labels: + app: agent-sandbox-controller + spec: + serviceAccountName: agent-sandbox-controller + containers: + - name: agent-sandbox-controller + image: registry.k8s.io/agent-sandbox/agent-sandbox-controller:v0.4.2 + args: + - "--leader-elect=true" + - "--extensions" + ports: + - name: metrics + containerPort: 8080 + protocol: TCP + - name: healthz + containerPort: 8081 + protocol: TCP +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: agent-sandbox-controller-extensions +subjects: +- kind: ServiceAccount + name: agent-sandbox-controller + namespace: agent-sandbox-system +roleRef: + kind: ClusterRole + name: agent-sandbox-controller-extensions + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/helm/charts/agent-sandbox/values.yaml b/deploy/helm/charts/agent-sandbox/values.yaml new file mode 100644 index 0000000000..75602a1dbd --- /dev/null +++ b/deploy/helm/charts/agent-sandbox/values.yaml @@ -0,0 +1,6 @@ +# Vendored from kubernetes-sigs/agent-sandbox — upstream release assets are +# raw YAML, not templated. This subchart ships them as-is. +# +# To tune behavior, set values on the parent chart's `sandbox.agentSandbox.*` +# (SandboxTemplate image/resources, NetworkPolicy toggle, WarmPool size). +# This subchart has no knobs of its own. diff --git a/deploy/helm/charts/agent-sandbox/vendor.sh b/deploy/helm/charts/agent-sandbox/vendor.sh new file mode 100755 index 0000000000..d80b5439f4 --- /dev/null +++ b/deploy/helm/charts/agent-sandbox/vendor.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash +# Re-vendor kubernetes-sigs/agent-sandbox release assets into this subchart. +# +# Upstream ships raw multi-doc YAML (manifest.yaml + extensions.yaml), not a +# Helm chart. 
We split by kind: CustomResourceDefinition docs land in crds/, +# everything else in templates/ so Helm treats CRDs with its install-only +# lifecycle (see README.md for the upgrade caveat). +# +# Usage: ./vendor.sh [vX.Y.Z] (default v0.4.2 — must match appVersion) +set -euo pipefail + +UPSTREAM_VERSION="${1:-v0.4.2}" +REPO="kubernetes-sigs/agent-sandbox" + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +CRDS_FILE="${SCRIPT_DIR}/crds/agent-sandbox-crds.yaml" +TMPL_FILE="${SCRIPT_DIR}/templates/agent-sandbox-manifest.yaml" + +WORK="$(mktemp -d)" +trap 'rm -rf "${WORK}"' EXIT + +log() { printf "\033[1;34m[vendor]\033[0m %s\n" "$*"; } + +log "fetching ${REPO}@${UPSTREAM_VERSION}" +curl -fsSLo "${WORK}/manifest.yaml" \ + "https://github.com/${REPO}/releases/download/${UPSTREAM_VERSION}/manifest.yaml" +curl -fsSLo "${WORK}/extensions.yaml" \ + "https://github.com/${REPO}/releases/download/${UPSTREAM_VERSION}/extensions.yaml" + +# Split each multi-doc YAML by `---` boundaries, classify each doc by kind. +# awk is portable (no yq dependency) and good enough for manifests that only +# need a kind: line scanned. +split_docs() { + local src="$1" crds_out="$2" other_out="$3" + awk -v crds="${crds_out}" -v other="${other_out}" ' + function flush( isCrd, i, out) { + if (n == 0) return + isCrd = 0 + for (i = 1; i <= n; i++) { + if (buf[i] ~ /^kind:[[:space:]]*CustomResourceDefinition[[:space:]]*$/) { + isCrd = 1 + break + } + } + out = isCrd ? crds : other + for (i = 1; i <= n; i++) print buf[i] >> out + print "---" >> out + n = 0 + } + /^---[[:space:]]*$/ { flush(); next } + { buf[++n] = $0 } + END { flush() } + ' "${src}" +} + +log "splitting CRDs from non-CRDs" +: > "${WORK}/crds.yaml" +: > "${WORK}/other.yaml" +split_docs "${WORK}/manifest.yaml" "${WORK}/crds.yaml" "${WORK}/other.yaml" +split_docs "${WORK}/extensions.yaml" "${WORK}/crds.yaml" "${WORK}/other.yaml" + +# Strip trailing empty doc separator so `helm template` doesn't warn. 
+sed -i.bak -e '$d' "${WORK}/crds.yaml" && rm "${WORK}/crds.yaml.bak" +sed -i.bak -e '$d' "${WORK}/other.yaml" && rm "${WORK}/other.yaml.bak" + +mkdir -p "$(dirname "${CRDS_FILE}")" "$(dirname "${TMPL_FILE}")" + +HEADER_CRDS="# Vendored from ${REPO} ${UPSTREAM_VERSION} via vendor.sh. +# Do not edit by hand — re-run vendor.sh to refresh. +# Contains: CustomResourceDefinition docs only. +" + +HEADER_TMPL="# Vendored from ${REPO} ${UPSTREAM_VERSION} via vendor.sh. +# Do not edit by hand — re-run vendor.sh to refresh. +# Contains: controller Deployments, RBAC, Namespace, Service, ServiceAccount. +" + +printf "%s" "${HEADER_CRDS}" > "${CRDS_FILE}" +cat "${WORK}/crds.yaml" >> "${CRDS_FILE}" + +printf "%s" "${HEADER_TMPL}" > "${TMPL_FILE}" +cat "${WORK}/other.yaml" >> "${TMPL_FILE}" + +log "wrote $(wc -l <"${CRDS_FILE}") lines -> ${CRDS_FILE}" +log "wrote $(wc -l <"${TMPL_FILE}") lines -> ${TMPL_FILE}" +log "done. Remember to update appVersion in Chart.yaml if the version changed." diff --git a/deploy/helm/examples/postgres-kind.yaml b/deploy/helm/examples/postgres-kind.yaml new file mode 100644 index 0000000000..42f956f598 --- /dev/null +++ b/deploy/helm/examples/postgres-kind.yaml @@ -0,0 +1,81 @@ +# Minimal Postgres for kind dev installs. +# +# The chart-deco-studio chart's `database.engine: sqlite` mode is a relic of +# the published `decocms` npm package — the current source code uses Kysely + +# node-postgres exclusively. So a postgres instance has to exist somewhere +# the studio pod can reach. +# +# This manifest stands one up in the same namespace as the helm release. Pair +# with values-kind.yaml's `database.engine: postgresql` + `database.url` +# overrides. NOT for production — no auth hardening, no backups, single +# replica. Replace with a managed RDS / CloudSQL / external Postgres URL in +# any real deploy. 
+# +# Apply with: +# kubectl apply -n deco-studio -f deploy/helm/examples/postgres-kind.yaml +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - port: 5432 + targetPort: 5432 + clusterIP: None # headless — DNS resolves directly to the pod +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: postgres-data +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + # storageClassName omitted → cluster default (`standard` on kind). +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres +spec: + replicas: 1 + strategy: + type: Recreate # SQLite-style: single writer, RWO PVC + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: postgres:17-alpine + env: + - name: POSTGRES_USER + value: postgres + - name: POSTGRES_PASSWORD + value: postgres + - name: POSTGRES_DB + value: postgres + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + ports: + - containerPort: 5432 + volumeMounts: + - name: data + mountPath: /var/lib/postgresql/data + readinessProbe: + exec: + command: ["pg_isready", "-U", "postgres"] + initialDelaySeconds: 5 + periodSeconds: 5 + volumes: + - name: data + persistentVolumeClaim: + claimName: postgres-data diff --git a/deploy/helm/examples/values-kind.yaml b/deploy/helm/examples/values-kind.yaml new file mode 100644 index 0000000000..d6b3c718d1 --- /dev/null +++ b/deploy/helm/examples/values-kind.yaml @@ -0,0 +1,152 @@ +# Local kind cluster overrides for chart-deco-studio. +# +# Use with the runbook in deploy/helm/examples/README-kind.md (or paste from +# the chat). Designed to work on macOS (arm64 / Apple Silicon) AND linux/amd64 +# without changes — no nodeSelector pins, default StorageClass. 
+#
+# Pairs with:
+#   - sandbox.agentSandbox.enabled=true → installs the agent-sandbox subchart
+#     so the helm release fully owns the operator + CRDs (no separate
+#     deploy/k8s-sandbox/local/up.sh step).
+#   - mesh-sandbox:local image built locally and `kind load`ed into the cluster
+#     before `helm install`.
+
+# ── single replica, in-cluster postgres, no autoscaling ────────────────
+replicaCount: 1
+
+# Studio image: built from current branch source via tests/resilience/
+# Dockerfile.studio and `kind load`ed. Two reasons not to use the published
+# image:
+#   1. `ghcr.io/decocms/studio/studio` is private — anonymous pull returns 403.
+#   2. The published `decocms` npm package may lag the current branch (e.g.
+#      STUDIO_SANDBOX_RUNNER=agent-sandbox wasn't recognized in 2.281.2). Building
+#      from source guarantees we're testing the code under development.
+image:
+  repository: deco-studio
+  tag: local
+  pullPolicy: Never
+  # Source build doesn't install the `decocms` npm package, so the default
+  # chart command (`bun run deco …`) won't resolve. Use cli.ts directly:
+  # it goes through buildSettings → ensureServices, which spawns embedded
+  # postgres + runs Better Auth + Kysely migrations end-to-end. The
+  # `dist/server/migrate.js` path the chart's sqlite branch wires up
+  # bypasses ensureServices and tries to pg-connect to DATABASE_URL
+  # literally, which fails on the chart's `/app/data/mesh.db` placeholder.
+  command:
+    - bun
+    - run
+    - src/cli.ts
+    - --no-tui
+    - --no-local-mode
+
+# ── extra env: TLS workaround for Bun + K8s port-forward ─────────────
+# @kubernetes/client-node's PortForward uses the `ws` package for the
+# WebSocket upgrade. Bun's `ws` build doesn't honor the Node-style `ca`
+# option, so the WS handshake fails TLS validation against the kind API
+# server's self-signed cert. Plain HTTPS calls (claim CRUD) work fine —
+# only WS-backed port-forward breaks.
Symptom: studio sees +# `[AgentSandboxRunner] port-forward to :9000 failed: [object +# ErrorEvent]` and the daemon /health probe times out. +# +# Workaround for kind dev only: disable TLS verification globally for the +# pod. NEVER set this in prod — it disables all cert validation on the +# studio process. Track upstream Bun fix or switch the studio image to +# Node.js if you need to run @kubernetes/client-node port-forward +# securely in production. +env: + - name: NODE_TLS_REJECT_UNAUTHORIZED + value: "0" + +# ── database ────────────────────────────────────────────────────────── +# Use the in-cluster postgres deployed via examples/postgres-kind.yaml. +# The chart's `sqlite` mode is incompatible with the current branch (the +# bundled migrate.js opens pg-pool against DATABASE_URL literally and +# crashes when handed `/app/data/mesh.db`). +database: + engine: postgresql + url: "postgresql://postgres:postgres@postgres.deco-studio.svc.cluster.local:5432/postgres" + +# Single-node kind cluster has no zones. Drop the spread constraint to +# keep `kubectl describe` output clean. +topologySpreadConstraints: [] + +# Tighten requests so the cluster fits comfortably in Docker Desktop's +# default ~4Gi. Bump back up when stress-testing. +resources: + requests: + memory: "1Gi" + cpu: "500m" + limits: + memory: "2Gi" + +# ── storage ─────────────────────────────────────────────────────────── +# kind ships rancher.io/local-path as the default StorageClass. +# Empty string = use whatever the cluster marks default (= local-path here). +persistence: + enabled: true + accessMode: ReadWriteOnce + # Empty = chart omits storageClassName, K8s falls back to the default + # StorageClass (`standard` / rancher.io/local-path on kind). + storageClass: "" + size: 5Gi + distributed: false + +# SQLite + ReadWriteOnce → chart auto-detects Recreate strategy. Leave +# strategy unset. 
+ +# ── secrets (override on the command line, NOT here) ────────────────── +# Pass via --set at install time: +# --set secret.BETTER_AUTH_SECRET="$(openssl rand -base64 32)" +# --set secret.ENCRYPTION_KEY="$(openssl rand -base64 32)" + +# ── NATS subchart ───────────────────────────────────────────────────── +# Keep enabled (mesh's event bus needs it). PVC storageClassName is +# already empty in the parent default; that resolves to local-path on kind. + +# ── agent-sandbox runner ────────────────────────────────────────────── +sandbox: + agentSandbox: + enabled: true + image: + # Built locally via packages/sandbox + `kind load docker-image`. + # See runbook for the exact build command. + repository: mesh-sandbox + tag: local + pullPolicy: Never + # Modest sandbox limits to fit alongside studio + NATS on a laptop + # kind cluster. Mirrors deploy/k8s-sandbox/local/sandbox-template.yaml. + resources: + requests: + cpu: "100m" + memory: "512Mi" + limits: + cpu: "1" + memory: "3Gi" + networkPolicy: + # kindnet enforces NetworkPolicy as of kind v0.27 (kindnetd v1.x). + # Leave the chart's policy on so studio → sandbox traffic on port + # 9000 is explicitly allow-listed; otherwise the operator-managed + # default (only `app: sandbox-router` ingress, locked off via + # `networkPolicyManagement: Unmanaged` in the chart's SandboxTemplate) + # would have re-blocked everything. Older kindnet builds (pre-0.27) + # ignore the policy and the rule is inert — safe either way. + enabled: true + # No Istio in kind → no preview gateway. Mesh's HTTP edge handles preview + # routing in-process via apps/mesh/src/sandbox/preview-proxy.ts: it reads + # the Host header, extracts the sandbox handle, and reverse-proxies to + # the in-cluster daemon Service. + previewGateway: + enabled: false + # `*.localhost` is special — browsers resolve every subdomain of + # localhost to 127.0.0.1 without /etc/hosts entries. 
Combined with the + # user's `kubectl port-forward svc/deco-studio 8080:80`, this means + # browser → `studio-sb-XXX.preview.localhost:8080` → kube port-forward → + # mesh edge → preview-proxy → sandbox daemon Service. No ingress, no DNS, + # no certs. Match the port to whatever the user port-forwards to. + previewUrlPattern: "http://{handle}.preview.localhost:8080" + # Plain host-users mode. userns remap requires K8s 1.30+ on a kernel + # that supports it; kind nodes can vary. Keep simple for local dev. + hostUsers: true + readOnlyRootFilesystem: false + warmPool: + enabled: false diff --git a/deploy/helm/templates/_helpers.tpl b/deploy/helm/templates/_helpers.tpl index d3eb01d605..ab8a71b8bd 100644 --- a/deploy/helm/templates/_helpers.tpl +++ b/deploy/helm/templates/_helpers.tpl @@ -178,6 +178,25 @@ Validate OTel collector/S3 configuration. {{- end }} {{- end }} +{{/* +Validate that Gateway API + cert-manager CRDs are present when the sandbox +preview gateway is enabled. Without this check, `helm install` would push +Gateway/HTTPRoute/Certificate to an API server that doesn't know those +kinds — the failure mode is an opaque "no matches for kind" rejection, +sometimes after partial-apply. Failing at template time keeps the release +atomic and gives a pointer to the right install command. +*/}} +{{- define "chart-deco-studio.validateSandboxPreviewGateway" -}} +{{- if and .Values.sandbox.agentSandbox.enabled .Values.sandbox.agentSandbox.previewGateway.enabled }} +{{- if not (.Capabilities.APIVersions.Has "gateway.networking.k8s.io/v1") }} +{{- fail "chart-deco-studio: sandbox.agentSandbox.previewGateway.enabled=true requires the Gateway API CRDs (gateway.networking.k8s.io/v1). Install: kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.1.0/standard-install.yaml — and a Gateway controller (Istio, Envoy Gateway, Cilium, ...) implementing the chosen gatewayClassName." 
-}} +{{- end }} +{{- if not (.Capabilities.APIVersions.Has "cert-manager.io/v1") }} +{{- fail "chart-deco-studio: sandbox.agentSandbox.previewGateway.enabled=true requires cert-manager (cert-manager.io/v1). Install: helm install cert-manager jetstack/cert-manager -n cert-manager --create-namespace --set crds.enabled=true" -}} +{{- end }} +{{- end }} +{{- end }} + {{/* Formats OTEL headers map as key=value,key2=value2 format. */}} diff --git a/deploy/helm/templates/configmap.yaml b/deploy/helm/templates/configmap.yaml index c66f1d00e7..205b26f2eb 100644 --- a/deploy/helm/templates/configmap.yaml +++ b/deploy/helm/templates/configmap.yaml @@ -17,3 +17,11 @@ data: {{- if ne (lower (default "sqlite" .Values.database.engine)) "postgresql" }} DATABASE_URL: {{ include "chart-deco-studio.databaseUrl" . | trim | quote }} {{- end }} + {{- if .Values.sandbox.agentSandbox.enabled }} + {{- if not (hasKey .Values.configMap.meshConfig "STUDIO_SANDBOX_RUNNER") }} + STUDIO_SANDBOX_RUNNER: "agent-sandbox" + {{- end }} + {{- with .Values.sandbox.agentSandbox.previewUrlPattern }} + STUDIO_SANDBOX_PREVIEW_URL_PATTERN: {{ . | quote }} + {{- end }} + {{- end }} diff --git a/deploy/helm/templates/pvc.yaml b/deploy/helm/templates/pvc.yaml index dfcd2148f5..c98c4b84fb 100644 --- a/deploy/helm/templates/pvc.yaml +++ b/deploy/helm/templates/pvc.yaml @@ -12,10 +12,8 @@ spec: resources: requests: storage: {{ .Values.persistence.size }} - {{- if .Values.persistence.storageClass }} - storageClassName: {{ .Values.persistence.storageClass }} - {{- else }} - storageClassName: "" # Use default storage class + {{- with .Values.persistence.storageClass }} + storageClassName: {{ . 
}} {{- end }} {{- end }} {{- end }} diff --git a/deploy/helm/templates/sandbox-network-policy.yaml b/deploy/helm/templates/sandbox-network-policy.yaml new file mode 100644 index 0000000000..4db2a77e2d --- /dev/null +++ b/deploy/helm/templates/sandbox-network-policy.yaml @@ -0,0 +1,93 @@ +{{- if and .Values.sandbox.agentSandbox.enabled .Values.sandbox.agentSandbox.networkPolicy.enabled }} +# NetworkPolicy for mesh sandbox pods. Derived from PLAN-K8S-REMAINING.md +# section 2.1 — the prior 2.1 PR that was meant to land a local/prod network +# policy had not merged at the time this chart was authored, so the spec +# below tracks the plan directly. Reconcile if the 2.1 PR lands a different +# shape. +# +# Scope: selects pods in agent-sandbox-system labeled +# app.kubernetes.io/name=studio-sandbox. Applies both ingress and egress rules, +# so egress is deny-by-default (policyType Egress with only allowed rules). +# +# Threat model: workload is arbitrary user code. Egress must not reach IMDS +# (169.254.169.254), in-cluster RFC1918 services, or link-local addresses. +# Combine with EKS IMDSv2 hop-limit=1 at the node level — this policy alone +# is not sufficient on clouds where IMDS is reachable via hop. +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: studio-sandbox + namespace: agent-sandbox-system + labels: + app.kubernetes.io/name: studio-sandbox + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "chart-deco-studio.chart" . }} +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: studio-sandbox + policyTypes: + - Ingress + - Egress + ingress: + # Daemon port (9000) — mesh server pods call this to exec tools, stream + # logs, etc. Also the *preview* request path: when previewGateway is + # enabled, mesh reverse-proxies `*.preview.` traffic to the + # daemon here so the daemon's CSP/HMR rewrites apply (port 3000 would + # bypass them). 
Matches the chart's own selectorLabels so self-hosters + # who use the default deployment get the rule automatically. + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} + podSelector: + matchLabels: + {{- include "chart-deco-studio.selectorLabels" . | nindent 14 }} + ports: + - protocol: TCP + port: 9000 + {{- with .Values.sandbox.agentSandbox.networkPolicy.previewGatewayNamespace }} + # Legacy/standby — direct ingress on dev port 3000 from a configured + # gateway namespace. Only needed for setups that route preview traffic + # *around* mesh (no daemon CSP/HMR rewrites). The standard Istio + # Gateway API path lands on port 9000 via mesh and doesn't need this. + - from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ . }} + ports: + - protocol: TCP + port: 3000 + {{- end }} + egress: + # CoreDNS — UDP + TCP 53. + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + # Public internet on 80/443 with IMDS, RFC1918, and link-local blocked. + # Covers bun install, git clone, external APIs without opening the + # sandbox to in-cluster services. 
+ - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 169.254.0.0/16 # link-local + IMDSv2 + - 10.0.0.0/8 # RFC1918 + - 172.16.0.0/12 # RFC1918 + - 192.168.0.0/16 # RFC1918 + - 100.64.0.0/10 # carrier-grade NAT / EKS VPC CNI pods + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 80 +{{- end }} diff --git a/deploy/helm/templates/sandbox-preview-cert.yaml b/deploy/helm/templates/sandbox-preview-cert.yaml new file mode 100644 index 0000000000..31415e567b --- /dev/null +++ b/deploy/helm/templates/sandbox-preview-cert.yaml @@ -0,0 +1,34 @@ +{{- if and .Values.sandbox.agentSandbox.enabled .Values.sandbox.agentSandbox.previewGateway.enabled }} +{{- $domain := required "sandbox.agentSandbox.previewGateway.domain is required when previewGateway.enabled=true" .Values.sandbox.agentSandbox.previewGateway.domain }} +{{- $issuer := required "sandbox.agentSandbox.previewGateway.clusterIssuer is required when previewGateway.enabled=true" .Values.sandbox.agentSandbox.previewGateway.clusterIssuer }} +{{- $gwNamespace := .Values.sandbox.agentSandbox.previewGateway.namespace }} +{{- $tlsSecretName := default (printf "%s-sandbox-preview-tls" (include "chart-deco-studio.fullname" .)) .Values.sandbox.agentSandbox.previewGateway.tlsSecretName }} +{{- $meshServiceName := include "chart-deco-studio.fullname" . }} +# Wildcard cert for the sandbox preview Gateway. cert-manager places the +# Secret in the gateway namespace so the Gateway listener can mount it +# without a cross-namespace reference. +# +# DNS-01 is the only solver that can validate a wildcard SAN, so the +# referenced ClusterIssuer must be DNS-01 (e.g. Cloudflare, Route53). The +# chart does not template the ClusterIssuer itself — the API tokens +# required to provision DNS records are per-cluster infra, not chart +# config. See deploy/helm/README.md for a Cloudflare DNS-01 ClusterIssuer +# template. 
+apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ $meshServiceName }}-sandbox-preview + namespace: {{ $gwNamespace }} + labels: + app.kubernetes.io/name: studio-sandbox-preview + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "chart-deco-studio.chart" . }} +spec: + secretName: {{ $tlsSecretName }} + issuerRef: + kind: ClusterIssuer + name: {{ $issuer | quote }} + commonName: {{ printf "*.%s" $domain | quote }} + dnsNames: + - {{ printf "*.%s" $domain | quote }} +{{- end }} diff --git a/deploy/helm/templates/sandbox-preview-gateway.yaml b/deploy/helm/templates/sandbox-preview-gateway.yaml new file mode 100644 index 0000000000..b403d3a1c2 --- /dev/null +++ b/deploy/helm/templates/sandbox-preview-gateway.yaml @@ -0,0 +1,78 @@ +{{- if and .Values.sandbox.agentSandbox.enabled .Values.sandbox.agentSandbox.previewGateway.enabled }} +{{- $domain := required "sandbox.agentSandbox.previewGateway.domain is required when previewGateway.enabled=true" .Values.sandbox.agentSandbox.previewGateway.domain }} +{{- $issuer := required "sandbox.agentSandbox.previewGateway.clusterIssuer is required when previewGateway.enabled=true" .Values.sandbox.agentSandbox.previewGateway.clusterIssuer }} +{{- $gwNamespace := .Values.sandbox.agentSandbox.previewGateway.namespace }} +{{- $tlsSecretName := default (printf "%s-sandbox-preview-tls" (include "chart-deco-studio.fullname" .)) .Values.sandbox.agentSandbox.previewGateway.tlsSecretName }} +{{- $hostname := printf "*.%s" $domain }} +{{- $meshServiceName := include "chart-deco-studio.fullname" . }} +# Wildcard preview-URL ingress: Approach B from the K8s sandbox plan. 
A +# single Gateway + HTTPRoute terminate `*.preview.` and forward to +# the mesh Service; mesh inspects the Host header and reverse-proxies to +# the matching sandbox's daemon at port 9000 (daemon owns the public +# surface; routing browsers straight at dev port 3000 would bypass the +# daemon's CSP/HMR rewrites and break iframe embedding + SSE). +# +# Mesh stays in the request path for the first ship; the longer-term plan +# is per-claim HTTPRoutes that bypass mesh entirely. Switching to that +# requires per-Service routing + RBAC for mesh to mint HTTPRoutes, which +# is deferred. +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: {{ $meshServiceName }}-sandbox-preview + namespace: {{ $gwNamespace }} + labels: + app.kubernetes.io/name: studio-sandbox-preview + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "chart-deco-studio.chart" . }} + annotations: + # cert-manager picks up the cert from the listener's TLS secret ref; + # this annotation tells it which ClusterIssuer to use when minting + # the wildcard. Required because Gateway listeners don't have a + # built-in `issuerRef` field. + cert-manager.io/cluster-issuer: {{ $issuer | quote }} +spec: + gatewayClassName: {{ .Values.sandbox.agentSandbox.previewGateway.gatewayClassName | quote }} + listeners: + - name: https + protocol: HTTPS + port: 443 + hostname: {{ $hostname | quote }} + tls: + mode: Terminate + certificateRefs: + - kind: Secret + name: {{ $tlsSecretName }} + namespace: {{ $gwNamespace }} + allowedRoutes: + namespaces: + # HTTPRoute lives in the release namespace (next to mesh), so + # we have to explicitly allow cross-namespace attachment from + # there. Without `from: All` plus a tight selector this would + # silently drop the route. 
+ from: Selector + selector: + matchLabels: + kubernetes.io/metadata.name: {{ .Release.Namespace }} +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: {{ $meshServiceName }}-sandbox-preview + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: studio-sandbox-preview + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "chart-deco-studio.chart" . }} +spec: + parentRefs: + - kind: Gateway + name: {{ $meshServiceName }}-sandbox-preview + namespace: {{ $gwNamespace }} + hostnames: + - {{ $hostname | quote }} + rules: + - backendRefs: + - name: {{ $meshServiceName }} + port: {{ .Values.service.port }} +{{- end }} diff --git a/deploy/helm/templates/sandbox-rbac.yaml b/deploy/helm/templates/sandbox-rbac.yaml new file mode 100644 index 0000000000..580034415f --- /dev/null +++ b/deploy/helm/templates/sandbox-rbac.yaml @@ -0,0 +1,56 @@ +{{- if .Values.sandbox.agentSandbox.enabled }} +# RBAC for the mesh ServiceAccount to drive the agent-sandbox operator from +# inside the cluster. The runner (packages/sandbox/server/runner/agent-sandbox/) needs: +# - sandboxclaims CRUD/patch (per-tenant claim lifecycle, idle TTL refresh) +# - sandboxes get/list/watch (waitForSandboxReady streams `?watch=true`) +# - pods/portforward create (kubectl-style tunnel to the daemon container) +# Pods get is included for portforward error paths; the runner itself doesn't +# read pod specs directly. +# +# Scope: Role in agent-sandbox-system (the operator's namespace). The mesh +# ServiceAccount lives in {{ .Release.Namespace }} and the RoleBinding crosses +# namespaces by referencing it explicitly. Keeps blast radius of a mesh +# compromise limited to the sandbox namespace. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: studio-sandbox-runner + namespace: agent-sandbox-system + labels: + {{- include "chart-deco-studio.labels" . 
| nindent 4 }} +rules: + - apiGroups: ["extensions.agents.x-k8s.io"] + resources: ["sandboxclaims"] + verbs: ["get", "create", "delete", "patch"] + - apiGroups: ["agents.x-k8s.io"] + resources: ["sandboxes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods/portforward"] + # `get` is required for the WebSocket-based port-forward path used by + # @kubernetes/client-node v1.x (and Bun's native WS, and any newer + # client-go-equivalent). The legacy SPDY path only needed `create`, but + # all modern clients use the WS upgrade — which the API server enforces + # as a `GET` against the subresource. Without `get`, the upgrade returns + # 403 and the runner sees `[object ErrorEvent]`. + verbs: ["get", "create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: studio-sandbox-runner + namespace: agent-sandbox-system + labels: + {{- include "chart-deco-studio.labels" . | nindent 4 }} +subjects: + - kind: ServiceAccount + name: {{ include "chart-deco-studio.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: studio-sandbox-runner + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/deploy/helm/templates/sandbox-template.yaml b/deploy/helm/templates/sandbox-template.yaml new file mode 100644 index 0000000000..72b55ff6ea --- /dev/null +++ b/deploy/helm/templates/sandbox-template.yaml @@ -0,0 +1,130 @@ +{{- if .Values.sandbox.agentSandbox.enabled }} +# Shared SandboxTemplate consumed by every SandboxClaim the mesh runner +# creates. Mirrors deploy/k8s-sandbox/local/sandbox-template.yaml with prod +# ceilings from values.sandbox.agentSandbox.resources. +# +# Hardcoded to the operator's own namespace (agent-sandbox-system) — the CRDs +# ship with that as the install target, and the operator's RBAC watches it by +# default. 
+apiVersion: extensions.agents.x-k8s.io/v1alpha1 +kind: SandboxTemplate +metadata: + name: studio-sandbox + namespace: agent-sandbox-system + labels: + app.kubernetes.io/name: studio-sandbox + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "chart-deco-studio.chart" . }} +spec: + # Claims inject DAEMON_TOKEN per-provision (see PLAN-K8S-REMAINING 2.1). The + # template carries no shared secret; leakage of the template compromises + # nothing on its own. + envVarsInjectionPolicy: Allowed + # The CRD defaults to Managed, which makes the operator install its own + # NetworkPolicy that only allows ingress from pods labeled + # `app: sandbox-router`. That's intended for Istio-style sidecar routing + # and silently blocks the mesh → daemon path the preview-proxy depends on. + # We surface the netpol via templates/sandbox-network-policy.yaml instead + # (gated by `sandbox.agentSandbox.networkPolicy.enabled`), so flag the + # operator's policy off. + networkPolicyManagement: Unmanaged + podTemplate: + metadata: + labels: + app.kubernetes.io/name: studio-sandbox + # Do NOT set `studio.decocms.com/role` here. The operator (v0.4.2+) + # rejects claims whose additionalPodMetadata defines a label key + # already present in the template — even when the values differ — + # with "metadata override conflict". The runner sets role=claimed + # via additionalPodMetadata, so the template must leave that key + # undefined. Warm-pool pods end up without the role label; + # dashboards filter by absence-of-handle instead. + spec: + automountServiceAccountToken: false + {{- with .Values.sandbox.agentSandbox.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.sandbox.agentSandbox.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.sandbox.agentSandbox.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if not .Values.sandbox.agentSandbox.hostUsers }} + # User namespace remap: UID 1000 inside the pod maps to a high + # subordinate UID on the node, so a container escape lands as a + # nobody-user, not as a real node UID. Requires K8s 1.30+ and a + # containerd/kernel that support userns (EKS default AMIs are fine). + hostUsers: false + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: sandbox + image: "{{ .Values.sandbox.agentSandbox.image.repository }}:{{ .Values.sandbox.agentSandbox.image.tag }}" + imagePullPolicy: {{ .Values.sandbox.agentSandbox.image.pullPolicy }} + workingDir: /app + env: + - name: DAEMON_PORT + value: "9000" + - name: WORKDIR + value: "/app" + # DAEMON_TOKEN is injected per-claim via SandboxClaim.spec.env. + {{- if .Values.sandbox.agentSandbox.readOnlyRootFilesystem }} + # With RO rootfs + emptyDir on /app, the mount root is owned + # root:1000 (fsGroup). Git 2.35+'s "dubious ownership" check + # would refuse to operate. Disable the check inside the + # sandbox — single-tenant pod, no untrusted same-pod user. 
+ - name: GIT_CONFIG_COUNT + value: "1" + - name: GIT_CONFIG_KEY_0 + value: "safe.directory" + - name: GIT_CONFIG_VALUE_0 + value: "*" + {{- end }} + ports: + - name: daemon + containerPort: 9000 + protocol: TCP + - name: dev + containerPort: 3000 + protocol: TCP + resources: + {{- toYaml .Values.sandbox.agentSandbox.resources | nindent 12 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + readOnlyRootFilesystem: {{ .Values.sandbox.agentSandbox.readOnlyRootFilesystem }} + {{- if .Values.sandbox.agentSandbox.readOnlyRootFilesystem }} + volumeMounts: + - name: workdir + mountPath: /app + - name: tmp + mountPath: /tmp + - name: home + mountPath: /home/sandbox + {{- end }} + {{- if .Values.sandbox.agentSandbox.readOnlyRootFilesystem }} + volumes: + # Sized to match the per-container ephemeral-storage limit shape; + # individual mounts get a slice. Adjust if a workload needs more. + - name: workdir + emptyDir: + sizeLimit: 5Gi + - name: tmp + emptyDir: + sizeLimit: 1Gi + - name: home + emptyDir: + sizeLimit: 2Gi + {{- end }} +{{- end }} diff --git a/deploy/helm/templates/sandbox-warm-pool.yaml b/deploy/helm/templates/sandbox-warm-pool.yaml new file mode 100644 index 0000000000..0dd731a297 --- /dev/null +++ b/deploy/helm/templates/sandbox-warm-pool.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.sandbox.agentSandbox.enabled .Values.sandbox.agentSandbox.warmPool.enabled }} +# Pre-warms N sandbox pods against the shared SandboxTemplate so new claims +# bind instantly rather than waiting on image pull + kubelet start. Disabled +# by default — enable only after measuring cold-start pain; every pool +# replica costs the full sandbox resource request. +# +# Schema (v1alpha1): see CRD at +# deploy/helm/charts/agent-sandbox/crds/agent-sandbox-crds.yaml. 
+apiVersion: extensions.agents.x-k8s.io/v1alpha1 +kind: SandboxWarmPool +metadata: + name: studio-sandbox + namespace: agent-sandbox-system + labels: + app.kubernetes.io/name: studio-sandbox + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "chart-deco-studio.chart" . }} +spec: + replicas: {{ .Values.sandbox.agentSandbox.warmPool.size }} + sandboxTemplateRef: + name: studio-sandbox +{{- end }} diff --git a/deploy/helm/templates/validations.yaml b/deploy/helm/templates/validations.yaml index 7373c68694..87a40b21f5 100644 --- a/deploy/helm/templates/validations.yaml +++ b/deploy/helm/templates/validations.yaml @@ -3,4 +3,5 @@ This file only runs chart-level validations and renders no resources. */ -}} {{- include "chart-deco-studio.validate" . -}} {{- include "chart-deco-studio.validateOtel" . -}} +{{- include "chart-deco-studio.validateSandboxPreviewGateway" . -}} diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml index fbdc563c2c..d6caa55619 100644 --- a/deploy/helm/values.yaml +++ b/deploy/helm/values.yaml @@ -131,6 +131,10 @@ terminationGracePeriodSeconds: 60 nodeSelector: kubernetes.io/arch: amd64 +# arm64 clusters (Apple Silicon kind, ARM EKS, bare-metal RPi) should override +# this — set `nodeSelector: {}` or pin to `arm64`. Mesh's own image is +# multi-arch, but the s3-sync sidecar and some upstream subchart images may +# be amd64-only. tolerations: [] # Example: @@ -314,3 +318,136 @@ opentelemetry-collector: receivers: [otlp] processors: [memory_limiter, batch] exporters: [debug] + +# Agent-sandbox runner. Requires STUDIO_SANDBOX_RUNNER=agent-sandbox in mesh env. +# Disabled by default — self-hosters on the Docker runner (simpler, no +# cluster needed) leave this off and don't pay the agent-sandbox operator +# install cost. 
+# +# When enabled, installs: +# - agent-sandbox operator + CRDs (vendored in charts/agent-sandbox/) +# - SandboxTemplate used by every SandboxClaim mesh creates +# - NetworkPolicy locking down sandbox-pod ingress/egress +# - (optional) SandboxWarmPool for cold-start mitigation +# +# See deploy/helm/README.md for the enable/upgrade procedure. +sandbox: + agentSandbox: + enabled: false + image: + repository: ghcr.io/decocms/mesh-sandbox + tag: latest + # Default IfNotPresent; override to Never for local kind clusters that + # load the image via `kind load docker-image`. + pullPolicy: IfNotPresent + # Prod ceilings. Adjust per measured workload. + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: "2" + memory: 4Gi + ephemeral-storage: 10Gi + networkPolicy: + enabled: true + # Legacy/standby — kept for setups whose external gateway terminates + # *directly* on the sandbox's port 3000 (dev server). The default + # preview path goes through mesh and lands on port 9000 (daemon) + # instead, so most installs leave this empty. See + # `previewGateway` below for the standard Istio Gateway API path. + previewGatewayNamespace: "" + # Public URL pattern that ingress terminates on for sandbox previews. + # The runner substitutes {handle} (or whatever applyPreviewPattern + # supports) with the per-claim handle. Set in prod once a wildcard + # gateway + cert is in place; leave empty in local dev so the runner + # falls back to the 127.0.0.1 port-forward URL. Surfaced into the + # mesh container as STUDIO_SANDBOX_PREVIEW_URL_PATTERN. + # + # Should match `previewGateway.domain` below — e.g. when + # previewGateway.domain="preview.decocms.com", set this to + # "https://{handle}.preview.decocms.com". + previewUrlPattern: "" + # previewUrlPattern: "https://{handle}.preview.example.com" + # Wildcard preview-URL ingress (Approach B in the K8s sandbox plan). + # Renders an Istio Gateway + HTTPRoute that send all + # *.preview. 
<domain> traffic to the mesh Service; mesh recognises the
+    # Host header and reverse-proxies to the matching sandbox's daemon at
+    # port 9000 (daemon, not dev port 3000 — the daemon's reverse proxy
+    # injects the HMR bootstrap + strips CSP that the iframe needs).
+    #
+    # Manual prerequisites (not templated):
+    #   1. DNS: Cloudflare (or other) wildcard `*.preview.<domain>` →
+    #      cluster external LB hostname.
+    #   2. cert-manager ClusterIssuer for the wildcard cert. DNS-01 is
+    #      required (HTTP-01 doesn't work for wildcards). Set
+    #      `clusterIssuer` to that issuer's name.
+    previewGateway:
+      enabled: false
+      # gatewayClassName for the Gateway. EKS clusters running Istio
+      # ambient/sidecar default to "istio". Confirm with
+      # `kubectl get gatewayclasses` before flipping enabled=true.
+      gatewayClassName: "istio"
+      # Namespace where the Gateway + HTTPRoute land. Mesh's existing
+      # gateway typically lives in `istio-system`; some setups use a
+      # dedicated `gateway` ns. The cert Secret is created in the same ns.
+      namespace: "istio-system"
+      # Wildcard domain for previews — e.g. "preview.decocms.com" yields
+      # `*.preview.decocms.com`. Required when enabled=true.
+      domain: ""
+      # cert-manager ClusterIssuer that issues the wildcard cert. Required
+      # when enabled=true. The chart does NOT template the ClusterIssuer
+      # itself — that is per-cluster infrastructure (a Cloudflare DNS-01
+      # issuer, for example, needs your API token in a Secret).
+      clusterIssuer: ""
+      # PEM-format secret name created by cert-manager. Defaults to
+      # `<release-name>-sandbox-preview-tls`. Override only if the cert lives
+      # under a name dictated by external tooling.
+      tlsSecretName: ""
+    # Pin sandbox pods to a dedicated NodePool so a container escape lands
+    # on a node that has no mesh / postgres / NATS / OTel pods on it. Pair
+    # with a Karpenter NodePool that taints + labels matching nodes; see
+    # deploy/helm/README.md for the snippet. 
Empty defaults run sandbox + # pods on whatever the scheduler picks (current behavior). + nodeSelector: {} + # nodeSelector: + # workload: sandbox + tolerations: [] + # tolerations: + # - key: workload + # operator: Equal + # value: sandbox + # effect: NoSchedule + # Affinity rules merged into the sandbox PodSpec. Use podAffinity to + # co-locate sandbox pods on the same node (cheaper warm-pool packing, + # shared image cache); use nodeAffinity for soft node preferences that + # nodeSelector can't express. Empty default = scheduler picks. + affinity: {} + # affinity: + # podAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchLabels: + # app.kubernetes.io/name: studio-sandbox + # topologyKey: kubernetes.io/hostname + # User namespace remap (`spec.hostUsers: false`): UID 1000 inside the + # pod maps to a high, unprivileged subordinate UID on the node, so a + # container escape doesn't land as a real node UID. Requires K8s + # 1.30+ with a containerd/kernel that support userns (EKS default + # AMIs from late 2024 onward are fine). Defaults to current behavior + # (host users); flip to false to opt in. + hostUsers: true + # Read-only root filesystem. When true, /app + /tmp + /home/sandbox + # are remounted as emptyDirs and `safe.directory '*'` is set so git + # works against the chowned mount. Validate end-to-end (clone + + # bun/npm install + dev server start) on staging before flipping; the + # daemon may write to paths we haven't covered. Defaults to false to + # preserve current behavior on upgrade. + readOnlyRootFilesystem: false + warmPool: + # Enable only after measuring cold-start pain; every warm pod costs + # the full resources.requests above. 
+ enabled: false + size: 0 diff --git a/deploy/k8s-sandbox/local/README.md b/deploy/k8s-sandbox/local/README.md new file mode 100644 index 0000000000..53f45cbc76 --- /dev/null +++ b/deploy/k8s-sandbox/local/README.md @@ -0,0 +1,186 @@ +# Local k8s sandbox (kind) + +Scripted local bring-up for `AgentSandboxRunner`. One-command cluster + +agent-sandbox operator + mesh `SandboxTemplate`, loaded with the same +sandbox image the Docker runner uses. + +This is **dev ergonomics only** — no Terraform. Prod/staging installs the +operator via the deco infrastructure repo, not these scripts. Helm is used +for upstream third-party stacks (Prometheus, Grafana, OpenTelemetry +Collector) since that's their canonical install path; mesh-owned manifests +(SandboxTemplate, agent-sandbox operator) stay raw `kubectl apply`. + +The monitoring stack (kube-prometheus-stack + OTel collector daemonset + +sandbox dashboard) is deployed from values shared with prod — base values +live in [`../monitoring/`](../monitoring/), and `up.sh` layers the +kind-only overlay in [`monitoring/`](monitoring/) on top. See +[`../monitoring/README.md`](../monitoring/README.md) for the prod install. + +## Prereqs + +- [`docker`](https://docs.docker.com/engine/install/) — running +- [`kind`](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) +- [`kubectl`](https://kubernetes.io/docs/tasks/tools/) +- [`helm`](https://helm.sh/docs/intro/install/) — required only for the + monitoring stack; skip with `MONITORING=0 ./up.sh` if not installed. 
+ +Pins: +- agent-sandbox operator: `v0.4.2` (matches prod; hardcoded in `up.sh`) +- kube-prometheus-stack: `65.5.1` +- opentelemetry-collector: `0.108.0` +- cluster name: `studio-sandbox-dev` +- namespace: `agent-sandbox-system` (sandboxes), `monitoring` (Prom/Grafana/OTel) +- image tag: `mesh-sandbox:local` + +## Usage + +```bash +# bring everything up (idempotent) +./deploy/k8s-sandbox/local/up.sh + +# rebuild + reload the sandbox image after editing image/ +./deploy/k8s-sandbox/local/reload-image.sh + +# tear the cluster down +./deploy/k8s-sandbox/local/down.sh +``` + +`up.sh` does, in order: + +1. Creates the kind cluster (skipped if it exists) +2. Applies the agent-sandbox `v0.4.2` base manifest (namespace, CRDs, controller) +3. Applies the agent-sandbox `v0.4.2` extensions manifest (SandboxClaim, SandboxTemplate, …) +4. Waits for controller deployments to report `Available` +5. Builds the daemon bundle (`bun run --cwd packages/sandbox build`), then `packages/sandbox/image/Dockerfile` as `mesh-sandbox:local` +6. Loads the image into kind (required because the template pins `imagePullPolicy: Never`) +7. Applies `sandbox-template.yaml` +8. Installs `kube-prometheus-stack` (Prom + Grafana + the operator that + discovers `ServiceMonitor`s) and the OTel Collector daemonset that + scrapes per-node kubelet, enriches with tenant labels, and exposes + `/metrics` for Prometheus. Skip with `MONITORING=0 ./up.sh`. + +All `kubectl` calls pass `--context kind-studio-sandbox-dev` so an ambient +`KUBECONFIG` can't accidentally hit a real cluster. 
+ +## Local Grafana + +After `up.sh`: + +```bash +kubectl --context kind-studio-sandbox-dev port-forward \ + -n monitoring svc/kube-prometheus-stack-grafana 3001:80 +# → http://localhost:3001 (admin / admin) +# → Dashboards → "Studio Sandbox Overview" +``` + +Dashboard panels (per-org, per-sandbox-handle): + +- Active sandboxes by org +- Egress rate by org +- CPU / memory by org +- Top 10 sandboxes by 1-hour egress +- Warm-pool overhead pod count (no owning org) + +The pipeline: + +``` +kubelet (cAdvisor) ──► OTel collector daemonset + │ - kubeletstats receiver + │ - k8sattributes processor (reads pod labels: + │ studio.decocms.com/{org-id,user-id,sandbox-handle,role} + │ → series labels: org_id, user_id, sandbox_handle, sandbox_role) + │ - prometheus exporter on :8889 + ▼ + PodMonitor → kube-prometheus-stack Prometheus → Grafana +``` + +Pod labels come from `SandboxClaim.spec.additionalPodMetadata.labels`, +populated in `AgentSandboxRunner.provision()` from the `tenant` field +on `EnsureOptions`. Verify they're landing: + +```bash +kubectl --context kind-studio-sandbox-dev \ + get pod -n agent-sandbox-system --show-labels | grep studio.decocms.com +``` + +To iterate on dashboards/values without rebuilding the cluster: + +```bash +MONITORING_ONLY=1 ./deploy/k8s-sandbox/local/down.sh +./deploy/k8s-sandbox/local/up.sh +``` + +### Production install + +The same base values + dashboard ship to prod from this repo. See +[`../monitoring/README.md`](../monitoring/README.md) for the install +commands and the prod-overlay examples (remote-write, SSO, scoped +k8sattributes filter, ServiceMonitor for the in-cluster mesh Deployment). + +## Smoke test + +Stage 1 exit criterion from PLAN-K8S-MVP.md. Exercises +`AgentSandboxRunner` end-to-end against the live kind cluster: +ensure → exec → preview fetch → delete → recreate → ensure (warm) → +alive → delete. + +```bash +bun run deploy/k8s-sandbox/local/smoke.ts +``` + +Exits 0 on success. 
Uses a unique sandbox id per invocation and cleans up
+after itself, so repeated runs don't collide. Not in `bun test` — the
+runner needs a real cluster and ~5s of pod lifecycle.
+
+### Manual health check
+
+If you want to test the template/daemon layer without the runner. Since
+Stage 2.1 dropped the shared token from the template, the claim itself
+has to carry one — any string works, just keep it consistent with the
+curl call below.
+
+```bash
+CTX=kind-studio-sandbox-dev
+TOKEN="smoke-$(openssl rand -hex 16)"
+
+cat <<EOF | kubectl --context "$CTX" -n agent-sandbox-system apply -f -
+apiVersion: extensions.agents.x-k8s.io/v1alpha1
+kind: SandboxClaim
+metadata:
+  name: manual-health
+spec:
+  sandboxTemplateRef:
+    name: studio-sandbox
+  env:
+    - name: DAEMON_TOKEN
+      value: "$TOKEN"
+EOF
+
+kubectl --context "$CTX" -n agent-sandbox-system wait \
+  --for=condition=Ready sandboxclaim/manual-health --timeout=120s
+
+kubectl --context "$CTX" -n agent-sandbox-system port-forward \
+  svc/manual-health 9000:9000 &
+
+curl -fsS -H "Authorization: Bearer $TOKEN" \
+  http://127.0.0.1:9000/_daemon/health
+
+kubectl --context "$CTX" -n agent-sandbox-system delete sandboxclaim manual-health
+```
diff --git a/deploy/k8s-sandbox/local/down.sh b/deploy/k8s-sandbox/local/down.sh
new file mode 100755
index 0000000000..aaaaaaaaaa
--- /dev/null
+++ b/deploy/k8s-sandbox/local/down.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Tear down the local kind sandbox cluster, or — with MONITORING_ONLY=1 —
+# remove just the monitoring stack and keep the cluster.
+set -euo pipefail
+
+CLUSTER_NAME="studio-sandbox-dev"
+KCTX="kind-${CLUSTER_NAME}"
+
+log() { printf "\033[1;34m[down]\033[0m %s\n" "$*"; }
+
+if [[ "${MONITORING_ONLY:-0}" == "1" ]]; then
+  if ! kind get clusters 2>/dev/null | grep -qx "${CLUSTER_NAME}"; then
+    log "cluster ${CLUSTER_NAME} not found, nothing to remove"
+    exit 0
+  fi
+  log "uninstalling monitoring stack only"
+  helm uninstall otel-collector-sandbox --namespace monitoring --kube-context "${KCTX}" >/dev/null 2>&1 || true
+  helm uninstall kube-prometheus-stack --namespace monitoring --kube-context "${KCTX}" >/dev/null 2>&1 || true
+  kubectl --context "${KCTX}" delete namespace monitoring --ignore-not-found
+  exit 0
+fi
+
+if kind get clusters 2>/dev/null | grep -qx "${CLUSTER_NAME}"; then
+  log "deleting kind cluster ${CLUSTER_NAME}"
+  kind delete cluster --name "${CLUSTER_NAME}"
+else
+  log "cluster ${CLUSTER_NAME} not found, nothing to do"
+fi
diff --git a/deploy/k8s-sandbox/local/monitoring/values-kube-prometheus-stack.local.yaml b/deploy/k8s-sandbox/local/monitoring/values-kube-prometheus-stack.local.yaml
new file mode 100644
index 0000000000..3237f1b248
--- /dev/null
+++ b/deploy/k8s-sandbox/local/monitoring/values-kube-prometheus-stack.local.yaml
@@ -0,0 +1,27 @@
+# Kind-only overlay layered on top of `../../monitoring/values-kube-prometheus-stack.yaml`.
+# Helm deep-merges these values into the base — see `local/up.sh` for the
+# `helm upgrade -f base.yaml -f local-overlay.yaml` invocation.
+#
+# Anything here is unsafe / wrong for prod.
+
+grafana:
+  # Plaintext admin password is fine for kind, never for prod. Prod sets
+  # `grafana.admin.existingSecret` or wires SSO. 
+ adminPassword: admin + +prometheus: + prometheusSpec: + # Mesh runs on the host (`bun run dev`) in local-dev mode, exposing + # /metrics on http://localhost:3000. From kind's perspective the host + # is reachable via `host.docker.internal` (Docker Desktop on macOS/Windows + # provides this DNS name natively; on Linux you'd need `extraHosts`). + # Prod replaces this with a ServiceMonitor on the in-cluster mesh + # Deployment. + additionalScrapeConfigs: + - job_name: studio-host + scrape_interval: 30s + metrics_path: /metrics + static_configs: + - targets: ["host.docker.internal:3000"] + labels: + source: studio-runner diff --git a/deploy/k8s-sandbox/local/monitoring/values-otel-collector.local.yaml b/deploy/k8s-sandbox/local/monitoring/values-otel-collector.local.yaml new file mode 100644 index 0000000000..a79b9feb1e --- /dev/null +++ b/deploy/k8s-sandbox/local/monitoring/values-otel-collector.local.yaml @@ -0,0 +1,13 @@ +# Kind-only overlay layered on top of `../../monitoring/values-otel-collector.yaml`. +# Helm deep-merges these values into the base — see `local/up.sh` for the +# `helm upgrade -f base.yaml -f local-overlay.yaml` invocation. +# +# Anything here is unsafe / wrong for prod. + +config: + receivers: + kubeletstats: + # kind nodes use kubelet self-signed certs; skip verify locally. + # Prod relies on the cluster's CA-signed kubelet certs and never + # sets this. + insecure_skip_verify: true diff --git a/deploy/k8s-sandbox/local/reload-image.sh b/deploy/k8s-sandbox/local/reload-image.sh new file mode 100755 index 0000000000..456d7fcc57 --- /dev/null +++ b/deploy/k8s-sandbox/local/reload-image.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Rebuild the sandbox image, reload it into kind, and evict any running +# sandbox pods so the operator recreates them against the new binary. +# +# The SandboxTemplate itself isn't re-applied here — for template changes +# re-run up.sh. This script is strictly for iterating on image contents. 
+set -euo pipefail + +CLUSTER_NAME="studio-sandbox-dev" +IMAGE_TAG="mesh-sandbox:local" + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)" +SANDBOX_PKG="${REPO_ROOT}/packages/sandbox" +DOCKERFILE="${SANDBOX_PKG}/image/Dockerfile" +KCTX="kind-${CLUSTER_NAME}" + +log() { printf "\033[1;34m[reload]\033[0m %s\n" "$*"; } + +if ! kind get clusters 2>/dev/null | grep -qx "${CLUSTER_NAME}"; then + echo "cluster ${CLUSTER_NAME} does not exist — run ./up.sh first" >&2 + exit 1 +fi + +log "rebuilding daemon bundle" +bun run --cwd "${SANDBOX_PKG}" build + +log "rebuilding ${IMAGE_TAG}" +docker build -t "${IMAGE_TAG}" -f "${DOCKERFILE}" "${SANDBOX_PKG}" + +log "reloading ${IMAGE_TAG} into kind" +kind load docker-image "${IMAGE_TAG}" --name "${CLUSTER_NAME}" + +# No Deployment to roll — sandbox pods are owned by Sandbox resources (owned +# by SandboxClaims). Deleting the pods lets the operator recreate them with +# the freshly loaded image while leaving claims intact. +log "evicting running sandbox pods" +kubectl --context "${KCTX}" delete pod \ + -n agent-sandbox-system \ + -l app.kubernetes.io/name=studio-sandbox \ + --ignore-not-found + +log "done" diff --git a/deploy/k8s-sandbox/local/sandbox-template.yaml b/deploy/k8s-sandbox/local/sandbox-template.yaml new file mode 100644 index 0000000000..8d59ac193f --- /dev/null +++ b/deploy/k8s-sandbox/local/sandbox-template.yaml @@ -0,0 +1,106 @@ +# Mesh sandbox pod template for local kind clusters. +# +# One shared SandboxTemplate per cluster; every SandboxClaim references it by +# name and gets its own pod. Production will carry a near-identical template +# with hardened values (IfNotPresent pull, per-claim DAEMON_TOKEN via Secret, +# resource limits tuned to measured load). Changes here should be mirrored +# into decocms/infra_applications/provisioning/agent-sandbox/eks-envs/ when +# they graduate. 
+# +# The daemon listens on 9000 and refuses to start without DAEMON_TOKEN; user +# workloads bind port 3000 for the dev server (preview ingress terminates +# on that port). Ports + UID 1000 + /app workdir must track the Dockerfile. +apiVersion: extensions.agents.x-k8s.io/v1alpha1 +kind: SandboxTemplate +metadata: + name: studio-sandbox + namespace: agent-sandbox-system + labels: + app.kubernetes.io/name: studio-sandbox + app.kubernetes.io/managed-by: studio +spec: + # Claims inject DAEMON_TOKEN per-provision (Stage 2.1). The SandboxClaim + # CRD only accepts literal `{name, value}` on spec.env (no valueFrom), so + # the token is plaintext in the claim and mesh/RBAC is the secrecy + # boundary. Additive merge — claim env is added on top of this template's + # env. Once per-claim git credentials land (later in Stage 2+), the same + # mechanism carries them. + envVarsInjectionPolicy: Allowed + # The CRD defaults to Managed, which makes the operator install its own + # NetworkPolicy that only allows ingress from `app: sandbox-router` pods. + # That blocks the mesh → daemon path the preview-proxy uses. Mark the + # netpol Unmanaged so a real CNI (kindnet on recent kind, Calico/Cilium + # in prod) lets mesh reach the sandbox Service on port 9000. + networkPolicyManagement: Unmanaged + podTemplate: + metadata: + labels: + app.kubernetes.io/name: studio-sandbox + # Do NOT set `studio.decocms.com/role` here. Operator v0.4.2+ rejects + # claims whose additionalPodMetadata defines a label key the template + # already declared, even when values differ. The runner sets + # role=claimed via additionalPodMetadata, so this key must be absent. + spec: + automountServiceAccountToken: false + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: sandbox + image: mesh-sandbox:local + # Local only: kind loads the image manually via `kind load + # docker-image`. 
Prevents accidental ghcr pulls. + imagePullPolicy: Never + workingDir: /app + env: + - name: DAEMON_PORT + value: "9000" + - name: WORKDIR + value: "/app" + # DAEMON_TOKEN is injected per-claim (Stage 2.1) via + # SandboxClaim.spec.env rather than baked into the template — + # one leak compromises one sandbox, not every sandbox. + ports: + - name: daemon + containerPort: 9000 + protocol: TCP + - name: dev + containerPort: 3000 + protocol: TCP + # Laptop-shape limits. Docker Desktop on macOS defaults to ~4 GiB + # for the whole kind VM — a single 4 GiB sandbox would evict the + # control plane (coredns/etcd/apiserver). Prod runs a separate + # template with higher ceilings; do NOT just bump these here when + # you hit OOM on a heavy repo — bump Docker Desktop memory first, + # and only raise these limits if your laptop has the headroom. + # 3Gi fits a full deco dev stack (bun install + vite + react + # compiler + tailwind v4 + firebase/sentry) with margin; 1Gi OOMs + # as soon as vite boots. + resources: + requests: + cpu: "100m" + memory: "512Mi" + limits: + cpu: "1" + memory: "3Gi" + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + # User code writes to ~/.bun, /tmp, and the workdir; a read-only + # rootfs would need tmpfs mounts for each. Revisit after Stage 1. + readOnlyRootFilesystem: false + # No volumeMounts on /app on purpose: the image chowns /app to + # sandbox:sandbox (uid 1000). An emptyDir overlay would replace it + # with a root-owned dir, and Git 2.35+ refuses to operate there + # ("detected dubious ownership"). The pod's ephemeral image layer + # gives the same lifetime as emptyDir for a shutdownPolicy=Delete + # claim. + # Daemon has its own /_daemon/health route; we rely on the + # agent-sandbox operator's Ready condition (see waitForSandboxReady + # in runner/k8s/client.ts) for readiness gating. No need to + # duplicate probes here until we see a false-ready in practice. 
diff --git a/deploy/k8s-sandbox/local/smoke.ts b/deploy/k8s-sandbox/local/smoke.ts new file mode 100644 index 0000000000..eb362a3604 --- /dev/null +++ b/deploy/k8s-sandbox/local/smoke.ts @@ -0,0 +1,209 @@ +/** + * Stage 1 exit-criterion smoke test (see PLAN-K8S-MVP.md § 1.5/1.6). + * + * Runs AgentSandboxRunner end-to-end against a live kind cluster: + * ensure → exec → preview fetch → delete → recreate (cold) → ensure (warm) + * → alive → delete. + * + * Not part of `bun test` — the runner needs a real cluster to talk to and + * tears up/down ~60s of pod lifecycle. Run explicitly: + * + * bun run deploy/k8s-sandbox/local/smoke.ts + * + * Preconditions (see README.md): + * - `deploy/k8s-sandbox/local/up.sh` succeeded + * - `kubectl --context kind-studio-sandbox-dev get pods -n agent-sandbox-system` + * shows the controller Running + * - `mesh-sandbox:local` is loaded into the kind cluster + * + * Exit codes: + * 0 — all steps passed + * 1 — any step failed (error surfaced to stderr) + */ + +// Imported via relative paths, not the package export name: this script is +// not inside any package, so bun would resolve module names from repo-root +// node_modules (which doesn't carry @decocms/sandbox). Relative imports +// resolve @kubernetes/client-node from the package's own node_modules +// naturally. +import { + KubeConfig, + AgentSandboxRunner, +} from "../../../packages/sandbox/server/runner/agent-sandbox"; +import type { SandboxId } from "../../../packages/sandbox/server/runner/types"; + +const KIND_CONTEXT = "kind-studio-sandbox-dev"; +const NAMESPACE = "agent-sandbox-system"; + +// Unique per run so repeated invocations don't collide on stale state. +const RUN_ID = process.env.SMOKE_RUN_ID ?? 
Date.now().toString(36);
+const ID: SandboxId = {
+  userId: `smoke-user-${RUN_ID}`,
+  projectRef: `agent:smoke-org:smoke-vmcp:smoke-${RUN_ID}`,
+};
+
+function log(step: string, detail = ""): void {
+  const ts = new Date().toISOString().slice(11, 23);
+  process.stdout.write(
+    `[smoke ${ts}] ${step}${detail ? ` — ${detail}` : ""}\n`,
+  );
+}
+
+function assertEq<T>(actual: T, expected: T, message: string): void {
+  if (actual !== expected) {
+    throw new Error(
+      `${message}\n  expected: ${JSON.stringify(expected)}\n  actual: ${JSON.stringify(actual)}`,
+    );
+  }
+}
+
+function buildKubeConfig(): KubeConfig {
+  const kc = new KubeConfig();
+  kc.loadFromDefault();
+  // Force kind context: ambient KUBECONFIG on dev laptops often points at
+  // prod/staging, and a misfire here would create real pods.
+  kc.setCurrentContext(KIND_CONTEXT);
+  return kc;
+}
+
+async function run(): Promise<void> {
+  const kubeConfig = buildKubeConfig();
+
+  // Two runner instances with disjoint in-process maps — simulates a mesh
+  // restart between steps (1st provision → 2nd ensure after delete).
+  // No stateStore: the K8s API is the source of truth for the smoke test.
+  const runnerA = new AgentSandboxRunner({
+    kubeConfig,
+    namespace: NAMESPACE,
+  });
+  const runnerB = new AgentSandboxRunner({
+    kubeConfig,
+    namespace: NAMESPACE,
+  });
+
+  let handle = "";
+  let firstPreviewUrl: string | null = null;
+
+  try {
+    // 1. ensure (cold). Claim, pod, daemon, port-forward.
+    log("1/8 ensure (cold)");
+    const t0 = Date.now();
+    const sandboxA = await runnerA.ensure(ID, {});
+    handle = sandboxA.handle;
+    log("  created", `handle=${handle} (${Date.now() - t0}ms)`);
+    if (!handle.startsWith("studio-sb-")) {
+      throw new Error(`handle missing expected prefix: ${handle}`);
+    }
+
+    // 2. exec. Bash round-trip through the daemon. 
+ log("2/8 exec"); + const echo = await runnerA.exec(handle, { + command: "echo hello-from-pod && id -u && pwd", + }); + if (echo.exitCode !== 0) { + throw new Error(`exec exit=${echo.exitCode} stderr=${echo.stderr}`); + } + const lines = echo.stdout.trim().split("\n"); + assertEq(lines[0], "hello-from-pod", "exec stdout line 1"); + assertEq(lines[1], "1000", "pod should run as uid 1000"); + assertEq(lines[2], "/app", "workdir should be /app"); + + // 3. preview URL + HTTP fetch. + log("3/8 preview"); + const preview = await runnerA.getPreviewUrl(handle); + if (!preview) throw new Error("getPreviewUrl returned null"); + firstPreviewUrl = preview; + if (!preview.startsWith("http://127.0.0.1:")) { + throw new Error(`unexpected preview URL shape: ${preview}`); + } + // Dev server won't bind in a bare-pod smoke (no repo / no dev script). + // We only check that the port-forward is live — any HTTP response (incl. + // 502/404) proves the listener is accepting connections. + const previewResp = await fetch(preview, { + signal: AbortSignal.timeout(3_000), + }).catch((err: unknown) => err as Error); + if (previewResp instanceof Error) { + // Connection refused from inside the pod is fine — dev server isn't + // running. What we must NOT see is ECONNREFUSED on the 127.0.0.1 + // listener itself (that would mean the forwarder never bound). + const msg = previewResp.message; + if (/ECONNREFUSED.*127\.0\.0\.1/.test(msg)) { + throw new Error(`preview port-forward not listening: ${msg}`); + } + log(" forwarder live", "dev server not bound (expected)"); + } else { + log(" forwarder live", `status=${previewResp.status}`); + } + + // 4. alive=true. + log("4/8 alive"); + const aliveBefore = await runnerA.alive(handle); + if (!aliveBefore) throw new Error("alive returned false for Ready claim"); + + // 5. delete. 
+ log("5/8 delete"); + await runnerA.delete(handle); + // Operator takes a beat to reconcile the claim gone; alive() depends on + // the claim existing, so this may flip false on the next tick. Poll + // briefly rather than sleep a fixed amount. + const deletedBy = Date.now() + 30_000; + while (Date.now() < deletedBy) { + if (!(await runnerA.alive(handle))) break; + await new Promise((r) => setTimeout(r, 500)); + } + if (await runnerA.alive(handle)) { + throw new Error("claim still reports alive 30s after delete"); + } + + // 6. recreate (cold) through runnerB. Fresh process view of the same id — + // proves a restarted mesh can bring the same projectRef back up without + // any in-process state. + log("6/8 recreate (cold)"); + const t1 = Date.now(); + const sandboxB = await runnerB.ensure(ID, {}); + log(" recreated", `handle=${sandboxB.handle} (${Date.now() - t1}ms)`); + assertEq(sandboxB.handle, handle, "recreate should yield same handle"); + + // 7. ensure (warm) — second call on runnerB must short-circuit through + // the in-process map, no new provision, same handle. + log("7/8 ensure (warm)"); + const t2 = Date.now(); + const sandboxWarm = await runnerB.ensure(ID, {}); + const warmElapsed = Date.now() - t2; + log(" warm", `${warmElapsed}ms`); + assertEq(sandboxWarm.handle, handle, "warm ensure should match handle"); + if (warmElapsed > 5_000) { + throw new Error( + `warm ensure took ${warmElapsed}ms — expected in-process cache to make this near-instant`, + ); + } + + // Alive should still be true after the warm round-trip. + if (!(await runnerB.alive(handle))) { + throw new Error("alive=false after warm ensure"); + } + + // 8. final delete. Leaves the kind cluster clean for the next run. + log("8/8 delete (final)"); + await runnerB.delete(handle); + + log("OK", `handle=${handle} firstPreview=${firstPreviewUrl ?? "null"}`); + } catch (err) { + // Best-effort cleanup so a failing run doesn't wedge the cluster. 
+ if (handle) { + await runnerA.delete(handle).catch(() => {}); + await runnerB.delete(handle).catch(() => {}); + } + throw err; + } +} + +run().then( + () => process.exit(0), + (err: unknown) => { + process.stderr.write( + `[smoke] FAILED: ${err instanceof Error ? (err.stack ?? err.message) : String(err)}\n`, + ); + process.exit(1); + }, +); diff --git a/deploy/k8s-sandbox/local/up.sh b/deploy/k8s-sandbox/local/up.sh new file mode 100755 index 0000000000..59d46f9d5a --- /dev/null +++ b/deploy/k8s-sandbox/local/up.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash +# Bring up the local kind cluster used by AgentSandboxRunner. +# +# Idempotent: re-running re-applies the operator + template and reloads the +# sandbox image. Cluster creation is skipped if studio-sandbox-dev already +# exists. +# +# Pins agent-sandbox to v0.4.2 (matches prod in +# decocms/infra_applications/provisioning/agent-sandbox-operator/eks-envs/). +# Bumping here means bumping prod too. +set -euo pipefail + +CLUSTER_NAME="studio-sandbox-dev" +OPERATOR_VERSION="v0.4.2" +IMAGE_TAG="mesh-sandbox:local" + +# Monitoring stack pins. Bumping these is fine but verify the dashboard +# queries still work (kubeletstats metric names occasionally rename across +# OTel collector contrib releases). +KUBE_PROM_STACK_VERSION="65.5.1" +OTEL_COLLECTOR_VERSION="0.108.0" + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)" +SANDBOX_PKG="${REPO_ROOT}/packages/sandbox" +DOCKERFILE="${SANDBOX_PKG}/image/Dockerfile" +# Shared (prod-safe) monitoring values live one level up; this script layers +# the kind-only overlay in local/monitoring/ on top. 
+MONITORING_DIR="${SCRIPT_DIR}/../monitoring" + +MANIFEST_URL="https://github.com/kubernetes-sigs/agent-sandbox/releases/download/${OPERATOR_VERSION}/manifest.yaml" +EXTENSIONS_URL="https://github.com/kubernetes-sigs/agent-sandbox/releases/download/${OPERATOR_VERSION}/extensions.yaml" + +log() { printf "\033[1;34m[up]\033[0m %s\n" "$*"; } + +# 1. kind cluster +if kind get clusters 2>/dev/null | grep -qx "${CLUSTER_NAME}"; then + log "cluster ${CLUSTER_NAME} already exists, skipping create" +else + log "creating kind cluster ${CLUSTER_NAME}" + kind create cluster --name "${CLUSTER_NAME}" +fi + +# kubectl commands target kind's context explicitly so an ambient KUBECONFIG +# pointing at a real cluster can't accidentally install the operator there. +KCTX="kind-${CLUSTER_NAME}" + +# 2. agent-sandbox operator (creates namespace + CRDs + controller deployment) +log "applying agent-sandbox ${OPERATOR_VERSION} base manifest" +kubectl --context "${KCTX}" apply -f "${MANIFEST_URL}" + +# 3. agent-sandbox extensions CRDs (SandboxClaim, SandboxTemplate, …) +log "applying agent-sandbox ${OPERATOR_VERSION} extensions" +kubectl --context "${KCTX}" apply -f "${EXTENSIONS_URL}" + +# 4. wait for controller(s) to be Available before applying our template +log "waiting for agent-sandbox controller(s) to become Available" +kubectl --context "${KCTX}" wait \ + --for=condition=Available deployment \ + -n agent-sandbox-system --all --timeout=180s + +# 5. build the sandbox image (same Dockerfile the Docker runner uses). +# The Dockerfile copies `daemon/dist/daemon.js` from the build context, so +# the daemon bundle has to be produced first and the build context has to +# be the sandbox package root (not image/). +log "building daemon bundle" +bun run --cwd "${SANDBOX_PKG}" build + +log "building ${IMAGE_TAG} from ${SANDBOX_PKG}" +docker build -t "${IMAGE_TAG}" -f "${DOCKERFILE}" "${SANDBOX_PKG}" + +# 6. 
load into kind so imagePullPolicy: Never resolves +log "loading ${IMAGE_TAG} into kind cluster ${CLUSTER_NAME}" +kind load docker-image "${IMAGE_TAG}" --name "${CLUSTER_NAME}" + +# 7. mesh SandboxTemplate (shared by every SandboxClaim) +log "applying SandboxTemplate" +kubectl --context "${KCTX}" apply -f "${SCRIPT_DIR}/sandbox-template.yaml" + +# 8. monitoring stack: kube-prometheus-stack (Prom + Grafana + the operator +# whose CRDs the OTel collector's ServiceMonitor depends on) followed by +# the OTel daemonset that scrapes kubelet → enriches with tenant labels → +# exposes /metrics for Prometheus to scrape. +# +# Helm enters the local stack only for these third-party charts; SandboxTemplate +# and operator stay raw kubectl. Skip via `MONITORING=0 ./up.sh` if you want +# the cluster without dashboards. +if [[ "${MONITORING:-1}" == "1" ]]; then + if ! command -v helm >/dev/null 2>&1; then + log "helm not installed; skipping monitoring stack (set MONITORING=0 to silence)" + else + log "adding helm repos" + helm repo add prometheus-community https://prometheus-community.github.io/helm-charts >/dev/null 2>&1 || true + helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts >/dev/null 2>&1 || true + helm repo update >/dev/null + + log "installing kube-prometheus-stack ${KUBE_PROM_STACK_VERSION}" + helm upgrade --install kube-prometheus-stack \ + prometheus-community/kube-prometheus-stack \ + --kube-context "${KCTX}" \ + --namespace monitoring --create-namespace \ + --version "${KUBE_PROM_STACK_VERSION}" \ + -f "${MONITORING_DIR}/values-kube-prometheus-stack.yaml" \ + -f "${SCRIPT_DIR}/monitoring/values-kube-prometheus-stack.local.yaml" \ + --wait --timeout 5m + + log "installing opentelemetry-collector ${OTEL_COLLECTOR_VERSION} (daemonset)" + helm upgrade --install otel-collector-sandbox \ + open-telemetry/opentelemetry-collector \ + --kube-context "${KCTX}" \ + --namespace monitoring \ + --version "${OTEL_COLLECTOR_VERSION}" \ + -f 
"${MONITORING_DIR}/values-otel-collector.yaml" \ + -f "${SCRIPT_DIR}/monitoring/values-otel-collector.local.yaml" \ + --wait --timeout 3m + + log "applying sandbox dashboard ConfigMap" + # `--dry-run | apply` so re-runs replace the ConfigMap idempotently. + kubectl --context "${KCTX}" -n monitoring create configmap studio-sandbox-dashboard \ + --from-file="${MONITORING_DIR}/dashboards/sandbox-overview.json" \ + --dry-run=client -o yaml | \ + kubectl --context "${KCTX}" apply -f - + kubectl --context "${KCTX}" -n monitoring label configmap studio-sandbox-dashboard \ + grafana_dashboard=1 --overwrite >/dev/null + + log "monitoring ready: kubectl port-forward -n monitoring svc/kube-prometheus-stack-grafana 3001:80" + log " → http://localhost:3001 (admin / admin) → Dashboards → 'Studio Sandbox Overview'" + fi +fi + +log "ready. smoke test: see README.md" diff --git a/deploy/k8s-sandbox/monitoring/README.md b/deploy/k8s-sandbox/monitoring/README.md new file mode 100644 index 0000000000..d22fe7ec7d --- /dev/null +++ b/deploy/k8s-sandbox/monitoring/README.md @@ -0,0 +1,152 @@ +# Mesh sandbox monitoring stack + +Per-org / per-sandbox cost-attribution metrics. Pipeline: + +``` +kubelet (cAdvisor) ──► OTel collector daemonset + │ - kubeletstats receiver + │ - k8sattributes processor (reads pod labels: + │ studio.decocms.com/{org-id,user-id,sandbox-handle,role} + │ → series labels: org_id, user_id, sandbox_handle, sandbox_role) + │ - prometheus exporter on :8889 + ▼ + PodMonitor → kube-prometheus-stack Prometheus → Grafana +``` + +Pod labels are populated by `AgentSandboxRunner.provision()` from the +`tenant` field on `EnsureOptions` and surface on every sandbox pod via +`SandboxClaim.spec.additionalPodMetadata.labels`. + +## Files + +| File | What it is | +|---|---| +| `values-kube-prometheus-stack.yaml` | Base values — Prometheus + Grafana + the operator. Prod-safe defaults; no admin credentials, no host scrape configs. 
| +| `values-otel-collector.yaml` | Base values — OTel collector daemonset that scrapes kubelet → enriches with tenant labels → exposes `/metrics` for Prometheus. | +| `dashboards/sandbox-overview.json` | Grafana dashboard. Loaded via the `grafana_dashboard=1` ConfigMap label sidecar. | + +Local kind layers a kind-only overlay on top of these — see +`../local/monitoring/`. Prod layers its own overlay (storage backend, auth, +remote-write, larger resources). + +## Prod install + +Versions track [`local/up.sh`][up.sh] — bump them together. + +```bash +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts +helm repo update + +# 1. kube-prometheus-stack (Prometheus + Grafana + operator) +helm upgrade --install kube-prometheus-stack \ + prometheus-community/kube-prometheus-stack \ + --namespace monitoring --create-namespace \ + --version 65.5.1 \ + -f deploy/k8s-sandbox/monitoring/values-kube-prometheus-stack.yaml \ + -f your-prod-overlay.yaml \ + --wait + +# 2. OTel collector daemonset +helm upgrade --install otel-collector-sandbox \ + open-telemetry/opentelemetry-collector \ + --namespace monitoring \ + --version 0.108.0 \ + -f deploy/k8s-sandbox/monitoring/values-otel-collector.yaml \ + -f your-prod-overlay.yaml \ + --wait + +# 3. Grafana dashboard ConfigMap (sidecar auto-imports it) +kubectl -n monitoring create configmap studio-sandbox-dashboard \ + --from-file=deploy/k8s-sandbox/monitoring/dashboards/sandbox-overview.json \ + --dry-run=client -o yaml | kubectl apply -f - +kubectl -n monitoring label configmap studio-sandbox-dashboard grafana_dashboard=1 --overwrite +``` + +## Prod overlay examples + +### kube-prometheus-stack + +```yaml +# your-prod-overlay.yaml +grafana: + admin: + existingSecret: grafana-admin # provision out of band + userKey: admin-user + passwordKey: admin-password + grafana.ini: + auth.generic_oauth: { ... 
} # SSO + +prometheus: + prometheusSpec: + retention: 15d # bump from base 7d + resources: + requests: { cpu: 500m, memory: 2Gi } + limits: { memory: 4Gi } + # Stream long-term storage to Mimir / VictoriaMetrics / Cortex + remoteWrite: + - url: https://mimir.internal/api/v1/push + # ...auth bits + # In-cluster scrape of the mesh Deployment (replaces the local + # host.docker.internal scrape). + additionalScrapeConfigs: + - job_name: mesh + kubernetes_sd_configs: + - role: endpoints + namespaces: { names: [mesh] } + relabel_configs: + - source_labels: [__meta_kubernetes_service_label_app_kubernetes_io_name] + regex: mesh + action: keep +``` + +Or, cleaner: deploy a `ServiceMonitor` alongside the mesh chart and let the +operator pick it up via the +`serviceMonitorSelectorNilUsesHelmValues: false` knob already set in the +base values. + +### OTel collector + +```yaml +# your-prod-overlay.yaml +config: + processors: + k8sattributes: + filter: + # Scope label enrichment to the sandbox namespace once you're not + # also debugging system pods. (`filter.namespace` is a single + # string in the k8sattributes processor, not a list.) + namespace: agent-sandbox-system +``` + +## Verify + +Confirm tenant labels are landing on sandbox pods: + +```bash +kubectl get pod -n agent-sandbox-system --show-labels | grep studio.decocms.com +``` + +Confirm the collector's `/metrics` endpoint is being scraped: in the +Prometheus UI (or whatever queries your remote-write target), + +```promql +sum by (org_id, sandbox_handle) ( + k8s_pod_cpu_utilization_ratio{k8s_namespace_name="agent-sandbox-system"} +) +``` + +should return one series per `(org_id, sandbox_handle)` pair with active +sandboxes. 
+ +## Versioning + +| Component | Pinned version | Where | +|---|---|---| +| `kube-prometheus-stack` | `65.5.1` | `local/up.sh` `KUBE_PROM_STACK_VERSION` | +| `opentelemetry-collector` | `0.108.0` | `local/up.sh` `OTEL_COLLECTOR_VERSION` | + +Bumping these is fine, but verify the dashboard's PromQL queries still work +— kubeletstats metric names occasionally rename across OTel collector +contrib releases. + +[up.sh]: ../local/up.sh diff --git a/deploy/k8s-sandbox/monitoring/dashboards/sandbox-overview.json b/deploy/k8s-sandbox/monitoring/dashboards/sandbox-overview.json new file mode 100644 index 0000000000..22d9ea7a3b --- /dev/null +++ b/deploy/k8s-sandbox/monitoring/dashboards/sandbox-overview.json @@ -0,0 +1,225 @@ +{ + "title": "Studio Sandbox Overview", + "uid": "studio-sandbox-overview", + "schemaVersion": 39, + "version": 1, + "refresh": "30s", + "time": { "from": "now-1h", "to": "now" }, + "tags": ["studio", "sandbox"], + "templating": { + "list": [ + { + "name": "namespace", + "type": "constant", + "current": { + "value": "agent-sandbox-system", + "text": "agent-sandbox-system" + }, + "query": "agent-sandbox-system", + "hide": 2 + } + ] + }, + "panels": [ + { + "id": 1, + "type": "stat", + "title": "Active sandboxes", + "description": "Total live sandboxes (claimed pods in the namespace). Source: cAdvisor via kubeletstats — depends only on the in-cluster pipeline so it stays accurate even if mesh isn't running locally. 
Lags ~30s on creation, up to 5min on deletion (Prometheus staleness).", + "gridPos": { "h": 6, "w": 4, "x": 0, "y": 0 }, + "fieldConfig": { "defaults": { "unit": "none", "noValue": "0" } }, + "options": { + "reduceOptions": { "calcs": ["last"] }, + "graphMode": "none", + "justifyMode": "auto" + }, + "targets": [ + { + "expr": "count(k8s_pod_cpu_utilization_ratio{k8s_namespace_name=\"$namespace\",sandbox_role=\"claimed\"}) or vector(0)", + "refId": "A", + "instant": true, + "range": false + } + ] + }, + { + "id": 11, + "type": "stat", + "title": "Orgs with active sandboxes", + "description": "Distinct orgs currently holding at least one sandbox.", + "gridPos": { "h": 6, "w": 4, "x": 4, "y": 0 }, + "fieldConfig": { "defaults": { "unit": "none", "noValue": "0" } }, + "options": { + "reduceOptions": { "calcs": ["last"] }, + "graphMode": "none", + "justifyMode": "auto" + }, + "targets": [ + { + "expr": "count(count by (org_id) (k8s_pod_cpu_utilization_ratio{k8s_namespace_name=\"$namespace\",sandbox_role=\"claimed\"})) or vector(0)", + "refId": "A", + "instant": true, + "range": false + } + ] + }, + { + "id": 2, + "type": "stat", + "title": "Warm-pool overhead pods", + "description": "Sandbox-template pods sitting idle in the namespace with no owning org (sandbox_handle empty). 
Cost paid for warm-start latency savings.", + "gridPos": { "h": 6, "w": 4, "x": 8, "y": 0 }, + "fieldConfig": { "defaults": { "unit": "none", "noValue": "0" } }, + "options": { + "reduceOptions": { "calcs": ["last"] }, + "graphMode": "none", + "justifyMode": "auto" + }, + "targets": [ + { + "expr": "count(k8s_pod_cpu_utilization_ratio{k8s_namespace_name=\"$namespace\",app_name=\"studio-sandbox\",sandbox_handle=\"\"}) or vector(0)", + "refId": "A", + "instant": true, + "range": false + } + ] + }, + { + "id": 3, + "type": "timeseries", + "title": "Egress rate by org (bytes/sec)", + "gridPos": { "h": 6, "w": 12, "x": 12, "y": 0 }, + "fieldConfig": { "defaults": { "unit": "Bps" } }, + "targets": [ + { + "expr": "sum by (org_id) (rate(k8s_pod_network_io_bytes_total{k8s_namespace_name=\"$namespace\",direction=\"transmit\",sandbox_role=\"claimed\"}[5m]))", + "legendFormat": "{{org_id}}", + "refId": "A" + } + ] + }, + { + "id": 4, + "type": "timeseries", + "title": "CPU usage by org (cores)", + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 6 }, + "fieldConfig": { "defaults": { "unit": "none" } }, + "targets": [ + { + "expr": "sum by (org_id) (k8s_pod_cpu_utilization_ratio{k8s_namespace_name=\"$namespace\",sandbox_role=\"claimed\"})", + "legendFormat": "{{org_id}}", + "refId": "A" + } + ] + }, + { + "id": 5, + "type": "timeseries", + "title": "Memory (working set) by org", + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 6 }, + "fieldConfig": { "defaults": { "unit": "bytes" } }, + "targets": [ + { + "expr": "sum by (org_id) (k8s_pod_memory_working_set_bytes{k8s_namespace_name=\"$namespace\",sandbox_role=\"claimed\"})", + "legendFormat": "{{org_id}}", + "refId": "A" + } + ] + }, + { + "id": 6, + "type": "table", + "title": "Top 10 sandboxes by egress (last 1h, MB)", + "gridPos": { "h": 9, "w": 24, "x": 0, "y": 14 }, + "fieldConfig": { "defaults": { "unit": "decmbytes" } }, + "options": { "showHeader": true }, + "targets": [ + { + "expr": "topk(10, sum by (org_id, user_id, 
sandbox_handle) (increase(k8s_pod_network_io_bytes_total{k8s_namespace_name=\"$namespace\",direction=\"transmit\",sandbox_role=\"claimed\"}[1h])) / 1024 / 1024)", + "format": "table", + "instant": true, + "refId": "A" + } + ] + }, + { + "id": 7, + "type": "row", + "title": "Mesh-side (runner POV)", + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 23 }, + "collapsed": false + }, + { + "id": 8, + "type": "timeseries", + "title": "Active sandboxes by org over time", + "description": "Per-org count of claimed pods. Useful for spotting which tenants are spinning up / draining. Source: cAdvisor.", + "gridPos": { "h": 8, "w": 12, "x": 0, "y": 24 }, + "fieldConfig": { "defaults": { "unit": "none" } }, + "targets": [ + { + "expr": "count by (org_id) (k8s_pod_cpu_utilization_ratio{k8s_namespace_name=\"$namespace\",sandbox_role=\"claimed\"})", + "legendFormat": "{{org_id}}", + "refId": "A" + } + ] + }, + { + "id": 9, + "type": "timeseries", + "title": "Ensure outcomes (cold-start ratio)", + "description": "fresh = new claim provisioned, resume = state-store rehydrate, adopt = adopted an existing cluster claim. High fresh ratio after a mesh restart = warm-pool would help.", + "gridPos": { "h": 8, "w": 12, "x": 12, "y": 24 }, + "fieldConfig": { "defaults": { "unit": "ops" } }, + "targets": [ + { + "expr": "sum by (outcome) (rate(studio_sandbox_ensure_outcome_total{runner_kind=\"kubernetes\"}[5m]))", + "legendFormat": "{{outcome}}", + "refId": "A" + } + ] + }, + { + "id": 10, + "type": "timeseries", + "title": "Proxy P95 latency by source (ms)", + "description": "P95 of mesh→daemon proxy duration. source=daemon is tool exec / control plane; source=preview is iframe traffic. 
Spikes correlate with daemon load or port-forward stalls (dev mode).", + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 32 }, + "fieldConfig": { "defaults": { "unit": "ms" } }, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum by (le, source) (rate(studio_sandbox_proxy_duration_ms_bucket{runner_kind=\"kubernetes\"}[5m])))", + "legendFormat": "{{source}}", + "refId": "A" + } + ] + }, + { + "id": 12, + "type": "row", + "title": "Cross-checks (mesh ↔ cluster drift)", + "gridPos": { "h": 1, "w": 24, "x": 0, "y": 40 }, + "collapsed": false + }, + { + "id": 13, + "type": "timeseries", + "title": "Active count: mesh-side vs cAdvisor", + "description": "Two views of 'how many sandboxes exist now'. mesh-side is what the mesh runner believes (real-time UpDownCounter); cAdvisor is what kubeletstats reports for pods consuming resources (lags ~30s–5min). Persistent divergence indicates orphan claims (mesh deleted but K8s didn't reap) or pods mesh doesn't know about (cluster drift).", + "gridPos": { "h": 8, "w": 24, "x": 0, "y": 41 }, + "fieldConfig": { "defaults": { "unit": "none" } }, + "targets": [ + { + "expr": "sum(studio_sandbox_active{runner_kind=\"kubernetes\"})", + "legendFormat": "mesh", + "refId": "A" + }, + { + "expr": "count(k8s_pod_cpu_utilization_ratio{k8s_namespace_name=\"$namespace\",sandbox_role=\"claimed\"})", + "legendFormat": "cAdvisor", + "refId": "B" + } + ] + } + ] +} diff --git a/deploy/k8s-sandbox/monitoring/values-kube-prometheus-stack.yaml b/deploy/k8s-sandbox/monitoring/values-kube-prometheus-stack.yaml new file mode 100644 index 0000000000..18b6b9ba69 --- /dev/null +++ b/deploy/k8s-sandbox/monitoring/values-kube-prometheus-stack.yaml @@ -0,0 +1,69 @@ +# kube-prometheus-stack values for the mesh sandbox monitoring stack. +# +# Used by both local kind (`deploy/k8s-sandbox/local/up.sh`) and prod +# installs. Local layers a kind-only overlay on top — see +# `local/monitoring/values-kube-prometheus-stack.local.yaml`. 
+# +# Scope: Prometheus + Grafana + the prometheus-operator (so ServiceMonitor +# / PodMonitor CRs work). Disabled the parts we don't need for sandbox +# cost-attribution dashboards: +# - alertmanager: nothing to alert on yet +# - node-exporter: per-node OS metrics aren't on the customer bill +# - kube-state-metrics: the OTel collector pipeline already enriches with +# pod labels, so we don't need KSM's parallel `kube_pod_labels` series. +# Re-enable later if you want pod-state info we can't get from kubeletstats. + +grafana: + # Prod must override `adminPassword` (or set `admin.existingSecret`) and + # typically wires SSO via grafana.ini. Local sets a plaintext password + # in its overlay. + defaultDashboardsTimezone: browser + service: + type: ClusterIP + # Sidecar watches for ConfigMaps with `grafana_dashboard=1` and auto-imports + # them as dashboards. up.sh creates one for sandbox-overview.json; prod can + # apply the same ConfigMap (or commit it as a manifest). + sidecar: + dashboards: + enabled: true + label: grafana_dashboard + labelValue: "1" + searchNamespace: ALL + # Datasource is auto-wired to the bundled Prometheus. + +prometheus: + prometheusSpec: + # Pick up ServiceMonitors / PodMonitors created by other charts (the OTel + # collector chart in particular) regardless of helm-managed labels. + # Without these, the operator only matches its own release's monitors + # and our collector's monitor is invisible. + serviceMonitorSelectorNilUsesHelmValues: false + podMonitorSelectorNilUsesHelmValues: false + ruleSelectorNilUsesHelmValues: false + probeSelectorNilUsesHelmValues: false + # Modest defaults: prod typically replaces these with `remoteWrite` to + # Mimir/VictoriaMetrics and bumps `retention` + `resources`. See + # `monitoring/README.md` for prod overlay examples. 
+ retention: 7d + resources: + requests: + memory: "400Mi" + cpu: "100m" + limits: + memory: "1Gi" + +alertmanager: + enabled: false + +# camelCase: kube-prometheus-stack's own toggles for the bundled subcharts. +# (The dashed-name forms `prometheus-node-exporter:` and +# `kube-state-metrics:` would be subchart values, not the parent toggle — +# silently ignored.) +nodeExporter: + enabled: false + +kubeStateMetrics: + enabled: false + +# Trim default dashboards/rules — we only care about sandbox-overview here. +defaultRules: + create: false diff --git a/deploy/k8s-sandbox/monitoring/values-otel-collector.yaml b/deploy/k8s-sandbox/monitoring/values-otel-collector.yaml new file mode 100644 index 0000000000..bb74c3227a --- /dev/null +++ b/deploy/k8s-sandbox/monitoring/values-otel-collector.yaml @@ -0,0 +1,169 @@ +# OpenTelemetry Collector (DaemonSet) — scrapes per-node kubelet for +# cAdvisor/pod metrics, enriches with our tenant labels, exposes +# /metrics for kube-prometheus-stack's Prometheus to scrape. +# +# Used by both local kind (`deploy/k8s-sandbox/local/up.sh`) and prod +# installs. Local layers a kind-only overlay on top — see +# `local/monitoring/values-otel-collector.local.yaml`. +# +# DaemonSet (not Deployment) because kubeletstats reads from each node's +# kubelet at https://${K8S_NODE_IP}:10250 — needs one collector per node. +# +# Why not the existing mesh OTel collector subchart: that one is a +# Deployment, scoped to mesh app traces/logs/metrics (OTLP receiver). +# Different pipeline, different topology, different concern. + +mode: daemonset + +image: + # contrib has kubeletstats + k8sattributes; the core image doesn't. + repository: otel/opentelemetry-collector-contrib + +# Downward API: kubeletstats needs the host IP, k8sattributes' pod_association +# needs the pod's namespace. Auto-injected by the chart's `presets.kubeletMetrics` +# but we set explicit config below, so wire env vars directly. 
+extraEnvs: + - name: K8S_NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + +# RBAC: kubeletstats hits nodes/stats + nodes/proxy on the local kubelet; +# k8sattributes watches pods + namespaces cluster-wide to enrich on label +# lookup. ReplicaSet/Deployment perms are not needed because +# `k8s.deployment.name` is intentionally not extracted (see below). +clusterRole: + create: true + rules: + - apiGroups: [""] + resources: ["nodes/stats", "nodes/proxy"] + verbs: ["get"] + - apiGroups: [""] + resources: ["pods", "namespaces", "nodes"] + verbs: ["get", "list", "watch"] + +# Container-level :8889/metrics for Prometheus to scrape. +ports: + metrics: + enabled: true + containerPort: 8889 + servicePort: 8889 + protocol: TCP + +# DaemonSet mode: no Service is created (pods are per-node), so ServiceMonitor +# has nothing to scrape. PodMonitor matches pods directly. +podMonitor: + enabled: true + metricsEndpoints: + - port: metrics + interval: 30s + +resources: + requests: + cpu: 100m + memory: 200Mi + limits: + memory: 500Mi + +config: + receivers: + # Suppress the OTLP receivers the chart enables by default — this + # collector is metrics-only, kubeletstats-fed. + otlp: null + jaeger: null + zipkin: null + prometheus: null + kubeletstats: + collection_interval: 30s + auth_type: serviceAccount + endpoint: "https://${env:K8S_NODE_IP}:10250" + # Prod relies on the cluster's CA-signed kubelet certs. Local kind + # uses self-signed kubelet certs and overrides `insecure_skip_verify` + # in `local/monitoring/values-otel-collector.local.yaml`. + # `volume` group requires the PV CSI metrics endpoint which kind + # doesn't expose. node/pod/container is what we need anyway. + metric_groups: [node, pod, container] + + processors: + # Resource enrichment: read the pod's labels via the API server and + # surface them as resource attributes. 
resource_to_telemetry_conversion + # on the prometheus exporter then promotes them to series labels. + k8sattributes: + auth_type: serviceAccount + passthrough: false + filter: + # Watch all namespaces (otherwise we miss the kube-system pods our + # node metrics are tagged against). Filtering happens at the + # exporter/dashboard layer, not here. + node_from_env_var: K8S_NODE_NAME + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.name + - from: resource_attribute + name: k8s.namespace.name + extract: + # k8s.deployment.name intentionally omitted — would require watching + # ReplicaSets cluster-wide, and we don't use the value. + metadata: + - k8s.pod.name + - k8s.pod.uid + - k8s.namespace.name + - k8s.node.name + labels: + - tag_name: org_id + key: studio.decocms.com/org-id + from: pod + - tag_name: user_id + key: studio.decocms.com/user-id + from: pod + - tag_name: sandbox_handle + key: studio.decocms.com/sandbox-handle + from: pod + - tag_name: sandbox_role + key: studio.decocms.com/role + from: pod + # `app.kubernetes.io/name=studio-sandbox` is set by the SandboxTemplate + # on every sandbox pod (warm-pool + claimed). Lets the warm-pool + # query distinguish sandbox pods from operator/other pods in the + # namespace by app_name + absence-of-handle. + - tag_name: app_name + key: app.kubernetes.io/name + from: pod + batch: + send_batch_size: 1024 + timeout: 10s + memory_limiter: + check_interval: 1s + limit_percentage: 80 + spike_limit_percentage: 25 + + exporters: + debug: null + # Promotes resource attributes (org_id, user_id, sandbox_handle, …) to + # Prometheus series labels. Without this they stay as metadata and + # dashboards can't filter on them. 
+ prometheus: + endpoint: "0.0.0.0:8889" + resource_to_telemetry_conversion: + enabled: true + send_timestamps: true + enable_open_metrics: true + + service: + telemetry: + logs: + level: info + pipelines: + # Drop the chart's default traces/logs pipelines — this collector is + # metrics-only. + traces: null + logs: null + metrics: + receivers: [kubeletstats] + processors: [memory_limiter, k8sattributes, batch] + exporters: [prometheus] diff --git a/packages/mesh-sdk/src/types/virtual-mcp.ts b/packages/mesh-sdk/src/types/virtual-mcp.ts index b2e44c95cf..693b224d26 100644 --- a/packages/mesh-sdk/src/types/virtual-mcp.ts +++ b/packages/mesh-sdk/src/types/virtual-mcp.ts @@ -137,6 +137,8 @@ export type GithubRepo = z.infer; * `runnerKind` lets the UI construct daemon URLs correctly: * - docker: daemon is reached via the mesh proxy at `/api/sandbox//_daemon/*` * - freestyle: daemon lives at `${previewUrl}/_decopilot_vm/*` on the VM domain + * - agent-sandbox: daemon is reached via the mesh proxy (same transport as docker); + * preview URL is the per-claim HTTPRoute host (in-cluster) or a local port-forward (kind dev). * * `previewUrl` is nullable: blank / tool sandboxes (no `workload`, no dev * server) have nothing to render. UI code MUST check before constructing @@ -152,7 +154,7 @@ export const VmMapEntrySchema = z.object({ .describe( "URL where the VM's iframe-proxied UI is served, or null when the sandbox has no dev server (blank / tool sandboxes).", ), - runnerKind: z.enum(["docker", "freestyle"]).optional(), + runnerKind: z.enum(["docker", "freestyle", "agent-sandbox"]).optional(), createdAt: z .number() .optional() diff --git a/packages/sandbox/README.md b/packages/sandbox/README.md index 85fc214e4f..d6cc78d7ee 100644 --- a/packages/sandbox/README.md +++ b/packages/sandbox/README.md @@ -2,6 +2,42 @@ Isolated per-user sandboxes for MCP tool execution. 
+One sandbox per `(userId, projectRef)`: a container (or VM) holding a checked-out +repo plus an in-pod daemon that proxies exec, file ops, and the dev server. +Callers go through a single `SandboxRunner` interface; the runner decides how +the sandbox is provisioned and reached. + +## Runners + +Three runner backends live behind the common `SandboxRunner` interface +(`server/runner/types.ts`): + +- **Docker** (`./runner`) — default for local dev. Spawns containers via the + local Docker CLI and routes browser traffic through an in-process ingress + bound on `SANDBOX_INGRESS_PORT`. +- **Freestyle** (`./runner/freestyle`) — hosted VMs. Preview URL is a + Freestyle-provided HTTPS domain; daemon traffic is base64-wrapped to clear + Cloudflare WAF. SDKs are `optionalDependencies` and only pulled in when this + runner is selected. +- **agent-sandbox** (`./runner/agent-sandbox`) — one `SandboxClaim` per sandbox + against the [kubernetes-sigs/agent-sandbox](https://github.com/kubernetes-sigs/agent-sandbox) + operator. Mesh talks to pods via apiserver port-forward in dev; in prod, + `previewUrlPattern` switches the preview URL to real ingress and skips the + dev forward. + +### Selection + +The host app calls `resolveRunnerKindFromEnv()` / `tryResolveRunnerKindFromEnv()` +from `./runner`: + +1. `STUDIO_SANDBOX_RUNNER=docker|freestyle|agent-sandbox` wins when set. +2. Otherwise, `FREESTYLE_API_KEY` present → `freestyle`. +3. Otherwise, in `NODE_ENV=production` → unresolved (strict variant throws). +4. Otherwise (dev) → `docker` if the CLI is on `PATH`, else unresolved. + +agent-sandbox is **explicit-only** — it's never auto-selected, so docker-only +deploys don't accidentally need a kubeconfig. + ## URL shape - **Prod**: `https://./*` → pod dev server on `:3000` @@ -14,7 +50,7 @@ truncated SHA256 of `userId:projectRef`; collisions are bounded per-project. The URL itself is the routing key, not a capability — daemon endpoints require a bearer token. 
-## Local dev +## Local dev (Docker) The local ingress forwarder binds both `127.0.0.1` and `::1` on `SANDBOX_INGRESS_PORT` (default `7070`) and routes requests by `Host:` header. @@ -29,7 +65,13 @@ for this, you can remove them — they're no longer needed. ## Environment -- `SANDBOX_INGRESS_PORT` (default `7070`) — local forwarder bind port. +- `STUDIO_SANDBOX_RUNNER` — pin the runner: `docker`, `freestyle`, or + `agent-sandbox`. Leave unset in dev to let auto-detect pick docker. +- `FREESTYLE_API_KEY` — required for the Freestyle runner. Presence also + auto-selects it when `STUDIO_SANDBOX_RUNNER` is unset. +- `MESH_SANDBOX_IMAGE` — override the Docker runner image + (default `mesh-sandbox:local`, built from `image/Dockerfile`). +- `SANDBOX_INGRESS_PORT` (default `7070`) — local Docker ingress bind port. - `SANDBOX_ROOT_URL` — production template for the pod URL. Either a bare base (`https://sandboxes.example.com` → handle becomes leading subdomain) or a `{handle}` template (`https://{handle}.sandboxes.example.com`). diff --git a/packages/sandbox/daemon/entry.ts b/packages/sandbox/daemon/entry.ts index cf49b01626..866b8367e2 100644 --- a/packages/sandbox/daemon/entry.ts +++ b/packages/sandbox/daemon/entry.ts @@ -18,9 +18,11 @@ import { makeScriptsHandler } from "./routes/scripts"; import { makeHealthHandler } from "./routes/health"; import { makeEventsHandler } from "./routes/events-stream"; import { makeProxyHandler } from "./proxy"; +import { makeWsUpgrader, type WsProxyData } from "./ws-proxy"; import { jsonResponse } from "./routes/body-parser"; import { startUpstreamProbe } from "./probe"; import { BranchStatusMonitor } from "./git/branch-status"; +import { discoverDescendantListeningPorts } from "./process/port-discovery"; // Auto-generate DAEMON_BOOT_ID when not provided (dev/test). In production // the runner supplies a per-container UUID via env. 
@@ -46,13 +48,46 @@ const orchestrator = new SetupOrchestrator({ const branchStatus = new BranchStatusMonitor(config, broadcaster); let discoveredScripts: string[] | null = null; + +// Build the ordered candidate-port list each tick: +// 1. Ports any descendant of a daemon-managed dev process is listening on +// (Vite v7 / Next / Astro / etc. mostly ignore PORT=$DEV_PORT, so this +// is the source of truth.) +// 2. config.devPort — the env-hint fallback. Honored by frameworks that +// respect PORT, and used by the e2e tests where there's no managed +// dev process and the upstream is started externally. +const excludeFromDiscovery = new Set([config.proxyPort]); +const getCandidatePorts = (): number[] => { + const ordered: number[] = []; + const seen = new Set(); + const push = (p: number) => { + if (!seen.has(p)) { + seen.add(p); + ordered.push(p); + } + }; + const rootPids = processManager.allPids(); + if (rootPids.length > 0) { + for (const port of discoverDescendantListeningPorts({ + rootPids, + excludePorts: excludeFromDiscovery, + })) { + push(port); + } + } + push(config.devPort); + return ordered; +}; + const lastStatus = startUpstreamProbe({ upstreamHost: "localhost", - upstreamPort: config.devPort, + getCandidatePorts, onChange: (s) => broadcaster.broadcastEvent("status", { type: "status", ...s }), }); +const getDevPort = (): number => lastStatus.port ?? config.devPort; + const scriptsHandler = makeScriptsHandler(() => discoveredScripts ?? 
[]); // Intercept the `scripts` event so SSE replay can serve the latest list on @@ -90,16 +125,29 @@ const eventsH = makeEventsHandler({ getActiveProcesses: () => processManager.activeNames(), getLastBranchStatus: () => branchStatus.getLast(), }); -const proxyH = makeProxyHandler({ config, broadcaster }); +const proxyH = makeProxyHandler({ broadcaster, getDevPort }); +const wsProxy = makeWsUpgrader(getDevPort); -Bun.serve({ +Bun.serve({ port: config.proxyPort, hostname: "0.0.0.0", idleTimeout: 0, - async fetch(req) { + async fetch(req, server) { const url = new URL(req.url); const p = url.pathname; + // WebSocket upgrade — Vite HMR + any other dev-server WS. We forward + // to in-pod localhost:devPort so HMR survives the daemon's reverse + // proxy. Daemon-internal SSE (/_decopilot_vm/events) stays HTTP. + if ( + req.headers.get("upgrade")?.toLowerCase() === "websocket" && + !p.startsWith("/_decopilot_vm/") + ) { + const ok = server.upgrade(req, { data: wsProxy.upgradeData(req) }); + if (ok) return undefined as unknown as Response; + return new Response("Upgrade failed", { status: 400 }); + } + if (p === "/health" && req.method === "GET") return healthH(); if (req.method === "GET" && p === "/_decopilot_vm/events") return eventsH(); @@ -135,6 +183,11 @@ Bun.serve({ return proxyH(req); }, + websocket: { + open: wsProxy.open, + message: wsProxy.message, + close: wsProxy.close, + }, }); // Start the branch-status monitor once .git is on disk. Two paths: diff --git a/packages/sandbox/daemon/events/broadcast.ts b/packages/sandbox/daemon/events/broadcast.ts index 6bc359f6f1..a6dc023500 100644 --- a/packages/sandbox/daemon/events/broadcast.ts +++ b/packages/sandbox/daemon/events/broadcast.ts @@ -26,6 +26,10 @@ export class Broadcaster { broadcastChunk(source: string, data: string): void { if (!data) return; this.replay.append(source, data); + // Tee to stdout so `kubectl logs` / k9s show the same output that SSE + // subscribers see. 
The structured events (broadcastEvent below) stay + // SSE-only — they're machine-readable JSON and would be noise here. + process.stdout.write(`[${source}] ${data}`); const bytes = sseFormat("log", JSON.stringify({ source, data })); this.fan(bytes); } diff --git a/packages/sandbox/daemon/events/sse.ts b/packages/sandbox/daemon/events/sse.ts index 81e61fc2ea..5c7295140e 100644 --- a/packages/sandbox/daemon/events/sse.ts +++ b/packages/sandbox/daemon/events/sse.ts @@ -3,7 +3,11 @@ import { sseFormat } from "./sse-format"; export interface SseHandshakeDeps { broadcaster: Broadcaster; - getLastStatus: () => { ready: boolean; htmlSupport: boolean }; + getLastStatus: () => { + ready: boolean; + htmlSupport: boolean; + port: number | null; + }; getDiscoveredScripts: () => string[] | null; getActiveProcesses: () => string[]; getLastBranchStatus: () => unknown | null; diff --git a/packages/sandbox/daemon/probe.ts b/packages/sandbox/daemon/probe.ts index c32ab51026..aa4d19fb5b 100644 --- a/packages/sandbox/daemon/probe.ts +++ b/packages/sandbox/daemon/probe.ts @@ -3,34 +3,120 @@ import { FAST_PROBE_LIMIT, FAST_PROBE_MS, SLOW_PROBE_MS } from "./constants"; export interface ProbeState { ready: boolean; htmlSupport: boolean; + /** The port that last responded to HEAD `/`, or null if none yet. */ + port: number | null; } export interface ProbeDeps { upstreamHost: string; - upstreamPort: number; + /** + * Candidate ports to score each tick. All are probed in parallel; the + * one with the highest "looks like the dev preview" score wins. Empty + * array → state stays { ready:false, port:null }. + */ + getCandidatePorts: () => number[]; onChange: (state: ProbeState) => void; } +interface ProbeResult { + port: number; + responded: boolean; + ready: boolean; + htmlSupport: boolean; + /** Higher = more likely to be the actual dev preview surface. */ + score: number; +} + +interface HeadResult { + ok: boolean; + status: number; + isHtml: boolean; +} + +/** + * Score a port. 
The `/@vite/client` probe disambiguates Vite from any + * other listener that happens to also serve HTML at `/`: Vite returns + * JS, anything else returns HTML or 404. Sidecar runtimes (workerd, + * esbuild) are filtered upstream by port-discovery.ts — we don't probe + * them from here. + */ +function score(root: HeadResult | null, viteClient: HeadResult | null): number { + let s = 0; + if (root) { + if (root.ok) s += root.isHtml ? 100 : 50; + else s += 10; // HTTP, but not 2xx-3xx + } + if (viteClient && viteClient.ok && !viteClient.isHtml) s += 50; + return s; +} + /** Kicks off the probe loop; returns the current state (live-updated). */ export function startUpstreamProbe(deps: ProbeDeps): ProbeState { - const state: ProbeState = { ready: false, htmlSupport: false }; + const state: ProbeState = { ready: false, htmlSupport: false, port: null }; let count = 0; - const tick = async () => { - const prev = state.ready; + const head = async (url: string): Promise => { try { - const res = await fetch( - `http://${deps.upstreamHost}:${deps.upstreamPort}/`, - { method: "HEAD", signal: AbortSignal.timeout(5000) }, - ); + const res = await fetch(url, { + method: "HEAD", + signal: AbortSignal.timeout(5000), + }); const ct = (res.headers.get("content-type") ?? "").toLowerCase(); - state.ready = res.status >= 200 && res.status < 400; - state.htmlSupport = ct.includes("text/html"); + return { + ok: res.status >= 200 && res.status < 400, + status: res.status, + isHtml: ct.includes("text/html"), + }; } catch { + return null; + } + }; + + const tryOne = async (port: number): Promise => { + const base = `http://${deps.upstreamHost}:${port}`; + // Probe `/` first; only ask `/@vite/client` if root looks like a real + // HTML responder. Avoids hammering ports that don't speak HTTP. 
+ const root = await head(`${base}/`); + let viteClient: HeadResult | null = null; + if (root && root.ok && root.isHtml) { + viteClient = await head(`${base}/@vite/client`); + } + return { + port, + responded: root !== null, + ready: root?.ok ?? false, + htmlSupport: root?.isHtml ?? false, + score: score(root, viteClient), + }; + }; + + const tick = async () => { + const prevReady = state.ready; + const prevPort = state.port; + const prevHtml = state.htmlSupport; + const candidates = deps.getCandidatePorts(); + const results = await Promise.all(candidates.map(tryOne)); + // Highest score wins; on tie, the candidate-list order (already + // discovered-first) breaks it — `Array.sort` is stable in modern JS. + const best = results + .filter((r) => r.responded) + .sort((a, b) => b.score - a.score)[0]; + if (best) { + state.port = best.port; + state.ready = best.ready; + state.htmlSupport = best.htmlSupport; + } else { + state.port = null; state.ready = false; state.htmlSupport = false; } - if (state.ready !== prev) deps.onChange({ ...state }); + if ( + prevReady !== state.ready || + prevPort !== state.port || + prevHtml !== state.htmlSupport + ) { + deps.onChange({ ...state }); + } count++; setTimeout(tick, count < FAST_PROBE_LIMIT ? FAST_PROBE_MS : SLOW_PROBE_MS); }; diff --git a/packages/sandbox/daemon/process/port-discovery.ts b/packages/sandbox/daemon/process/port-discovery.ts new file mode 100644 index 0000000000..8b8cd107da --- /dev/null +++ b/packages/sandbox/daemon/process/port-discovery.ts @@ -0,0 +1,173 @@ +import { readdirSync, readFileSync, readlinkSync } from "node:fs"; + +/** + * Discover TCP ports the descendants of a given pid are listening on. + * + * The daemon launches `bun run dev` (etc.) with `PORT=$DEV_PORT` as a hint, + * but most modern dev servers (Vite v7, Next, Astro …) ignore that env and + * pick their own port. Reading /proc lets the proxy follow whatever the + * dev process actually bound to. 
+ *
+ * Linux-only; on macOS/test hosts the readSync calls throw and we fall back
+ * to an empty result. Callers should treat "no discovery" as "use the env
+ * hint" — see entry.ts for the candidate-list composition.
+ */
+
+const SOCKET_INODE_RE = /^socket:\[(\d+)\]$/;
+
+/**
+ * Sidecar runtimes spawned by dev servers that listen on TCP but are NOT
+ * the user-facing preview surface. Probing them with HEAD requests can
+ * crash their handlers (workerd throws on any request whose worker code
+ * does relative `fetch()`; node --inspect treats it as a debugger probe)
+ * and pollutes the dev process's own logs. Filtered out of port
+ * discovery by checking /proc/<pid>/comm.
+ */
+const SIDECAR_COMMS = new Set([
+  "workerd",
+  "esbuild",
+  "wrangler",
+  "tsserver",
+]);
+
+/** Reads `/proc/<pid>/comm` (truncated process name); empty string on error. */
+function getProcessComm(pid: number): string {
+  try {
+    return readFileSync(`/proc/${pid}/comm`, "utf8").trim();
+  } catch {
+    return "";
+  }
+}
+
+/** Walks /proc/*\/stat to compute the transitive children of `rootPid`. */
+function getDescendantPids(rootPid: number): number[] {
+  let entries: string[];
+  try {
+    entries = readdirSync("/proc");
+  } catch {
+    return [];
+  }
+  const ppids = new Map<number, number>();
+  for (const e of entries) {
+    const pid = Number(e);
+    if (!Number.isInteger(pid) || pid <= 0) continue;
+    try {
+      const stat = readFileSync(`/proc/${pid}/stat`, "utf8");
+      // Format: pid (comm) state ppid … comm is parenthesised and may
+      // contain spaces or unbalanced inner parens — split off everything
+      // up to the LAST `)` to skip it safely.
+ const close = stat.lastIndexOf(")"); + if (close === -1) continue; + const tail = stat.slice(close + 2).split(" "); + const ppid = Number(tail[1]); + if (Number.isInteger(ppid)) ppids.set(pid, ppid); + } catch { + // pid exited between readdir and read — skip + } + } + const out = new Set([rootPid]); + let changed = true; + while (changed) { + changed = false; + for (const [pid, ppid] of ppids) { + if (out.has(ppid) && !out.has(pid)) { + out.add(pid); + changed = true; + } + } + } + out.delete(rootPid); + return Array.from(out); +} + +/** Resolves the socket inodes a pid currently has open via /proc//fd. */ +function getProcessSocketInodes(pid: number): Set { + const inodes = new Set(); + let fds: string[]; + try { + fds = readdirSync(`/proc/${pid}/fd`); + } catch { + return inodes; + } + for (const fd of fds) { + try { + const link = readlinkSync(`/proc/${pid}/fd/${fd}`); + const m = SOCKET_INODE_RE.exec(link); + if (m) inodes.add(Number(m[1])); + } catch { + // fd may have closed mid-scan — skip + } + } + return inodes; +} + +interface ListeningRow { + port: number; + inode: number; +} + +/** Parses the LISTEN rows (state 0A) from /proc/net/tcp + tcp6. 
*/ +function readListeningTcp(): ListeningRow[] { + const out: ListeningRow[] = []; + for (const path of ["/proc/net/tcp", "/proc/net/tcp6"]) { + let raw: string; + try { + raw = readFileSync(path, "utf8"); + } catch { + continue; + } + const lines = raw.split("\n"); + // Columns: sl local rem state tx_queue rx_queue tr tm->when + // retrnsmt uid timeout inode … + for (let i = 1; i < lines.length; i++) { + const line = lines[i].trim(); + if (!line) continue; + const cols = line.split(/\s+/); + if (cols.length < 10) continue; + if (cols[3] !== "0A") continue; + const local = cols[1]; + const portHex = local.split(":")[1]; + if (!portHex) continue; + const port = parseInt(portHex, 16); + if (!Number.isInteger(port) || port <= 0) continue; + const inode = Number(cols[9]); + if (!Number.isInteger(inode)) continue; + out.push({ port, inode }); + } + } + return out; +} + +export interface DiscoverPortsOpts { + rootPids: readonly number[]; + excludePorts?: ReadonlySet; +} + +/** + * Returns the listening TCP ports owned by any descendant of `rootPids`, + * minus `excludePorts`. Empty array on non-Linux or any read failure. + */ +export function discoverDescendantListeningPorts({ + rootPids, + excludePorts, +}: DiscoverPortsOpts): number[] { + if (rootPids.length === 0) return []; + const owned = new Set(); + for (const root of rootPids) { + for (const pid of [root, ...getDescendantPids(root)]) { + // Skip sidecars (workerd / esbuild / etc.) — their listening sockets + // are runtime internals, not preview surfaces, and probing them can + // wedge the dev server. 
+ if (SIDECAR_COMMS.has(getProcessComm(pid))) continue; + for (const inode of getProcessSocketInodes(pid)) owned.add(inode); + } + } + if (owned.size === 0) return []; + const ports = new Set(); + for (const row of readListeningTcp()) { + if (!owned.has(row.inode)) continue; + if (excludePorts?.has(row.port)) continue; + ports.add(row.port); + } + return Array.from(ports); +} diff --git a/packages/sandbox/daemon/process/run-process.ts b/packages/sandbox/daemon/process/run-process.ts index 37c6f52951..217972e7d6 100644 --- a/packages/sandbox/daemon/process/run-process.ts +++ b/packages/sandbox/daemon/process/run-process.ts @@ -16,6 +16,15 @@ export class ProcessManager { return Array.from(this.children.keys()); } + /** Pids of every child currently tracked — used to scope port discovery. */ + allPids(): number[] { + const out: number[] = []; + for (const child of this.children.values()) { + if (typeof child.pid === "number") out.push(child.pid); + } + return out; + } + run(source: string, cmd: string, label: string): ChildProcess { const existing = this.children.get(source); if (existing) { @@ -29,8 +38,14 @@ export class ProcessManager { this.children.delete(source); } this.deps.broadcaster.broadcastChunk(source, `${label}\r\n`); + // stdin is `pipe` (not `ignore`) so it's an open writable that never + // closes. Vite's CLI shortcuts call setRawMode then watch stdin for EOF; + // with stdin closed at spawn the child sees EOF immediately and exits + // right after announcing it's ready. Keeping the pipe open without ever + // writing to it is the cheapest way to keep long-running dev servers + // alive under the `script` PTY wrapper. 
const opts: Parameters[2] = { - stdio: ["ignore", "pipe", "pipe"], + stdio: ["pipe", "pipe", "pipe"], env: this.deps.env, }; if (this.deps.dropPrivileges) { diff --git a/packages/sandbox/daemon/proxy.ts b/packages/sandbox/daemon/proxy.ts index 16f1f0e752..5013b12f17 100644 --- a/packages/sandbox/daemon/proxy.ts +++ b/packages/sandbox/daemon/proxy.ts @@ -1,13 +1,13 @@ import { BOOTSTRAP_SCRIPT } from "./constants"; import type { Broadcaster } from "./events/broadcast"; -import type { Config } from "./types"; export interface ProxyDeps { - config: Config; broadcaster: Broadcaster; + /** Resolved each request — follows the dev process's actual listening port. */ + getDevPort: () => number; } -export function makeProxyHandler({ config, broadcaster }: ProxyDeps) { +export function makeProxyHandler({ broadcaster, getDevPort }: ProxyDeps) { function log(...args: string[]) { const msg = `[daemon] ${new Date().toISOString()} ${args.join(" ")}`; broadcaster.broadcastChunk("daemon", msg + "\r\n"); @@ -16,7 +16,7 @@ export function makeProxyHandler({ config, broadcaster }: ProxyDeps) { return async (req: Request): Promise => { const url = new URL(req.url); log("proxy", req.method, url.pathname); - const target = `http://localhost:${config.devPort}${url.pathname}${url.search}`; + const target = `http://localhost:${getDevPort()}${url.pathname}${url.search}`; const outHeaders = new Headers(req.headers); outHeaders.delete("accept-encoding"); outHeaders.delete("host"); diff --git a/packages/sandbox/daemon/ws-proxy.ts b/packages/sandbox/daemon/ws-proxy.ts new file mode 100644 index 0000000000..2e605d4be9 --- /dev/null +++ b/packages/sandbox/daemon/ws-proxy.ts @@ -0,0 +1,112 @@ +/** + * Transparent WebSocket reverse proxy for the daemon. + * + * The daemon's HTTP proxy uses fetch(), which doesn't carry WebSocket + * upgrade semantics. 
Without this, Vite's HMR client (and any other + * dev-server WS) gets 502 on the upgrade, retries a few times, then + * triggers a full-page reload as recovery — the user sees the page load + * then immediately reload, in a loop. + * + * On upgrade we stash the rewritten in-pod target URL (plus the client's + * negotiated subprotocols) in ws.data, then open the upstream WS on the + * `open` callback and bridge frames in both directions. Subprotocols + * (`vite-hmr`, `vite-ping`, …) are forwarded — Vite ignores connections + * that drop them. + */ +import type { ServerWebSocket } from "bun"; + +/** + * Cap on frames buffered between client upgrade and upstream WS open. The + * upstream here is the in-pod dev server on localhost; if it isn't yet + * listening (booting / crashed), an unbounded pending queue would let a + * chatty client exhaust the daemon's memory. + */ +const MAX_PENDING_FRAMES = 256; + +export interface WsProxyData { + /** Full upstream URL — `ws://localhost:?`. */ + target: string; + /** Subprotocols the client advertised on the upgrade request. */ + protocols: string[] | undefined; + upstream: WebSocket | null; + /** Frames received from the client before the upstream handshake completes. */ + pending: (string | ArrayBuffer | Uint8Array)[]; +} + +export function makeWsUpgrader(getDevPort: () => number) { + return { + /** Build the per-connection state attached to ws.data at upgrade time. */ + upgradeData(req: Request): WsProxyData { + const url = new URL(req.url); + const target = `ws://localhost:${getDevPort()}${url.pathname}${url.search}`; + const protoHeader = req.headers.get("sec-websocket-protocol"); + const protocols = protoHeader + ? 
protoHeader + .split(",") + .map((s) => s.trim()) + .filter(Boolean) + : undefined; + return { target, protocols, upstream: null, pending: [] }; + }, + + open(ws: ServerWebSocket): void { + const upstream = new WebSocket(ws.data.target, ws.data.protocols); + upstream.binaryType = "arraybuffer"; + ws.data.upstream = upstream; + + upstream.addEventListener("open", () => { + for (const frame of ws.data.pending) { + try { + upstream.send(frame as never); + } catch {} + } + ws.data.pending.length = 0; + }); + upstream.addEventListener("message", (e) => { + try { + ws.send(e.data as never); + } catch {} + }); + upstream.addEventListener("close", () => { + try { + ws.close(); + } catch {} + }); + upstream.addEventListener("error", () => { + try { + ws.close(); + } catch {} + }); + }, + + message(ws: ServerWebSocket, message: string | Buffer): void { + const upstream = ws.data.upstream; + const frame = typeof message === "string" ? message : message.buffer; + if (upstream && upstream.readyState === WebSocket.OPEN) { + try { + upstream.send(frame as never); + } catch {} + return; + } + if (ws.data.pending.length >= MAX_PENDING_FRAMES) { + // Backlog overflow: upstream isn't draining. 1011 = internal error. 
+ try { + ws.close(1011, "ws-proxy backlog overflow"); + } catch {} + try { + ws.data.upstream?.close(); + } catch {} + return; + } + ws.data.pending.push(frame as ArrayBuffer | string); + }, + + close(ws: ServerWebSocket): void { + try { + ws.data.upstream?.close(); + } catch {} + }, + }; +} + +export type WsUpgrader = ReturnType; diff --git a/packages/sandbox/package.json b/packages/sandbox/package.json index 10b5af029d..ae0376ce98 100644 --- a/packages/sandbox/package.json +++ b/packages/sandbox/package.json @@ -12,8 +12,13 @@ "exports": { "./shared": "./shared.ts", "./runner": "./server/runner/index.ts", + "./runner/agent-sandbox": "./server/runner/agent-sandbox/index.ts", "./runner/freestyle": "./server/runner/freestyle/index.ts" }, + "dependencies": { + "@kubernetes/client-node": "^1.4.0", + "@opentelemetry/api": "^1.9.0" + }, "optionalDependencies": { "@freestyle-sh/with-bun": "^0.2.12", "@freestyle-sh/with-deno": "^0.0.4", diff --git a/packages/sandbox/server/runner/agent-sandbox/client.test.ts b/packages/sandbox/server/runner/agent-sandbox/client.test.ts new file mode 100644 index 0000000000..10cbba7cef --- /dev/null +++ b/packages/sandbox/server/runner/agent-sandbox/client.test.ts @@ -0,0 +1,409 @@ +import { afterEach, beforeEach, describe, expect, it, mock } from "bun:test"; +import { K8S_CONSTANTS } from "./constants"; +import { + createSandboxClaim, + deleteSandboxClaim, + getSandboxClaim, + patchSandboxClaimShutdown, + type SandboxClaim, + type SandboxResource, + waitForSandboxReady, +} from "./client"; + +// ---- Minimal KubeConfig stub ----------------------------------------------- +// client.ts only touches `getCurrentCluster` and `applyToHTTPSOptions`; the +// stub mirrors those and omits the 100-method surface of the real class. 
+
+const STUB_SERVER = "https://kube.test";
+
+function makeKc(
+  cluster: { server: string; skipTLSVerify?: boolean } = {
+    server: STUB_SERVER,
+  },
+) {
+  const apply = async (opts: Record<string, unknown>) => {
+    opts.headers = { Authorization: "Bearer stub-token" };
+    opts.cert = "STUB_CERT_PEM";
+    opts.key = "STUB_KEY_PEM";
+    opts.ca = "STUB_CA_PEM";
+  };
+  return {
+    getCurrentCluster: () => cluster,
+    applyToHTTPSOptions: apply,
+  } as unknown as import("@kubernetes/client-node").KubeConfig;
+}
+
+// ---- Fetch interception ----------------------------------------------------
+// Keep the real global fetch so test infra (bun itself) isn't affected, but
+// swap it per-test with a stub that records calls + returns scripted responses.
+
+type FetchCall = { url: string; init: RequestInit };
+const fetchCalls: FetchCall[] = [];
+let fetchImpl: (url: string, init: RequestInit) => Promise<Response> =
+  async () => {
+    throw new Error("no fetch impl set");
+  };
+const originalFetch = globalThis.fetch;
+
+beforeEach(() => {
+  fetchCalls.length = 0;
+  globalThis.fetch = mock(async (url: URL | string, init: RequestInit = {}) => {
+    const record: FetchCall = {
+      url: typeof url === "string" ? url : url.toString(),
+      init,
+    };
+    fetchCalls.push(record);
+    return fetchImpl(record.url, init);
+  }) as unknown as typeof globalThis.fetch;
+});
+
+afterEach(() => {
+  globalThis.fetch = originalFetch;
+});
+
+// ---- Response helpers -------------------------------------------------------
+
+function jsonResponse(status: number, body: unknown): Response {
+  return new Response(body === undefined ? null : JSON.stringify(body), {
+    status,
+    headers: { "content-type": "application/json" },
+  });
+}
+
+/** Build a response whose body is a push-driven ND-JSON stream.
*/ +function ndJsonResponse(status: number): { + resp: Response; + push: (obj: unknown) => void; + close: () => void; +} { + let controller!: ReadableStreamDefaultController; + const stream = new ReadableStream({ + start: (c) => { + controller = c; + }, + }); + const encoder = new TextEncoder(); + return { + resp: new Response(stream, { + status, + headers: { "content-type": "application/json" }, + }), + push: (obj) => + controller.enqueue(encoder.encode(`${JSON.stringify(obj)}\n`)), + close: () => controller.close(), + }; +} + +// ---- Fixtures --------------------------------------------------------------- + +const NS = "agent-sandbox-system"; + +function makeClaim(name: string): SandboxClaim { + return { + apiVersion: `${K8S_CONSTANTS.CLAIM_API_GROUP}/${K8S_CONSTANTS.CLAIM_API_VERSION}`, + kind: "SandboxClaim", + metadata: { name, namespace: NS }, + spec: { + sandboxTemplateRef: { name: "studio-sandbox" }, + lifecycle: { shutdownPolicy: "Delete" }, + }, + }; +} + +// ---------------------------------------------------------------------------- + +describe("createSandboxClaim", () => { + it("POSTs the claim body verbatim to the plural endpoint", async () => { + fetchImpl = async () => jsonResponse(201, { kind: "SandboxClaim" }); + const claim = makeClaim("studio-sb-abc"); + await createSandboxClaim(makeKc(), NS, claim); + + expect(fetchCalls).toHaveLength(1); + const [call] = fetchCalls; + expect(call!.url).toBe( + `${STUB_SERVER}/apis/${K8S_CONSTANTS.CLAIM_API_GROUP}/${K8S_CONSTANTS.CLAIM_API_VERSION}/namespaces/${NS}/${K8S_CONSTANTS.CLAIM_PLURAL}`, + ); + expect(call!.init.method).toBe("POST"); + expect(JSON.parse(String(call!.init.body))).toEqual(claim); + // Auth header flows through from applyToHTTPSOptions. 
+    const headers = call!.init.headers as Record<string, string>;
+    expect(headers.Authorization).toBe("Bearer stub-token");
+  });
+
+  it("round-trips spec.env + warmpool (per-claim DAEMON_TOKEN shape)", async () => {
+    // Stage 2.1 claim shape: per-claim env requires warmpool: "none".
+    // Lock the exact wire payload so a bad serializer regression (dropping
+    // env, mangling warmpool) surfaces in unit tests — before it wastes a
+    // kind-cluster provision cycle discovering the same bug.
+    fetchImpl = async () => jsonResponse(201, { kind: "SandboxClaim" });
+    const claim: SandboxClaim = {
+      apiVersion: `${K8S_CONSTANTS.CLAIM_API_GROUP}/${K8S_CONSTANTS.CLAIM_API_VERSION}`,
+      kind: "SandboxClaim",
+      metadata: { name: "studio-sb-tok", namespace: NS },
+      spec: {
+        sandboxTemplateRef: { name: "studio-sandbox" },
+        env: [{ name: "DAEMON_TOKEN", value: "abc123" }],
+        warmpool: "none",
+        lifecycle: { shutdownPolicy: "Delete" },
+      },
+    };
+    await createSandboxClaim(makeKc(), NS, claim);
+    const body = JSON.parse(String(fetchCalls[0]!.init.body));
+    expect(body.spec.env).toEqual([{ name: "DAEMON_TOKEN", value: "abc123" }]);
+    expect(body.spec.warmpool).toBe("none");
+  });
+
+  it("wraps non-2xx errors in SandboxError with the claim name", async () => {
+    fetchImpl = async () =>
+      jsonResponse(409, {
+        kind: "Status",
+        status: "Failure",
+        reason: "AlreadyExists",
+        message: "already exists",
+        code: 409,
+      });
+    await expect(
+      createSandboxClaim(makeKc(), NS, makeClaim("dup")),
+    ).rejects.toThrow(/Failed to create SandboxClaim: dup/);
+  });
+});
+
+describe("deleteSandboxClaim", () => {
+  it("swallows 404 silently (idempotent delete)", async () => {
+    fetchImpl = async () =>
+      jsonResponse(404, {
+        kind: "Status",
+        reason: "NotFound",
+        message: "not found",
+      });
+    await expect(
+      deleteSandboxClaim(makeKc(), NS, "gone"),
+    ).resolves.toBeUndefined();
+    expect(fetchCalls[0]!.init.method).toBe("DELETE");
+  });
+
+  it("re-throws non-404 errors wrapped in SandboxError", async () => {
fetchImpl = async () => + jsonResponse(403, { + kind: "Status", + reason: "Forbidden", + message: "forbidden", + }); + await expect(deleteSandboxClaim(makeKc(), NS, "x")).rejects.toThrow( + /Failed to delete SandboxClaim: x/, + ); + }); +}); + +describe("getSandboxClaim", () => { + it("returns undefined on 404", async () => { + fetchImpl = async () => + jsonResponse(404, { + kind: "Status", + reason: "NotFound", + message: "not found", + }); + const result = await getSandboxClaim(makeKc(), NS, "missing"); + expect(result).toBeUndefined(); + }); + + it("returns the resource body on 200", async () => { + const body: SandboxResource = { + metadata: { name: "present" }, + status: { conditions: [{ type: "Ready", status: "False" }] }, + }; + fetchImpl = async () => jsonResponse(200, body); + const result = await getSandboxClaim(makeKc(), NS, "present"); + expect(result).toEqual(body); + }); + + it("URL-encodes the claim name", async () => { + fetchImpl = async () => jsonResponse(404, null); + await getSandboxClaim(makeKc(), NS, "weird/name"); + expect(fetchCalls[0]!.url).toContain("/weird%2Fname"); + }); +}); + +describe("patchSandboxClaimShutdown", () => { + it("sends merge-patch with lifecycle.shutdownTime only", async () => { + fetchImpl = async () => jsonResponse(200, { kind: "SandboxClaim" }); + await patchSandboxClaimShutdown( + makeKc(), + NS, + "studio-sb-x", + "2026-04-01T12:00:00.000Z", + ); + expect(fetchCalls).toHaveLength(1); + const [call] = fetchCalls; + expect(call!.init.method).toBe("PATCH"); + const headers = call!.init.headers as Record; + expect(headers["content-type"]).toBe("application/merge-patch+json"); + expect(JSON.parse(String(call!.init.body))).toEqual({ + spec: { + lifecycle: { + shutdownPolicy: "Delete", + shutdownTime: "2026-04-01T12:00:00.000Z", + }, + }, + }); + }); + + it("swallows 404 silently (claim deleted between lookup and patch)", async () => { + fetchImpl = async () => + jsonResponse(404, { + kind: "Status", + reason: "NotFound", 
+ message: "not found", + }); + await expect( + patchSandboxClaimShutdown( + makeKc(), + NS, + "gone", + "2026-04-01T12:00:00.000Z", + ), + ).resolves.toBeUndefined(); + }); + + it("wraps other errors in SandboxError", async () => { + fetchImpl = async () => + jsonResponse(409, { + kind: "Status", + reason: "Conflict", + message: "conflict", + }); + await expect( + patchSandboxClaimShutdown( + makeKc(), + NS, + "busy", + "2026-04-01T12:00:00.000Z", + ), + ).rejects.toThrow(/Failed to patch SandboxClaim shutdownTime: busy/); + }); +}); + +describe("waitForSandboxReady", () => { + it("resolves with sandboxName + podName once Ready=True is observed", async () => { + const stream = ndJsonResponse(200); + fetchImpl = async () => stream.resp; + const p = waitForSandboxReady(makeKc(), NS, "claim-xyz", 60); + stream.push({ + type: "MODIFIED", + object: { + metadata: { + name: "claim-xyz", + annotations: { [K8S_CONSTANTS.POD_NAME_ANNOTATION]: "pod-42" }, + }, + status: { conditions: [{ type: "Ready", status: "True" }] }, + }, + }); + await expect(p).resolves.toEqual({ + sandboxName: "claim-xyz", + podName: "pod-42", + }); + const url = fetchCalls[0]!.url; + expect(url).toContain("?watch=true"); + expect(url).toContain("fieldSelector="); + }); + + it("falls back to sandboxName when pod-name annotation is absent", async () => { + const stream = ndJsonResponse(200); + fetchImpl = async () => stream.resp; + const p = waitForSandboxReady(makeKc(), NS, "claim-xyz", 60); + stream.push({ + type: "MODIFIED", + object: { + metadata: { name: "claim-xyz" }, + status: { conditions: [{ type: "Ready", status: "True" }] }, + }, + }); + await expect(p).resolves.toEqual({ + sandboxName: "claim-xyz", + podName: "claim-xyz", + }); + }); + + it("ignores non-Ready conditions and keeps watching", async () => { + const stream = ndJsonResponse(200); + fetchImpl = async () => stream.resp; + const p = waitForSandboxReady(makeKc(), NS, "claim-xyz", 60); + // Emit a non-Ready condition — should not 
settle. + stream.push({ + type: "MODIFIED", + object: { + metadata: { name: "claim-xyz" }, + status: { conditions: [{ type: "Progressing", status: "True" }] }, + }, + }); + const sentinel = Symbol("still-pending"); + const winner = await Promise.race([ + p, + new Promise((r) => setTimeout(() => r(sentinel), 10)), + ]); + expect(winner).toBe(sentinel); + + stream.push({ + type: "MODIFIED", + object: { + metadata: { name: "claim-xyz" }, + status: { conditions: [{ type: "Ready", status: "True" }] }, + }, + }); + await expect(p).resolves.toEqual({ + sandboxName: "claim-xyz", + podName: "claim-xyz", + }); + }); + + it("rejects with SandboxTimeoutError after the deadline", async () => { + // Server accepts the connection but never emits — simulate a watch that + // just hangs. 0-second timeout fires on the next tick. + const stream = ndJsonResponse(200); + fetchImpl = async () => stream.resp; + const p = waitForSandboxReady(makeKc(), NS, "claim-xyz", 0); + await expect(p).rejects.toThrow(/did not become ready within 0 seconds/); + }); + + it("rejects if the watch handshake itself fails", async () => { + fetchImpl = async () => { + throw new Error("kube-apiserver unreachable"); + }; + const p = waitForSandboxReady(makeKc(), NS, "claim-xyz", 60); + await expect(p).rejects.toThrow( + /Failed to start watch for sandbox readiness/, + ); + }); + + it("rejects when the Sandbox object has no metadata.name", async () => { + const stream = ndJsonResponse(200); + fetchImpl = async () => stream.resp; + const p = waitForSandboxReady(makeKc(), NS, "claim-xyz", 60); + stream.push({ + type: "MODIFIED", + object: { + // no metadata.name + status: { conditions: [{ type: "Ready", status: "True" }] }, + }, + }); + await expect(p).rejects.toThrow(/Sandbox metadata or name is missing/); + }); + + it("rejects on ERROR frames from the watch stream", async () => { + const stream = ndJsonResponse(200); + fetchImpl = async () => stream.resp; + const p = waitForSandboxReady(makeKc(), NS, 
"claim-xyz", 60); + stream.push({ + type: "ERROR", + object: { + kind: "Status", + status: "Failure", + reason: "Expired", + message: "watch channel expired", + }, + }); + await expect(p).rejects.toThrow( + /Watch stream error while waiting for sandbox: watch channel expired/, + ); + }); +}); diff --git a/packages/sandbox/server/runner/agent-sandbox/client.ts b/packages/sandbox/server/runner/agent-sandbox/client.ts new file mode 100644 index 0000000000..6cc68890a1 --- /dev/null +++ b/packages/sandbox/server/runner/agent-sandbox/client.ts @@ -0,0 +1,517 @@ +/** + * Low-level CRUD + readiness watch for agent-sandbox SandboxClaim / Sandbox. + * + * Talks to the k8s REST API directly via the runtime's native `fetch` with + * `{ tls: { cert, key, ca } }`. Credentials (client cert + CA) are extracted + * from the active `KubeConfig` context using the library's own + * `applyToHTTPSOptions` helper. + * + * Why not `kc.makeApiClient(CustomObjectsApi)` like admin does: + * `@kubernetes/client-node` 1.x's generated clients ship an + * `IsomorphicFetchHttpLibrary` that calls `fetch(url, { agent })` — a + * node-fetch signal that Node's https.Agent (cert/key/ca) should be used + * for the TLS handshake. Bun's node-fetch polyfill silently drops the + * `agent` option: TLS verification fails and, if bypassed, the cluster + * sees `system:anonymous` (no client cert). The fix is Bun-native: + * `fetch(url, { tls: { cert, key, ca } })`. The library's Watch hits + * the same bug, so readiness is rebuilt from scratch here too. + * + * Surface intentionally minimal: create/delete/get/waitForReady. Higher-level + * "ensure ready" flows live on the runner, not here. + */ + +import { + type KubeConfig, + type V1Status as V1StatusUpstream, +} from "@kubernetes/client-node"; +import { K8S_CONSTANTS, SandboxError, SandboxTimeoutError } from "./constants"; + +type V1Status = Partial & { reason?: string }; + +/** + * Subset of SandboxClaim `spec.env[]`. 
The CRD accepts only literal + * `{name, value}` pairs — no `valueFrom`/`secretKeyRef`. That's why Stage 2.1 + * injects `DAEMON_TOKEN` here directly rather than via a Secret reference. + */ +export interface SandboxClaimEnvVar { + name: string; + value: string; + containerName?: string; +} + +export interface SandboxClaim { + apiVersion: string; + kind: "SandboxClaim"; + metadata: { + name: string; + namespace?: string; + labels?: Record; + annotations?: Record; + }; + spec: { + sandboxTemplateRef: { name: string }; + env?: SandboxClaimEnvVar[]; + /** + * Pod-level metadata the operator merges onto the spawned Pod (CRD field, + * see sandboxclaims.extensions.agents.x-k8s.io v1alpha1). Used to attach + * tenant labels for downstream metrics attribution. + */ + additionalPodMetadata?: { + labels?: Record; + annotations?: Record; + }; + /** + * `"none"` forces a fresh pod per claim — required when `spec.env` is + * set because the operator rejects custom env when the claim would + * come from a warm pool (warm pods are pre-started, can't take new + * env). Passing `undefined` lets the CRD default ("default") apply. + */ + warmpool?: "none" | "default" | string; + lifecycle?: { + shutdownTime?: string; + shutdownPolicy?: "Delete" | "Retain"; + }; + }; +} + +export interface SandboxCondition { + type: string; + status: string; + reason?: string; + message?: string; +} + +export interface SandboxResource { + metadata?: { + name?: string; + labels?: Record; + annotations?: Record; + }; + /** + * Present when this came back from `getSandboxClaim` (CRD has a spec); + * absent from Sandbox-kind resources because `waitForSandboxReady` only + * projects out metadata/status. `adopt()` reads `spec.env` to recover the + * per-claim DAEMON_TOKEN it originally injected. 
+ */ + spec?: { + sandboxTemplateRef?: { name?: string }; + env?: SandboxClaimEnvVar[]; + lifecycle?: { + shutdownTime?: string; + shutdownPolicy?: "Delete" | "Retain"; + }; + }; + status?: { + conditions?: SandboxCondition[]; + }; +} + +type WatchEvent = { + type: "ADDED" | "MODIFIED" | "DELETED" | "BOOKMARK" | "ERROR"; + object: SandboxResource | V1Status; +}; + +// ---- Transport -------------------------------------------------------------- + +/** Resolved auth + TLS material for the active kubeconfig context. */ +interface KubeAuth { + server: string; + headers: Record; + tls: { + cert?: string; + key?: string; + ca?: string; + rejectUnauthorized?: boolean; + }; +} + +async function resolveKubeAuth(kc: KubeConfig): Promise { + const cluster = kc.getCurrentCluster(); + if (!cluster) throw new SandboxError("No active cluster in kubeconfig"); + + // `applyToHTTPSOptions` mutates a plain options object, threading through the + // authenticator chain (token files, exec plugins, etc.). We harvest the bits + // we care about — headers (bearer/impersonation), cert/key/ca — and discard + // the `agent` it leaves behind since we route around node-fetch entirely. + const opts: Record = {}; + await kc.applyToHTTPSOptions(opts); + + const headers: Record = {}; + const optHeaders = (opts.headers ?? {}) as Record; + for (const [k, v] of Object.entries(optHeaders)) { + if (Array.isArray(v)) headers[k] = v.join(", "); + else if (v !== undefined) headers[k] = String(v); + } + if (typeof opts.auth === "string" && !headers.Authorization) { + headers.Authorization = `Basic ${Buffer.from(opts.auth).toString("base64")}`; + } + + return { + server: cluster.server.replace(/\/+$/, ""), + headers, + tls: { + cert: bufferLike(opts.cert), + key: bufferLike(opts.key), + ca: bufferLike(opts.ca), + rejectUnauthorized: cluster.skipTLSVerify ? 
false : undefined, + }, + }; +} + +function bufferLike(v: unknown): string | undefined { + if (v == null) return undefined; + if (typeof v === "string") return v; + if (Buffer.isBuffer(v)) return v.toString("utf8"); + return String(v); +} + +interface KubeFetchInit { + method: "GET" | "POST" | "DELETE" | "PATCH"; + path: string; + body?: unknown; + signal?: AbortSignal; + /** Extra Accept / query hints. Merged with auth headers. */ + headers?: Record; + /** + * Required iff `method === "PATCH"`. Drives the patch content-type: + * RFC 7396 merge-patch (CRDs) vs. strategic-merge (built-in types). + */ + patchType?: "merge" | "strategic-merge"; +} + +/** + * Thin wrapper around `fetch` that threads TLS + auth from the kubeconfig. + * Returns the raw `Response` so streaming callers (watch) can consume the + * body themselves; non-streaming callers parse JSON explicitly. + */ +async function kubeFetch( + kc: KubeConfig, + init: KubeFetchInit, +): Promise { + const auth = await resolveKubeAuth(kc); + const headers: Record = { ...auth.headers, ...init.headers }; + if (init.method === "PATCH") { + headers["content-type"] = + init.patchType === "strategic-merge" + ? "application/strategic-merge-patch+json" + : "application/merge-patch+json"; + } else if (init.body !== undefined && !("content-type" in headers)) { + headers["content-type"] = "application/json"; + } + const reqInit: RequestInit & { tls?: typeof auth.tls } = { + method: init.method, + headers, + body: init.body === undefined ? undefined : JSON.stringify(init.body), + signal: init.signal, + tls: auth.tls, + }; + return fetch(`${auth.server}${init.path}`, reqInit as RequestInit); +} + +/** HTTP error carrier used for the 404 fast-path before SandboxError wrapping. 
*/ +class KubeHttpError extends Error { + constructor( + readonly status: number, + readonly body: V1Status | null, + message: string, + ) { + super(message); + this.name = "KubeHttpError"; + } +} + +async function readStatusBody(resp: Response): Promise { + try { + return (await resp.json()) as V1Status; + } catch { + return null; + } +} + +async function ensureOk(resp: Response, action: string): Promise { + if (resp.ok) return; + const body = await readStatusBody(resp); + const message = + body?.message ?? `${action} failed: ${resp.status} ${resp.statusText}`; + throw new KubeHttpError(resp.status, body, message); +} + +/** + * Issue a kube call where 404 is *not* an error (the resource was already + * gone; mesh's next ensure() recreates it). On 404, returns `null`. On 2xx, + * returns the parsed JSON body — or `null` for callers that don't need it. + * All other errors are wrapped in `SandboxError` with `wrapMessage` as the + * surfaced label. + */ +async function callSwallowing404( + kc: KubeConfig, + init: KubeFetchInit, + action: string, + wrapMessage: string, + parse: "json" | "none" = "none", +): Promise { + try { + const resp = await kubeFetch(kc, init); + if (resp.status === 404) return null; + await ensureOk(resp, action); + if (parse === "json") return (await resp.json()) as T; + return null; + } catch (error) { + throw new SandboxError(wrapMessage, error); + } +} + +// ---- Public surface --------------------------------------------------------- + +const CLAIM_PATH_PREFIX = `/apis/${K8S_CONSTANTS.CLAIM_API_GROUP}/${K8S_CONSTANTS.CLAIM_API_VERSION}/namespaces`; + +export async function createSandboxClaim( + kc: KubeConfig, + namespace: string, + claim: SandboxClaim, +): Promise { + const path = `${CLAIM_PATH_PREFIX}/${encodeURIComponent(namespace)}/${K8S_CONSTANTS.CLAIM_PLURAL}`; + try { + const resp = await kubeFetch(kc, { method: "POST", path, body: claim }); + await ensureOk(resp, "createSandboxClaim"); + } catch (error) { + throw new SandboxError( 
+ `Failed to create SandboxClaim: ${claim.metadata.name}`, + error, + ); + } +} + +function claimPath(namespace: string, claimName: string): string { + return `${CLAIM_PATH_PREFIX}/${encodeURIComponent(namespace)}/${K8S_CONSTANTS.CLAIM_PLURAL}/${encodeURIComponent(claimName)}`; +} + +/** + * Update the claim's idle-reap clock. The agent-sandbox operator honors + * `spec.lifecycle.shutdownTime` with `shutdownPolicy: Delete`: once the + * wall clock passes `shutdownTime`, the operator deletes the claim + pod. + * + * Mesh calls this on every `ensure()` hit so an active sandbox continuously + * pushes its deadline forward; an abandoned one hits the deadline and the + * operator reaps it. No mesh-side cron/reconcile needed. + * + * Uses merge-patch (RFC 7396), which is the documented patch format for + * CRDs — strategic-merge only works on built-in types that ship merge + * keys. 404 is swallowed because a deleted-since-lookup claim is not an + * error from mesh's perspective; the caller's next ensure() will + * re-provision. 
+ */ +export async function patchSandboxClaimShutdown( + kc: KubeConfig, + namespace: string, + claimName: string, + shutdownTime: string, +): Promise { + await callSwallowing404( + kc, + { + method: "PATCH", + path: claimPath(namespace, claimName), + patchType: "merge", + body: { + spec: { lifecycle: { shutdownPolicy: "Delete", shutdownTime } }, + }, + }, + "patchSandboxClaimShutdown", + `Failed to patch SandboxClaim shutdownTime: ${claimName}`, + ); +} + +export async function deleteSandboxClaim( + kc: KubeConfig, + namespace: string, + claimName: string, +): Promise { + await callSwallowing404( + kc, + { method: "DELETE", path: claimPath(namespace, claimName) }, + "deleteSandboxClaim", + `Failed to delete SandboxClaim: ${claimName}`, + ); +} + +export async function getSandboxClaim( + kc: KubeConfig, + namespace: string, + claimName: string, +): Promise { + const found = await callSwallowing404( + kc, + { method: "GET", path: claimPath(namespace, claimName) }, + "getSandboxClaim", + `Failed to get SandboxClaim: ${claimName}`, + "json", + ); + return found ?? undefined; +} + +export interface WaitForSandboxReadyResult { + sandboxName: string; + podName: string; +} + +/** + * Resolves on the first `Ready=True` condition on the Sandbox matching + * `claimName`; rejects on stream error, missing name metadata, or timeout. + * The watch is aborted exactly once via `settle()`; callers get deterministic + * teardown regardless of which branch fires first. 
+ */ +export function waitForSandboxReady( + kc: KubeConfig, + namespace: string, + claimName: string, + timeoutSeconds = 180, +): Promise { + const path = `/apis/${K8S_CONSTANTS.SANDBOX_API_GROUP}/${K8S_CONSTANTS.SANDBOX_API_VERSION}/namespaces/${encodeURIComponent(namespace)}/${K8S_CONSTANTS.SANDBOX_PLURAL}?watch=true&fieldSelector=${encodeURIComponent(`metadata.name=${claimName}`)}`; + + const { resolve, reject, promise } = + Promise.withResolvers(); + + const controller = new AbortController(); + let settled = false; + const timeoutHandle = setTimeout(() => { + if (settled) return; + settled = true; + controller.abort(); + reject( + new SandboxTimeoutError( + `Sandbox did not become ready within ${timeoutSeconds} seconds`, + ), + ); + }, timeoutSeconds * 1000); + + const settleWith = (fn: () => void) => { + if (settled) return; + settled = true; + clearTimeout(timeoutHandle); + controller.abort(); + fn(); + }; + + (async () => { + let resp: Response; + try { + resp = await kubeFetch(kc, { + method: "GET", + path, + signal: controller.signal, + headers: { accept: "application/json" }, + }); + } catch (err) { + settleWith(() => + reject( + new SandboxError("Failed to start watch for sandbox readiness", err), + ), + ); + return; + } + + if (!resp.ok || !resp.body) { + const body = await readStatusBody(resp).catch(() => null); + settleWith(() => + reject( + new SandboxError( + `Watch handshake failed (${resp.status}): ${body?.message ?? resp.statusText}`, + ), + ), + ); + return; + } + + try { + for await (const event of readNdJson(resp.body)) { + if (settled) return; + // Bookmark/ERROR/DELETED are never a "ready" signal. ERROR carries a + // V1Status payload rather than a SandboxResource; treating it as a + // fatal stream error mirrors client-go's behaviour. + if (event.type === "ERROR") { + const status = event.object as V1Status; + settleWith(() => + reject( + new SandboxError( + `Watch stream error while waiting for sandbox: ${status.message ?? 
"unknown"}`, + ), + ), + ); + return; + } + if (event.type !== "ADDED" && event.type !== "MODIFIED") continue; + + const sandbox = event.object as SandboxResource; + const ready = sandbox.status?.conditions?.find( + (c) => c.type === "Ready" && c.status === "True", + ); + if (!ready) continue; + + const sandboxName = sandbox.metadata?.name; + if (!sandboxName) { + settleWith(() => + reject(new SandboxError("Sandbox metadata or name is missing")), + ); + return; + } + const podName = + sandbox.metadata?.annotations?.[K8S_CONSTANTS.POD_NAME_ANNOTATION] ?? + sandboxName; + settleWith(() => resolve({ sandboxName, podName })); + return; + } + // Stream ended before Ready observed — treat as transient failure so the + // caller can retry rather than wait out the timeout. + settleWith(() => + reject( + new SandboxError("Watch stream closed before sandbox became ready"), + ), + ); + } catch (err) { + if (settled) return; + // AbortError during in-flight stream is the timeout path above; don't + // double-reject. + if ( + err instanceof Error && + (err.name === "AbortError" || controller.signal.aborted) + ) + return; + settleWith(() => + reject( + new SandboxError("Watch stream error while waiting for sandbox", err), + ), + ); + } + })(); + + return promise; +} + +/** ND-JSON line reader over a WHATWG ReadableStream. 
*/ +async function* readNdJson( + stream: ReadableStream, +): AsyncGenerator { + const reader = stream.getReader(); + const decoder = new TextDecoder(); + let buf = ""; + try { + while (true) { + const { value, done } = await reader.read(); + if (done) break; + buf += decoder.decode(value, { stream: true }); + let newline: number; + // biome-ignore lint/suspicious/noAssignInExpressions: idiomatic line loop + while ((newline = buf.indexOf("\n")) >= 0) { + const line = buf.slice(0, newline).trim(); + buf = buf.slice(newline + 1); + if (!line) continue; + yield JSON.parse(line) as T; + } + } + const tail = buf.trim(); + if (tail) yield JSON.parse(tail) as T; + } finally { + reader.releaseLock(); + } +} diff --git a/packages/sandbox/server/runner/agent-sandbox/constants.ts b/packages/sandbox/server/runner/agent-sandbox/constants.ts new file mode 100644 index 0000000000..8ee6d68bb6 --- /dev/null +++ b/packages/sandbox/server/runner/agent-sandbox/constants.ts @@ -0,0 +1,38 @@ +/** + * agent-sandbox CRD identifiers + error classes. Pinned verbatim from + * kubernetes-sigs/agent-sandbox via deco-cx/admin/clients/agent-sandbox/types.ts. + * When the operator widens to a new API version, change here once — every + * call site reads through these constants. 
+ */ + +export const K8S_CONSTANTS = { + CLAIM_API_GROUP: "extensions.agents.x-k8s.io", + CLAIM_API_VERSION: "v1alpha1", + CLAIM_PLURAL: "sandboxclaims", + + SANDBOX_API_GROUP: "agents.x-k8s.io", + SANDBOX_API_VERSION: "v1alpha1", + SANDBOX_PLURAL: "sandboxes", + + POD_NAME_ANNOTATION: "agents.x-k8s.io/pod-name", +} as const; + +export class SandboxError extends Error { + override readonly cause?: unknown; + + constructor(message: string, cause?: unknown) { + super(message); + this.name = "SandboxError"; + this.cause = cause; + if (cause instanceof Error && cause.stack) { + this.stack = `${this.stack}\nCaused by: ${cause.stack}`; + } + } +} + +export class SandboxTimeoutError extends SandboxError { + constructor(message: string, cause?: unknown) { + super(message, cause); + this.name = "SandboxTimeoutError"; + } +} diff --git a/packages/sandbox/server/runner/agent-sandbox/index.ts b/packages/sandbox/server/runner/agent-sandbox/index.ts new file mode 100644 index 0000000000..711ee05b3e --- /dev/null +++ b/packages/sandbox/server/runner/agent-sandbox/index.ts @@ -0,0 +1,19 @@ +// Re-exported so external tooling (e.g. deploy/k8s-sandbox/local/smoke.ts) +// can build a KubeConfig without declaring @kubernetes/client-node itself. 
export { KubeConfig } from "@kubernetes/client-node";
export { K8S_CONSTANTS, SandboxError, SandboxTimeoutError } from "./constants";
export {
  createSandboxClaim,
  deleteSandboxClaim,
  getSandboxClaim,
  waitForSandboxReady,
} from "./client";
export type {
  SandboxClaim,
  SandboxClaimEnvVar,
  SandboxCondition,
  SandboxResource,
  WaitForSandboxReadyResult,
} from "./client";
export { AgentSandboxRunner, HANDLE_PREFIX } from "./runner";
export type { AgentSandboxRunnerOptions } from "./runner";
diff --git a/packages/sandbox/server/runner/agent-sandbox/runner.ts b/packages/sandbox/server/runner/agent-sandbox/runner.ts
new file mode 100644
index 0000000000..6ed67a069f
--- /dev/null
+++ b/packages/sandbox/server/runner/agent-sandbox/runner.ts
@@ -0,0 +1,1385 @@
/**
 * Agent-sandbox runner.
 *
 * Provisions one SandboxClaim per (user, projectRef) against the
 * kubernetes-sigs/agent-sandbox operator. Mesh runs outside the cluster
 * (Stage 1 / local-dev via kind), so traffic reaches the pod via a single
 * lazily-opened 127.0.0.1 TCP listener that tunnels each inbound connection
 * to the daemon container port through the apiserver as a fresh WebSocket.
 *
 * The daemon owns the public surface: it serves `/_decopilot_vm/*` + `/health`
 * in-process and reverse-proxies everything else to in-pod localhost:DEV_PORT
 * (CSP/X-Frame stripping + HMR bootstrap injection live in that proxy). One
 * port-forward per pod is therefore enough; opening a second forwarder for
 * the dev port would bypass the daemon and break SSE + iframe embedding.
 *
 * Stage 3 replaces the port-forward path with real ingress: when
 * `previewUrlPattern` is set, no forwarder is opened for preview traffic and
 * the preview URL is synthesized from the handle.
 *
 * Token model: each claim carries a freshly-generated DAEMON_TOKEN injected
 * via `SandboxClaim.spec.env`. One leak compromises one sandbox.
+ * `valueFrom.secretKeyRef` isn't supported on SandboxClaim env; RBAC on + * the namespace is the secrecy boundary. + */ + +import { createHash, randomBytes, randomUUID } from "node:crypto"; +import * as net from "node:net"; +import { PassThrough } from "node:stream"; +import { + type KubeConfig, + KubeConfig as KubeConfigClass, + PortForward, +} from "@kubernetes/client-node"; +import type { + Counter, + Histogram, + Meter, + UpDownCounter, +} from "@opentelemetry/api"; +import { + daemonBash, + probeDaemonHealth, + proxyDaemonRequest, + waitForDaemonReady, +} from "../../daemon-client"; +import { + Inflight, + applyPreviewPattern, + hashSandboxId, + withSandboxLock, +} from "../shared"; +import type { RunnerStateStore, RunnerStateStoreOps } from "../state-store"; +import type { + EnsureOptions, + ExecInput, + ExecOutput, + ProxyRequestInit, + Sandbox, + SandboxId, + SandboxRunner, + Workload, +} from "../types"; +import { + createSandboxClaim, + deleteSandboxClaim, + getSandboxClaim, + patchSandboxClaimShutdown, + waitForSandboxReady, + type SandboxClaim, + type SandboxResource, +} from "./client"; +import { K8S_CONSTANTS } from "./constants"; + +const RUNNER_KIND = "agent-sandbox" as const; +const LOG_LABEL = "AgentSandboxRunner"; + +// Shared-namespace topology for MVP; tenancy enforced by unguessable claim +// names (sha256(userId:projectRef)). Per-org namespaces are deferred. +const DEFAULT_NAMESPACE = "agent-sandbox-system"; +const DEFAULT_TEMPLATE_NAME = "studio-sandbox"; + +const DAEMON_CONTAINER_PORT = 9000; +// In-pod port the daemon's reverse proxy targets. Mesh never connects here +// directly — everything funnels through the daemon container port — but the +// value is propagated to the daemon via DEV_PORT so it knows where the dev +// server will bind. +const DEFAULT_DEV_PORT = 3000; +const DEFAULT_WORKDIR = "/app"; + +// 32 bytes matches Docker's generation so audit logs don't vary by runner. 
+const DAEMON_TOKEN_BYTES = 32; + +/** + * Env keys mesh owns and a caller's `opts.env` MUST NOT shadow. DAEMON_TOKEN + * is the secrecy boundary; the rest configure the daemon's bootstrap and + * silently overriding any of them would break clone/install/dev-server start. + */ +const RESERVED_ENV_KEYS = new Set([ + "DAEMON_TOKEN", + "DAEMON_BOOT_ID", + "APP_ROOT", + "PROXY_PORT", + "DEV_PORT", + "RUNTIME", + "CLONE_URL", + "REPO_NAME", + "BRANCH", + "GIT_USER_NAME", + "GIT_USER_EMAIL", + "PACKAGE_MANAGER", +]); + +// Default idle-reap TTL: 15 min from each ensure() hit. Every code-initiated +// request flows through ensure() (or touches a record via getRecord, which +// bumps the TTL on the K8s side), so an active sandbox pushes this forward; +// abandoned sandboxes roll off at T+15m via the operator. +const DEFAULT_IDLE_TTL_MS = 15 * 60 * 1000; + +/** Handle prefix + 16-hex hash = 24 chars, well under K8s's 63-char label cap. */ +export const HANDLE_PREFIX = "studio-sb-"; +const HANDLE_HASH_LEN = 16; + +/** + * Headers stripped before re-issuing the preview proxy fetch. Hop-by-hop per + * RFC 7230 + cookies (preview is per-handle, not per-user — no callee session + * leak) + accept-encoding (Bun fetch auto-decompresses, so a downstream + * content-encoding would mismatch the actual body). + */ +const PREVIEW_STRIP_REQUEST_HEADERS = [ + "cookie", + "host", + "connection", + "keep-alive", + "proxy-authenticate", + "proxy-authorization", + "te", + "trailer", + "transfer-encoding", + "accept-encoding", + "content-length", + "upgrade", +]; + +/** + * Stripped from the proxied response. content-encoding/length would mismatch + * after Bun fetch auto-decompresses; CSP/X-Frame-Options the daemon already + * rewrote — re-passing them defeats the iframe-embedding fix the daemon + * installed. 
+ */ +const PREVIEW_STRIP_RESPONSE_HEADERS = [ + "connection", + "keep-alive", + "transfer-encoding", + "content-encoding", + "content-length", +]; + +// Deterministic local-port range for port-forward listeners. Same +// (handle, containerPort) pair → same host port across mesh restarts, so +// `previewUrl` cached in the thread's vmMap stays valid when the mesh +// process recycles. Birthday-collision probability stays <1% up to ~140 +// concurrent forwarders. EADDRINUSE walks the range forward until bind. +const PORT_RANGE_START = 40000; +const PORT_RANGE_SIZE = 10000; +const PORT_WALK_LIMIT = 256; + +// Structural type for the WebSocket returned by PortForward.portForward — we +// only need close/on to manage lifecycle; a direct `isomorphic-ws` dep for +// one type isn't worth it. +interface ForwardWebSocket { + close: () => void; + on: (event: "close" | "error", handler: () => void) => void; +} + +interface PortForwarder { + server: net.Server; + localPort: number; +} + +interface RunnerTenant { + orgId: string; + userId: string; +} + +interface K8sRecord { + id: SandboxId; + handle: string; + podName: string; + token: string; + workdir: string; + daemonUrl: string; + daemonForward: PortForwarder; + workload: Workload | null; + /** + * Per-boot UUID the daemon reports on /health. Generated mesh-side and + * injected via env; re-read from /health on rehydrate so we pick up + * pod restarts (the daemon's orchestrator handles resume-on-restart + * itself, this is purely informational on the mesh side). + */ + daemonBootId: string; + /** + * Tenant identity carried through for metric attribution on subsequent + * operations (proxy, exec, delete) where the caller only has a handle. + * Null when ensure() was called without tenant context (smoke tests, + * adopt fallback when claim labels were absent). + */ + tenant: RunnerTenant | null; + /** + * The original options the caller passed to `ensure()`. 
Persisted so + * `resurrectByHandle` can re-provision an evicted sandbox autonomously + * (15-min idle TTL deletes the claim — without these we'd come back as + * an empty pod with no repo cloned). Null on adopt paths where we can't + * recover the original opts; resurrection falls back to throwing/404 in + * that case so the caller's normal VM_START flow can repopulate them. + */ + ensureOpts: EnsureOptions | null; +} + +interface PersistedK8sState { + podName: string; + token: string; + workdir: string; + workload?: Workload | null; + daemonBootId?: string; + tenant?: RunnerTenant | null; + /** + * Original `EnsureOptions`. Persisted so `resurrectByHandle` can re-ensure + * after the operator deletes the claim on idle TTL. Optional for + * back-compat: rows written before this field existed lack it; resurrection + * returns null in that case and the caller surfaces 404 (UI's existing + * VM_START reprovision flow then runs with full opts). + */ + ensureOpts?: EnsureOptions; + [k: string]: unknown; +} + +export interface AgentSandboxRunnerOptions { + stateStore?: RunnerStateStore; + previewUrlPattern?: string; + /** Defaults to `new KubeConfig().loadFromDefault()`. Tests pass a stub. */ + kubeConfig?: KubeConfig; + /** Shared namespace for both SandboxTemplate and SandboxClaims. */ + namespace?: string; + /** SandboxTemplate all claims reference. */ + sandboxTemplateName?: string; + /** + * Deterministic DAEMON_TOKEN override — tests inject a fixed value so + * recorded fetch payloads are stable. Prod leaves this undefined. + */ + tokenGenerator?: () => string; + /** + * Idle-reap window (ms). Every `ensure()` hit pushes the claim's + * `spec.lifecycle.shutdownTime` to `now + idleTtlMs`; the operator + * deletes claim + pod when the wall clock passes that. + */ + idleTtlMs?: number; + /** + * OpenTelemetry meter for runner-level metrics (active gauge, ensure + * outcome counter, proxy duration histogram). 
Optional — when absent, + * runner is fully functional but emits no metrics. Tests typically pass + * undefined; mesh wires `metrics.getMeter("mesh", "1.0.0")`. + */ + meter?: Meter; +} + +export class AgentSandboxRunner implements SandboxRunner { + readonly kind = RUNNER_KIND; + + private readonly records = new Map(); + private readonly inflight = new Inflight(); + private readonly stateStore: RunnerStateStore | null; + private readonly previewUrlPattern: string | null; + private readonly kubeConfig: KubeConfig; + private readonly portForward: PortForward; + private readonly namespace: string; + private readonly sandboxTemplateName: string; + private readonly tokenGenerator: () => string; + private readonly idleTtlMs: number; + /** + * Instruments are null when no meter was provided. All emit-paths must + * null-check; the alternative — passing the OTel API's no-op meter — would + * still allocate and dispatch on every call. + */ + private readonly metrics: RunnerMetrics | null; + + constructor(opts: AgentSandboxRunnerOptions = {}) { + this.stateStore = opts.stateStore ?? null; + this.previewUrlPattern = opts.previewUrlPattern ?? null; + this.kubeConfig = opts.kubeConfig ?? loadDefaultKubeConfig(); + this.portForward = new PortForward(this.kubeConfig); + this.namespace = opts.namespace ?? DEFAULT_NAMESPACE; + this.sandboxTemplateName = + opts.sandboxTemplateName ?? DEFAULT_TEMPLATE_NAME; + this.tokenGenerator = + opts.tokenGenerator ?? + (() => randomBytes(DAEMON_TOKEN_BYTES).toString("hex")); + this.idleTtlMs = opts.idleTtlMs ?? DEFAULT_IDLE_TTL_MS; + this.metrics = opts.meter ? 
buildRunnerMetrics(opts.meter) : null; + } + + // ---- SandboxRunner surface ------------------------------------------------ + + async ensure(id: SandboxId, opts: EnsureOptions = {}): Promise { + const handle = this.computeHandle(id); + return this.inflight.run(handle, () => + withSandboxLock(this.stateStore, id, RUNNER_KIND, (ops) => + this.ensureLocked(id, handle, opts, ops), + ), + ); + } + + async exec(handle: string, input: ExecInput): Promise { + const rec = await this.requireRecord(handle); + return daemonBash(rec.daemonUrl, rec.token, input); + } + + async delete(handle: string): Promise { + const rec = await this.getRecord(handle); + this.records.delete(handle); + if (rec) { + this.closeForwarder(rec.daemonForward); + // Decrement only when we actually held the record — getRecord can be + // null after restart-without-state-store, in which case the gauge + // was never incremented for this handle in this process. + this.metrics?.active.add(-1, tenantAttrs(rec.tenant)); + } + await deleteSandboxClaim(this.kubeConfig, this.namespace, handle); + if (this.stateStore) { + if (rec) await this.stateStore.delete(rec.id, RUNNER_KIND); + else await this.stateStore.deleteByHandle(RUNNER_KIND, handle); + } + } + + async alive(handle: string): Promise { + const claim = await getSandboxClaim( + this.kubeConfig, + this.namespace, + handle, + ).catch(() => undefined); + return claim ? 
isSandboxReady(claim) : false; + } + + async getPreviewUrl(handle: string): Promise { + const rec = await this.getRecord(handle); + if (!rec) return null; + return this.composePreviewUrl(rec); + } + + async proxyDaemonRequest( + handle: string, + path: string, + init: ProxyRequestInit, + ): Promise { + const rec = await this.getRecord(handle); + if (!rec) { + return new Response(JSON.stringify({ error: "sandbox not found" }), { + status: 404, + headers: { "content-type": "application/json" }, + }); + } + const start = performance.now(); + let status = 0; + try { + const resp = await proxyDaemonRequest( + rec.daemonUrl, + rec.token, + path, + init, + ); + status = resp.status; + return resp; + } finally { + this.recordProxyDuration( + "daemon", + status, + rec, + performance.now() - start, + ); + } + } + + /** + * Resolves the HTTP base URL for a sandbox's daemon. Used by the preview + * reverse-proxy at the mesh edge. + * + * Two modes: + * 1. `previewUrlPattern` set (Stage 3 / in-cluster mesh): synthesize the + * in-cluster Service URL straight from the handle. No record lookup, no + * port-forward, no health probe — the cluster DNS + downstream fetch + * are the source of truth. Crucially this means a cold mesh pod (or one + * that just restarted with an empty records map) still serves preview + * traffic without first having to rehydrate every claim. If the Service + * doesn't exist for that handle, the downstream fetch fails and the + * caller surfaces a 502. + * 2. `previewUrlPattern` unset (dev / mesh-outside-cluster): fall back to + * the 127.0.0.1 port-forwarder opened by `getRecord`. Returns null when + * the record can't be found or rehydrated — the caller surfaces 404. + * + * Preview must always land on port 9000 (daemon) — never 3000 (dev server) + * — because the daemon's reverse proxy strips CSP/X-Frame headers and + * injects the HMR bootstrap script that vite needs to function inside the + * studio iframe. 
Bypassing it breaks SSE + iframe embedding. + */ + async resolvePreviewUpstreamUrl(handle: string): Promise { + if (this.previewUrlPattern) { + // Production mode: synthesize the in-cluster Service URL from the + // handle. We deliberately don't pre-validate that the claim is still + // alive — every preview request would pay a K8s API call. When the + // sandbox has been evicted, the downstream fetch fails and + // `proxyPreviewRequest` catches it + drives resurrection from there. + return `http://${handle}.${this.namespace}.svc.cluster.local:${DAEMON_CONTAINER_PORT}`; + } + const rec = await this.getRecord(handle); + if (rec) return rec.daemonUrl; + // Dev mode: cold cache + state-store miss. Try resurrection before + // surfacing 404 — the pod may have been operator-evicted on idle TTL + // and the caller (preview iframe, SSE EventSource probe) needs the + // sandbox back to make any progress. + const resurrected = await this.resurrectByHandle(handle); + return resurrected ? resurrected.daemonUrl : null; + } + + /** + * Reverse-proxies an inbound preview HTTP request to the sandbox's daemon. + * Unauthenticated by design — preview URLs are open the same way Vercel + * preview URLs are; the *handle* is the secret. + * + * `/_decopilot_vm/*` access policy at the edge: + * - **GET** is allowed through. The daemon's `/events` SSE and `/scripts` + * are intentionally unauthenticated and CORS-enabled (`Allow-Origin: *`) + * because the studio UI consumes them cross-origin from the preview + * URL — that's the only path it has to live setup state. Stripping + * them here would break the studio UI's setup tab and SSE event feed. + * - **Non-GET** (POST/PUT/DELETE/etc) is rejected as defense-in-depth. + * The daemon enforces bearer auth on the mutating endpoints + * (read/write/edit/grep/glob/bash/exec/kill), but the only legitimate + * caller for those is mesh itself via the internal port-forward; the + * preview surface should never see them. 
+ */ + async proxyPreviewRequest( + handle: string, + request: Request, + ): Promise { + const start = performance.now(); + // In-memory cache only — preview is the hot path; a state-store hit per + // request would dominate latency. Tenant attribution is best-effort: when + // the records map is cold (mesh just restarted) the metric still records + // duration with empty tenant attrs. cAdvisor on the pod side covers + // bandwidth attribution authoritatively via pod labels. + const cachedRec = this.records.get(handle) ?? null; + let status = 0; + try { + const upstreamBase = await this.resolvePreviewUpstreamUrl(handle); + if (!upstreamBase) { + status = 404; + return jsonResponse(404, { error: "sandbox not found" }); + } + + const reqUrl = new URL(request.url); + const isAdminPath = + reqUrl.pathname === "/_decopilot_vm" || + reqUrl.pathname.startsWith("/_decopilot_vm/"); + if (isAdminPath && request.method !== "GET") { + status = 404; + return jsonResponse(404, { error: "not found" }); + } + + const reqTarget = (base: string) => + `${base}${reqUrl.pathname}${reqUrl.search}`; + const headers = new Headers(request.headers); + for (const h of PREVIEW_STRIP_REQUEST_HEADERS) headers.delete(h); + + const hasBody = request.method !== "GET" && request.method !== "HEAD"; + const init: RequestInit & { duplex?: string } = { + method: request.method, + headers, + body: hasBody ? request.body : undefined, + redirect: "manual", + signal: request.signal, + duplex: hasBody ? "half" : undefined, + }; + + let upstream: Response; + try { + upstream = await fetch(reqTarget(upstreamBase), init as RequestInit); + } catch (err) { + // Truncate to host+pathname — query strings can carry secrets + // (magic-link tokens, signed URLs) and would otherwise end up in + // mesh stdout → kubectl logs → log aggregator. + const safeTarget = `${upstreamBase}${reqUrl.pathname}`; + console.warn( + `[${LOG_LABEL}] preview fetch to ${safeTarget} failed: ${err instanceof Error ? 
err.message : String(err)}`, + ); + + // Recover from operator-driven eviction (15-min idle TTL): the + // claim + Service are gone but our records cache (or the + // synthesized prod-mode URL) still pointed at the stale endpoint. + // Drop the cache and resurrect via state-store. Retry only for + // replay-safe methods — `init.body` is a stream that's been + // consumed by the failed fetch; replaying a POST would silently + // send an empty body. The browser/caller can retry the mutating + // request after this 502 surfaces; the resurrected sandbox will + // be ready for that next attempt. + if (request.method === "GET" || request.method === "HEAD") { + this.invalidateRecord(handle); + const resurrected = await this.resurrectByHandle(handle).catch( + () => null, + ); + if (resurrected) { + const retryBase = await this.resolvePreviewUpstreamUrl(handle); + if (retryBase) { + try { + upstream = await fetch( + reqTarget(retryBase), + init as RequestInit, + ); + const responseHeaders = new Headers(); + for (const [k, v] of upstream.headers.entries()) { + if ( + !PREVIEW_STRIP_RESPONSE_HEADERS.includes(k.toLowerCase()) + ) { + responseHeaders.set(k, v); + } + } + status = upstream.status; + return new Response(upstream.body, { + status: upstream.status, + statusText: upstream.statusText, + headers: responseHeaders, + }); + } catch (retryErr) { + console.warn( + `[${LOG_LABEL}] preview fetch retry to ${safeTarget} failed: ${retryErr instanceof Error ? retryErr.message : String(retryErr)}`, + ); + } + } + } + } else { + // Non-replay-safe method: still drop the stale cache so the next + // request goes through fresh validation. 
+ this.invalidateRecord(handle); + } + + status = 502; + return jsonResponse(502, { error: "sandbox daemon unreachable" }); + } + + const responseHeaders = new Headers(); + for (const [k, v] of upstream.headers.entries()) { + if (!PREVIEW_STRIP_RESPONSE_HEADERS.includes(k.toLowerCase())) { + responseHeaders.set(k, v); + } + } + status = upstream.status; + return new Response(upstream.body, { + status: upstream.status, + statusText: upstream.statusText, + headers: responseHeaders, + }); + } finally { + this.recordProxyDuration( + "preview", + status, + cachedRec, + performance.now() - start, + handle, + ); + } + } + + // ---- Ensure flow ---------------------------------------------------------- + + private async ensureLocked( + id: SandboxId, + handle: string, + opts: EnsureOptions, + ops: RunnerStateStoreOps | null, + ): Promise { + if (opts.image) { + console.warn( + `[${LOG_LABEL}] opts.image ignored (template ${this.sandboxTemplateName} pins image): got ${opts.image}`, + ); + } + + // 1. State-store resume. + if (ops) { + const persisted = await ops.get(id, RUNNER_KIND); + if (persisted) { + const rec = await this.rehydrate(id, handle, persisted); + if (rec) + return this.finish( + rec, + ops, + /* persistNow */ false, + /* patchTtl */ true, + "resume", + ); + await ops.delete(id, RUNNER_KIND); + } + } + // 2. Cluster-side adopt: state store empty but a claim with our + // deterministic name already exists. + const existing = await getSandboxClaim( + this.kubeConfig, + this.namespace, + handle, + ).catch(() => undefined); + if (existing) { + const adopted = await this.adopt(id, handle, existing).catch((err) => { + console.warn( + `[${LOG_LABEL}] adopt ${handle} failed, recreating: ${err instanceof Error ? 
err.message : String(err)}`, + ); + return null; + }); + if (adopted) + return this.finish( + adopted, + ops, + /* persistNow */ true, + /* patchTtl */ true, + "adopt", + ); + await deleteSandboxClaim(this.kubeConfig, this.namespace, handle).catch( + () => {}, + ); + } + // 3. Fresh provision. + const fresh = await this.provision(id, handle, opts); + return this.finish( + fresh, + ops, + /* persistNow */ true, + /* patchTtl */ false, + "fresh", + ); + } + + private async finish( + rec: K8sRecord, + ops: RunnerStateStoreOps | null, + persistNow: boolean, + patchTtl: boolean, + outcome: "fresh" | "resume" | "adopt", + ): Promise { + const wasCached = this.records.has(rec.handle); + this.records.set(rec.handle, rec); + if (persistNow) await this.persist(ops, rec); + // Fresh provision set a shutdownTime in the claim spec already; resumes + // and adopts rely on this patch to stay alive. + if (patchTtl) { + await patchSandboxClaimShutdown( + this.kubeConfig, + this.namespace, + rec.handle, + this.computeShutdownTime(), + ).catch((err) => + console.warn( + `[${LOG_LABEL}] TTL refresh failed for ${rec.handle}: ${err instanceof Error ? err.message : String(err)}`, + ), + ); + } + if (this.metrics) { + const attrs = tenantAttrs(rec.tenant); + this.metrics.ensureOutcome.add(1, { ...attrs, outcome }); + // Only increment the active gauge on first observation to avoid + // double-counting when the same handle is rehydrated multiple times + // (mesh-process internal cache hit; ensureLocked is invoked again). + if (!wasCached) this.metrics.active.add(1, attrs); + } + return this.toSandbox(rec); + } + + /** + * Compose the env block the daemon's orchestrator reads to clone, install, + * and start the dev server. Mirrors the docker runner's contract; reader is + * `packages/sandbox/daemon/config.ts`. 
+ * + * Caller-supplied `opts.env` is layered first so the bootstrap keys defined + * here (and listed in RESERVED_ENV_KEYS) always win — an intercepted + * DAEMON_TOKEN would compromise the sandbox; an intercepted DEV_PORT would + * just break the boot. We warn — not throw — to match the docker runner's + * permissive shape. + */ + private buildEnvMap( + opts: EnsureOptions, + boot: { token: string; daemonBootId: string; workdir: string }, + ): Record { + const callerEnv: Record = {}; + const dropped: string[] = []; + for (const [k, v] of Object.entries(opts.env ?? {})) { + if (RESERVED_ENV_KEYS.has(k)) dropped.push(k); + else callerEnv[k] = v; + } + if (dropped.length > 0) { + console.warn( + `[${LOG_LABEL}] opts.env keys overlap reserved bootstrap names and were dropped: ${dropped.join(",")}`, + ); + } + + const repo = opts.repo; + const repoLabel = repo + ? (repo.displayName ?? deriveRepoLabel(repo.cloneUrl)) + : null; + + return { + ...callerEnv, + DAEMON_TOKEN: boot.token, + DAEMON_BOOT_ID: boot.daemonBootId, + APP_ROOT: boot.workdir, + PROXY_PORT: String(DAEMON_CONTAINER_PORT), + DEV_PORT: String(opts.workload?.devPort ?? DEFAULT_DEV_PORT), + RUNTIME: opts.workload?.runtime ?? "node", + ...(repo + ? { + CLONE_URL: repo.cloneUrl, + REPO_NAME: repoLabel ?? "", + BRANCH: repo.branch ?? "", + GIT_USER_NAME: repo.userName, + GIT_USER_EMAIL: repo.userEmail, + } + : {}), + ...(opts.workload?.packageManager + ? 
{ PACKAGE_MANAGER: opts.workload.packageManager } + : {}), + }; + } + + private buildClaim( + handle: string, + opts: EnsureOptions, + boot: { token: string; daemonBootId: string; workdir: string }, + ): SandboxClaim { + const envMap = this.buildEnvMap(opts, boot); + return { + apiVersion: `${K8S_CONSTANTS.CLAIM_API_GROUP}/${K8S_CONSTANTS.CLAIM_API_VERSION}`, + kind: "SandboxClaim", + metadata: { + name: handle, + namespace: this.namespace, + // Tenant duplicated on the claim itself (not just the pod) so the + // adopt path can recover orgId/userId after a state-store wipe; + // adopt() reads claim.metadata.labels, not pod labels. + labels: { + "app.kubernetes.io/name": "studio-sandbox", + "app.kubernetes.io/managed-by": "studio", + ...buildTenantLabels(opts.tenant), + }, + }, + spec: { + sandboxTemplateRef: { name: this.sandboxTemplateName }, + // additionalPodMetadata.labels is the operator's pod-label propagation + // hook (CRD field, not a generic patch). Tenant labels here flow to + // the pod and become joinable in cAdvisor/kubelet metrics. `role` + // distinguishes claimed pods from warm-pool pods (template sets + // role=sandbox-pod by default). + additionalPodMetadata: { + labels: buildTenantLabels(opts.tenant, { + [LABEL_KEYS.role]: "claimed", + [LABEL_KEYS.sandboxHandle]: handle, + }), + }, + // `valueFrom.secretKeyRef` isn't supported on SandboxClaim env; RBAC + // on the namespace is the secrecy boundary. Warm-pool off because the + // operator rejects custom env on warm-pooled claims. Sorted by name + // so `kubectl diff` / claim audit entries don't churn across runs + // that pass the same env in different insertion orders. + env: Object.entries(envMap) + .sort(([a], [b]) => (a < b ? -1 : a > b ? 
1 : 0)) + .map(([name, value]) => ({ name, value })), + warmpool: "none", + lifecycle: { + shutdownPolicy: "Delete", + shutdownTime: this.computeShutdownTime(), + }, + }, + }; + } + + private async provision( + id: SandboxId, + handle: string, + opts: EnsureOptions, + ): Promise { + const token = this.tokenGenerator(); + const daemonBootId = randomUUID(); + const workdir = DEFAULT_WORKDIR; + + const claim = this.buildClaim(handle, opts, { + token, + daemonBootId, + workdir, + }); + await createSandboxClaim(this.kubeConfig, this.namespace, claim); + const { podName } = await waitForSandboxReady( + this.kubeConfig, + this.namespace, + handle, + ); + + const daemonForward = await this.openForwarder( + podName, + DAEMON_CONTAINER_PORT, + handle, + ); + const daemonUrl = `http://127.0.0.1:${daemonForward.localPort}`; + try { + await waitForDaemonReady(daemonUrl); + } catch (err) { + this.closeForwarder(daemonForward); + await deleteSandboxClaim(this.kubeConfig, this.namespace, handle).catch( + () => {}, + ); + throw err; + } + + return { + id, + handle, + podName, + token, + workdir, + daemonUrl, + daemonForward, + workload: opts.workload ?? null, + daemonBootId, + tenant: opts.tenant ?? null, + ensureOpts: stripEnsureOpts(opts), + }; + } + + /** + * Reconstruct a record from persisted state. After this returns, the record + * is ready for any of the six methods — the daemon port-forward is open and + * its `/health` has been re-probed. Returns null on any mismatch; caller + * purges and falls through to adopt/provision. + */ + private async rehydrate( + id: SandboxId, + handle: string, + persisted: { handle: string; state: Record }, + ): Promise { + const state = persisted.state as Partial; + if (!state.podName || !state.token) return null; + + const claim = await getSandboxClaim( + this.kubeConfig, + this.namespace, + handle, + ).catch(() => undefined); + if (!claim || !isSandboxReady(claim)) return null; + + // Pod name may have changed (operator recreated the pod). 
Trust the claim + // annotation over the persisted value. + const currentPodName = readPodName(claim) ?? state.podName; + + const live = await this.openAndProbeDaemon(currentPodName, handle); + if (!live) return null; + + // Pod bounced but the daemon's orchestrator handles re-bootstrap itself + // on boot (resume-on-restart). Just refresh our copy of bootId. + if (state.daemonBootId && state.daemonBootId !== live.bootId) { + console.warn( + `[${LOG_LABEL}] daemon restart detected (handle=${handle}): stored bootId=${state.daemonBootId} live bootId=${live.bootId}`, + ); + } + + return { + id, + handle, + podName: currentPodName, + token: state.token, + workdir: state.workdir ?? DEFAULT_WORKDIR, + daemonUrl: live.daemonUrl, + daemonForward: live.daemonForward, + workload: state.workload ?? null, + daemonBootId: live.bootId, + tenant: state.tenant ?? null, + ensureOpts: state.ensureOpts ?? null, + }; + } + + private async adopt( + id: SandboxId, + handle: string, + claim: SandboxResource, + ): Promise { + if (!isSandboxReady(claim)) return null; + const podName = readPodName(claim); + if (!podName) return null; + const token = readClaimDaemonToken(claim); + if (!token) return null; + + const live = await this.openAndProbeDaemon(podName, handle); + if (!live) return null; + + return { + id, + handle, + podName, + token, + workdir: DEFAULT_WORKDIR, + daemonUrl: live.daemonUrl, + daemonForward: live.daemonForward, + workload: null, + daemonBootId: live.bootId, + // Recovered from claim labels written at provision time. Null if the + // claim pre-dates tenant labelling (back-compat with already-running + // sandboxes when this code rolls out). + tenant: readClaimTenant(claim), + // Adopt happens when the state-store is empty but a claim with our + // deterministic name still exists in the cluster (e.g. mesh restart + // without state-store, or state-store wipe). 
The original opts aren't + // recoverable from the claim alone, so resurrection on this record + // can't autonomously re-provision; falls back to the caller's + // VM_START path. + ensureOpts: null, + }; + } + + /** + * Open the daemon port-forward and probe `/health`. Closes the forwarder + * and returns null on any failure so the caller can fall through to + * recreate. Both `rehydrate` and `adopt` share this shape — the only + * difference is whether the bootId match is checked. + */ + private async openAndProbeDaemon( + podName: string, + handle: string, + ): Promise<{ + daemonForward: PortForwarder; + daemonUrl: string; + bootId: string; + } | null> { + const daemonForward = await this.openForwarder( + podName, + DAEMON_CONTAINER_PORT, + handle, + ).catch(() => null); + if (!daemonForward) return null; + const daemonUrl = `http://127.0.0.1:${daemonForward.localPort}`; + // probeDaemonHealth returns null when /health is unreachable OR lacks a + // bootId (older daemon shape). Either way, purge + re-provision. + const health = await probeDaemonHealth(daemonUrl); + if (!health) { + this.closeForwarder(daemonForward); + return null; + } + return { daemonForward, daemonUrl, bootId: health.bootId }; + } + + // ---- Handle resolution (post-restart) ------------------------------------- + + private async getRecord(handle: string): Promise { + const cached = this.records.get(handle); + if (cached) return cached; + if (!this.stateStore) return null; + const persisted = await this.stateStore.getByHandle(RUNNER_KIND, handle); + if (!persisted) return null; + const rec = await this.rehydrate(persisted.id, handle, persisted); + if (rec) this.records.set(handle, rec); + return rec; + } + + /** + * Re-ensure a sandbox after operator-driven eviction (15-min idle TTL deletes + * claim + pod). 
Looks up the SandboxId from the state-store by handle, then + * runs the standard `ensure()` path with the persisted `EnsureOptions` so the + * fresh provision rehydrates with the same repo/env/workload. + * + * Returns null when: + * - no state-store (test runners) — caller surfaces 404, + * - handle has no row (truly unknown) — caller surfaces 404, + * - row predates `ensureOpts` persistence (back-compat: rows from before + * this change). Resurrecting with empty opts would create an empty pod + * with no repo cloned, which is worse than 404. UI's existing + * notFound→VM_START flow re-supplies opts in that case. + */ + private async resurrectByHandle(handle: string): Promise { + if (!this.stateStore) return null; + const row = await this.stateStore.getByHandle(RUNNER_KIND, handle); + if (!row) return null; + const persistedOpts = (row.state as Partial).ensureOpts; + if (!persistedOpts) return null; + // ensure() is idempotent + advisory-locked, so concurrent resurrections + // for the same handle collapse to a single provision. The lock is keyed + // on (userId, projectRef, kind), the same identity our state-store row + // is keyed on. + await this.ensure(row.id, persistedOpts); + return this.records.get(handle) ?? null; + } + + private async requireRecord(handle: string): Promise { + const rec = await this.getRecord(handle); + if (rec) return rec; + const resurrected = await this.resurrectByHandle(handle); + if (resurrected) return resurrected; + throw new Error(`unknown sandbox handle ${handle}`); + } + + /** + * Drop the in-memory record cache for `handle`. Called when the cached + * `daemonUrl` proves stale (e.g. fetch fails with connection refused after + * the operator deleted the underlying pod). The next access goes through + * the state-store + rehydrate or resurrection path. 
+ */ + private invalidateRecord(handle: string): void { + const rec = this.records.get(handle); + if (!rec) return; + this.records.delete(handle); + this.closeForwarder(rec.daemonForward); + } + + // ---- Metric helpers ------------------------------------------------------- + + private recordProxyDuration( + source: "daemon" | "preview", + statusCode: number, + rec: K8sRecord | null, + durationMs: number, + fallbackHandle?: string, + ): void { + if (!this.metrics) return; + this.metrics.proxyDurationMs.record(durationMs, { + ...tenantAttrs(rec?.tenant ?? null), + source, + sandbox_handle: rec?.handle ?? fallbackHandle ?? "", + status_code: statusCode || 0, + }); + } + + // ---- Identity + preview URL ---------------------------------------------- + + private computeHandle(id: SandboxId): string { + return `${HANDLE_PREFIX}${hashSandboxId(id, HANDLE_HASH_LEN)}`; + } + + // Local mode: route preview traffic through the daemon port-forward, not + // a separate dev forwarder. The daemon serves /_decopilot_vm/* + /health + // in-process and reverse-proxies everything else to in-pod localhost:DEV_PORT + // (with CSP/X-Frame stripping + HMR bootstrap injection). Pointing the URL + // straight at the dev port would bypass that proxy and break SSE + iframe + // embedding. Production mode (previewUrlPattern set) goes through the + // ingress-terminated URL the operator emits. 
+ private composePreviewUrl(rec: K8sRecord): string { + if (this.previewUrlPattern) { + return applyPreviewPattern(this.previewUrlPattern, rec.handle); + } + return `http://127.0.0.1:${rec.daemonForward.localPort}/`; + } + + private toSandbox(rec: K8sRecord): Sandbox { + return { + handle: rec.handle, + workdir: rec.workdir, + previewUrl: this.composePreviewUrl(rec), + }; + } + + // ---- Persistence ---------------------------------------------------------- + + private async persist( + ops: RunnerStateStoreOps | null, + rec: K8sRecord, + ): Promise { + if (!ops) return; + const state: PersistedK8sState = { + podName: rec.podName, + token: rec.token, + workdir: rec.workdir, + workload: rec.workload, + daemonBootId: rec.daemonBootId, + tenant: rec.tenant, + ...(rec.ensureOpts ? { ensureOpts: rec.ensureOpts } : {}), + }; + await ops.put(rec.id, RUNNER_KIND, { handle: rec.handle, state }); + } + + // ---- TTL helpers ---------------------------------------------------------- + + private computeShutdownTime(): string { + return new Date(Date.now() + this.idleTtlMs).toISOString(); + } + + // ---- Port-forwarding ------------------------------------------------------ + + /** + * Opens a 127.0.0.1 TCP listener whose connections tunnel to + * `podName:containerPort` via the apiserver. Each TCP connection spawns a + * fresh WebSocket — matches `kubectl port-forward`'s semantics. Lifecycle + * is mutual: client socket close → close the k8s WS; WS close → destroy + * the client socket. + */ + private openForwarder( + podName: string, + containerPort: number, + // `handle` is passed separately so the deterministic port survives pod + // recreation (operator-driven): vmMap's cached previewUrl stays valid. 
+ handle: string = podName, + ): Promise { + const startPort = deterministicLocalPort(handle, containerPort); + return new Promise((resolve, reject) => { + const tryBind = (port: number, attempt: number) => { + const server = net.createServer((socket) => + this.handleForwardedConnection(socket, podName, containerPort), + ); + server.once("error", (err: NodeJS.ErrnoException) => { + if (err.code === "EADDRINUSE" && attempt < PORT_WALK_LIMIT) { + // Release the failed listener before walking forward — listen() + // failure leaves the Server object holding the connection handler + // closure; closing makes the leak trivially visible to GC. + try { + server.close(); + } catch {} + const next = + PORT_RANGE_START + + ((port - PORT_RANGE_START + 1) % PORT_RANGE_SIZE); + tryBind(next, attempt + 1); + return; + } + reject(err); + }); + server.listen(port, "127.0.0.1", () => { + const address = server.address(); + if (!address || typeof address === "string") { + server.close(); + reject(new Error("port-forward listener failed to bind")); + return; + } + resolve({ server, localPort: address.port }); + }); + }; + tryBind(startPort, 0); + }); + } + + private handleForwardedConnection( + socket: net.Socket, + podName: string, + containerPort: number, + ): void { + // Inbound bytes pipe through a PassThrough rather than the socket + // directly: `portForward` attaches its 'data' listener only after the + // WebSocket opens (async); on Bun, bytes arriving in that window are + // dropped. Piping synchronously into a PassThrough buffers those bytes + // until the library drains it. 
+ const inbound = new PassThrough(); + let ws: ForwardWebSocket | null = null; + let closed = false; + + const cleanup = () => { + if (closed) return; + closed = true; + inbound.destroy(); + if (ws) { + try { + ws.close(); + } catch {} + } + if (!socket.destroyed) socket.destroy(); + }; + + socket.pipe(inbound); + socket.on("error", cleanup); + socket.on("close", cleanup); + + this.portForward + .portForward( + this.namespace, + podName, + [containerPort], + socket, + null, + inbound, + ) + .then((res) => { + // retryCount=0 (default) → raw WebSocket; retryCount>0 → factory fn. + const opened = typeof res === "function" ? res() : res; + if (!opened) { + cleanup(); + return; + } + ws = opened as ForwardWebSocket; + ws.on("close", cleanup); + ws.on("error", cleanup); + if (closed) { + try { + ws.close(); + } catch {} + } + }) + .catch((err: unknown) => { + console.warn( + `[${LOG_LABEL}] port-forward to ${podName}:${containerPort} failed: ${err instanceof Error ? err.message : String(err)}`, + ); + cleanup(); + }); + } + + private closeForwarder(forwarder: PortForwarder): void { + forwarder.server.close((err) => { + if (err) { + console.warn( + `[${LOG_LABEL}] port-forward close on :${forwarder.localPort} errored: ${err instanceof Error ? err.message : String(err)}`, + ); + } + }); + } +} + +// ---- Helpers ---------------------------------------------------------------- + +interface RunnerMetrics { + active: UpDownCounter; + ensureOutcome: Counter; + proxyDurationMs: Histogram; +} + +function buildRunnerMetrics(meter: Meter): RunnerMetrics { + return { + active: meter.createUpDownCounter("studio.sandbox.active", { + description: + "Active sandbox count, by runner kind and owning org. 
Cross-checks the cAdvisor-derived count from the cluster — divergence between the two indicates orphaned claims (mesh deleted but K8s didn't reap) or unattributed pods.", + unit: "{sandbox}", + }), + ensureOutcome: meter.createCounter("studio.sandbox.ensure.outcome", { + description: + "Outcome of each ensure() call: fresh provision, resume from state-store after restart, or adopt of a cluster-side claim mesh didn't know about. Cold-start ratio is the primary input for warm-pool sizing.", + unit: "{call}", + }), + proxyDurationMs: meter.createHistogram("studio.sandbox.proxy.duration_ms", { + description: + "Wall-clock latency of mesh-mediated requests to the sandbox daemon: tool exec proxies (source=daemon) and preview iframe traffic (source=preview).", + unit: "ms", + }), + }; +} + +function loadDefaultKubeConfig(): KubeConfig { + const kc = new KubeConfigClass(); + kc.loadFromDefault(); + return kc; +} + +function isSandboxReady(resource: SandboxResource): boolean { + return Boolean( + resource.status?.conditions?.some( + (c) => c.type === "Ready" && c.status === "True", + ), + ); +} + +function readClaimDaemonToken(claim: SandboxResource): string | null { + const env = claim.spec?.env; + if (!env) return null; + for (const entry of env) { + if (entry.name === "DAEMON_TOKEN" && entry.value) return entry.value; + } + return null; +} + +function readPodName(resource: SandboxResource): string | null { + return ( + resource.metadata?.annotations?.[K8S_CONSTANTS.POD_NAME_ANNOTATION] ?? + resource.metadata?.name ?? + null + ); +} + +function deterministicLocalPort(handle: string, containerPort: number): number { + const hash = createHash("sha256") + .update(`${handle}:${containerPort}`) + .digest(); + return PORT_RANGE_START + (hash.readUInt32BE(0) % PORT_RANGE_SIZE); +} + +// CORS headers on synthesized preview-proxy responses. 
The studio iframe +// renders under the studio origin and fetches the preview origin cross-site +// (SSE at `/_decopilot_vm/events`, plus the EventSource probeMissing fetch); +// without ACAO the browser blocks the response *and* hides the actual status, +// so a 404 from us looks like an opaque CORS failure in devtools. The daemon +// already sets ACAO on its own responses — these headers only fire on errors +// we synthesize before reaching the daemon. +function jsonResponse(status: number, body: unknown): Response { + return new Response(JSON.stringify(body), { + status, + headers: { + "content-type": "application/json", + "access-control-allow-origin": "*", + }, + }); +} + +// K8s label keys mesh attaches. Centralized so writers (buildTenantLabels) +// and the reader (readClaimTenant) can't drift. +const LABEL_KEYS = { + role: "studio.decocms.com/role", + sandboxHandle: "studio.decocms.com/sandbox-handle", + orgId: "studio.decocms.com/org-id", + userId: "studio.decocms.com/user-id", +} as const; + +// K8s label values: ≤63 chars, must match `(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?`. +// Org/user IDs are UUIDs in mesh and pass through unchanged; the regex check +// + truncation is defensive against future ID-shape changes (the operator will +// reject the claim outright if a label value is invalid). +const LABEL_VALUE_RE = /^([A-Za-z0-9]([-A-Za-z0-9_.]*[A-Za-z0-9])?)?$/; +const MAX_LABEL_VALUE_LEN = 63; + +function sanitizeLabelValue(value: string): string { + const truncated = value.slice(0, MAX_LABEL_VALUE_LEN); + return LABEL_VALUE_RE.test(truncated) ? truncated : ""; +} + +/** + * Tenant labels for `adopt()` recovery + cost attribution. Used on both the + * claim (so `kubectl get sandboxclaim` shows ownership and adopt() can read + * orgId/userId after a state-store wipe) and the pod (where cAdvisor / + * kubelet metrics pick them up). Pass `extra` for pod-only fields like + * `role` and `sandbox-handle`. 
+ */ +function buildTenantLabels( + tenant: EnsureOptions["tenant"], + extra: Record = {}, +): Record { + const labels: Record = { ...extra }; + if (tenant) { + const orgId = sanitizeLabelValue(tenant.orgId); + const userId = sanitizeLabelValue(tenant.userId); + if (orgId) labels[LABEL_KEYS.orgId] = orgId; + if (userId) labels[LABEL_KEYS.userId] = userId; + } + return labels; +} + +/** Read tenant back from a claim's metadata.labels (adopt path). */ +function readClaimTenant(claim: SandboxResource): RunnerTenant | null { + const labels = claim.metadata?.labels; + if (!labels) return null; + const orgId = labels[LABEL_KEYS.orgId]; + const userId = labels[LABEL_KEYS.userId]; + if (!orgId || !userId) return null; + return { orgId, userId }; +} + +/** + * Convert tenant struct to OTel attribute keys. `runner_kind` is constant for + * a given runner instance but included on every attrs set so downstream + * dashboards can pivot across runners (k8s vs docker) without re-aggregating. + */ +function tenantAttrs(tenant: RunnerTenant | null): { + org_id: string; + user_id: string; + runner_kind: string; +} { + return { + org_id: tenant?.orgId ?? "", + user_id: tenant?.userId ?? "", + runner_kind: RUNNER_KIND, + }; +} + +/** + * Subset of `EnsureOptions` worth persisting for resurrection. Drops `image` + * (k8s ignores it — template pins the image) and any nullish entries so the + * persisted blob stays small. + */ +function stripEnsureOpts(opts: EnsureOptions): EnsureOptions | null { + const out: EnsureOptions = {}; + if (opts.repo) out.repo = opts.repo; + if (opts.workload) out.workload = opts.workload; + if (opts.env && Object.keys(opts.env).length > 0) out.env = opts.env; + if (opts.tenant) out.tenant = opts.tenant; + return Object.keys(out).length > 0 ? out : null; +} + +/** Fallback for when callers don't provide `repo.displayName`. 
*/ +function deriveRepoLabel(cloneUrl: string): string { + try { + const u = new URL(cloneUrl); + const trimmed = u.pathname.replace(/^\/+/, "").replace(/\.git$/, ""); + return trimmed || u.hostname; + } catch { + return cloneUrl; + } +} diff --git a/packages/sandbox/server/runner/index.ts b/packages/sandbox/server/runner/index.ts index e9b1281d8a..f822178b37 100644 --- a/packages/sandbox/server/runner/index.ts +++ b/packages/sandbox/server/runner/index.ts @@ -1,7 +1,8 @@ /** * Public surface. Ships `DockerSandboxRunner` only via the default entry; - * Freestyle sits behind its own subpath export (./runner/freestyle) because - * its SDK is heavy and not every deploy needs it. + * Freestyle and agent-sandbox sit behind their own subpath exports (./runner/ + * freestyle, ./runner/agent-sandbox) because their SDKs are heavy and not + * every deploy needs them. */ import { spawnSync } from "node:child_process"; @@ -79,17 +80,22 @@ function isDockerInstalled(): boolean { /** * Rules: - * 1. `MESH_SANDBOX_RUNNER=docker|freestyle` — honored. + * 1. `STUDIO_SANDBOX_RUNNER=docker|freestyle|agent-sandbox` — honored. * 2. No explicit value, `FREESTYLE_API_KEY` set — pick freestyle. * 3. Production w/o explicit value and no freestyle key — null. * 4. Dev w/o explicit value — docker if CLI present, else null. + * + * agent-sandbox is explicit-only: never auto-selected — callers must opt in + * with `STUDIO_SANDBOX_RUNNER=agent-sandbox` so docker-only dev stays the default. 
*/ export function tryResolveRunnerKindFromEnv(): RunnerKind | null { - const raw = process.env.MESH_SANDBOX_RUNNER; - if (raw === "docker" || raw === "freestyle") return raw; + const raw = process.env.STUDIO_SANDBOX_RUNNER; + if (raw === "docker" || raw === "freestyle" || raw === "agent-sandbox") { + return raw; + } if (raw && raw.length > 0) { throw new Error( - `Unknown MESH_SANDBOX_RUNNER="${raw}" — expected "docker" or "freestyle".`, + `Unknown STUDIO_SANDBOX_RUNNER="${raw}" — expected "docker", "freestyle", or "agent-sandbox".`, ); } if (process.env.FREESTYLE_API_KEY) return "freestyle"; @@ -103,12 +109,12 @@ export function resolveRunnerKindFromEnv(): RunnerKind { if (kind) return kind; if (process.env.NODE_ENV === "production") { throw new Error( - `MESH_SANDBOX_RUNNER must be set explicitly in production — ` + - `choose "docker" or "freestyle" (or set FREESTYLE_API_KEY).`, + `STUDIO_SANDBOX_RUNNER must be set explicitly in production — ` + + `choose "docker", "freestyle", or "agent-sandbox" (or set FREESTYLE_API_KEY).`, ); } throw new Error( `No sandbox runner available: Docker CLI not found on PATH. ` + - `Install Docker for local dev, or set MESH_SANDBOX_RUNNER explicitly.`, + `Install Docker for local dev, or set STUDIO_SANDBOX_RUNNER explicitly.`, ); } diff --git a/packages/sandbox/server/runner/types.ts b/packages/sandbox/server/runner/types.ts index 7e6463fd74..4b52e847dd 100644 --- a/packages/sandbox/server/runner/types.ts +++ b/packages/sandbox/server/runner/types.ts @@ -48,6 +48,17 @@ export interface EnsureOptions { workload?: Workload; /** Frozen for the sandbox's lifetime — changing requires recreate. */ env?: Record; + /** + * Tenant identity for cost attribution. Runners MAY surface these as + * platform-native metadata (k8s pod labels, Docker container labels) so + * downstream metrics pipelines can attribute resource usage to the owning + * org/user. 
Optional — callers without an org context (smoke tests, internal + * tool sandboxes) leave it unset and pods get only platform-level labels. + */ + tenant?: { + orgId: string; + userId: string; + }; } export interface ExecInput { @@ -75,7 +86,7 @@ export interface ProxyRequestInit { * Persisted on `vmMap` and `sandbox_runner_state.runner_kind`. When widening, * keep `VmMapEntry.runnerKind` in sync. */ -export type RunnerKind = "docker" | "freestyle"; +export type RunnerKind = "docker" | "freestyle" | "agent-sandbox"; export interface SandboxRunner { readonly kind: RunnerKind;