diff --git a/.github/workflows/public-testability-macos.yml b/.github/workflows/public-testability-macos.yml
new file mode 100644
index 0000000..1b5728d
--- /dev/null
+++ b/.github/workflows/public-testability-macos.yml
@@ -0,0 +1,45 @@
+name: Public Testability (macOS)
+
+on:
+ push:
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ verify-macos:
+ runs-on: macos-latest
+
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+
+ - name: Cache Go build and module directories
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/go/pkg/mod
+ ~/Library/Caches/go-build
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go-
+
+ - name: Install macOS verification prerequisites
+ run: |
+ brew update
+ brew install jq docker docker-compose
+ mkdir -p ~/.docker/cli-plugins
+ ln -sf "$(brew --prefix docker-compose)/bin/docker-compose" ~/.docker/cli-plugins/docker-compose
+ docker compose version
+
+ - name: Build supported beta-track binaries
+ run: make build-supported
+
+ - name: Run supported public verification
+ run: make verify-beta
diff --git a/.github/workflows/public-testability-windows.yml b/.github/workflows/public-testability-windows.yml
new file mode 100644
index 0000000..26392ca
--- /dev/null
+++ b/.github/workflows/public-testability-windows.yml
@@ -0,0 +1,60 @@
+name: Public Testability (Windows)
+
+on:
+ push:
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ verify-windows-broker:
+ runs-on: windows-latest
+
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+
+ - name: Cache Go build and module directories
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/go/pkg/mod
+ ~/AppData/Local/go-build
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+ restore-keys: |
+ ${{ runner.os }}-go-
+
+ - name: Install GNU Make
+ shell: pwsh
+ run: choco install make -y
+
+ - name: Verify make availability
+ shell: bash
+ run: make --version
+
+ - name: Build Windows agent-broker only
+ shell: bash
+ run: make build-broker
+
+ - name: Run nested broker tests (BG-011 still secondary and allowed to fail)
+ id: broker_tests
+ continue-on-error: true
+ shell: bash
+ working-directory: cmd/broker
+ run: go test ./...
+
+ - name: Report nested broker test status
+ if: always()
+ shell: bash
+ run: |
+ if [ "${{ steps.broker_tests.outcome }}" = "success" ]; then
+ echo "Nested cmd/broker tests passed in this run."
+ else
+ echo "BG-011 remains open: nested cmd/broker tests still fail, so the workflow only enforces the documented Windows claim that agent-broker builds on windows-latest."
+ fi
diff --git a/.github/workflows/public-testability.yml b/.github/workflows/public-testability.yml
new file mode 100644
index 0000000..fe21280
--- /dev/null
+++ b/.github/workflows/public-testability.yml
@@ -0,0 +1,32 @@
+name: Public Testability
+
+on:
+ push:
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ verify:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: go.mod
+
+ - name: Install CGO build prerequisites
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y --no-install-recommends build-essential
+
+ - name: Build supported beta-track binaries
+ run: make build-supported
+
+ - name: Run supported public verification
+ run: ./scripts/verify_beta.sh
diff --git a/.gitignore b/.gitignore
index c73cd06..a603b10 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,20 @@
# Build artifacts
bin/
+node_modules/
+extension/out/
orchestratorctl
!cmd/orchestratorctl/
!cmd/orchestratorctl/*.go
+/cmd/broker/agent-broker
+/cmd/broker/agent-broker.exe
+/cmd/codencer-connectord/codencer-connectord
+/cmd/codencer-connectord/codencer-connectord.exe
+/cmd/codencer-relayd/codencer-relayd
+/cmd/codencer-relayd/codencer-relayd.exe
+/cmd/orchestratord/orchestratord
+/cmd/orchestratord/orchestratord.exe
+/cmd/orchestratorctl/orchestratorctl
+/cmd/orchestratorctl/orchestratorctl.exe
*.test
*.out
service.test
diff --git a/AGENTS.md b/AGENTS.md
index 1488535..c1f9b25 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -9,9 +9,10 @@ You are a principal engineer implementing a production-oriented local orchestrat
## General rules
- follow the docs strictly
+- treat current code, tests, and smoke results as release truth when historical docs disagree
- implement one phase at a time
- do not widen scope
-- do not add cloud
+- do not add new cloud product scope; existing self-host cloud control-plane code is in scope for truthful maintenance
- do not skip tests
- do not skip docs when behavior changes
- do not bypass service boundaries
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 90d6d08..65cf5f7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,17 +7,43 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+## [0.2.0-beta] - 2026-04-23
+
### Added
+- Self-hostable v2 relay path with:
+ - stable daemon instance identity and manifest-backed discovery
+ - outbound authenticated connector sessions with explicit shared-instance allowlists
+ - self-host relay planner API, enrollment token flow, audit persistence, and relay-side MCP tools
+- Cloud control-plane self-host surface with `codencer-cloudd`, `codencer-cloudctl`, and `codencer-cloudworkerd`, plus bootstrap/status/org/workspace/project/token/install/event/audit flows.
+- Cloud installation enable/disable routes and matching `cloudctl install enable|disable` subcommands.
+- Truthful cloud docs and smoke guidance for the bootstrap and control-plane path.
- **OpenClaw (acpx) Adapter**: 🧪 Experimental support for OpenClaw-compatible executors via the standardized ACP bridge.
- Official sequential wrapper examples for bash/zsh, PowerShell, and Python under `examples/automation/`.
- Wrapper-friendly sample task lists and prompt/task inputs for ordered execution.
- New `scripts/smoke_test_v1.sh` for verifying all 6 primary submission modes.
+- Public beta tester guide in `docs/BETA_TESTING.md` with exact local, relay, cloud, planner/client, and provider test-track entrypoints.
+- `make build-supported`, `make verify-beta`, and `make verify-beta-docker` as explicit repo-level verification entrypoints for the supported tracks.
+- Planner-client walkthroughs for ChatGPT, Claude Desktop plus `claude.ai`, and Gemini CLI under `docs/mcp/integrations/`.
+- Per-platform setup walkthroughs for macOS, Windows plus `agent-broker`, WSL, and remote VPS dev-server layouts.
+- Consolidated operator boundary reference in `docs/KNOWN_LIMITATIONS.md`.
+- Operator-facing beta launch notes in `docs/RELEASE_NOTES_v0.2.0-beta.md`.
+- Cross-platform public-testability CI on `macos-latest` and `windows-latest`.
### Changed
-- **Unified v1 Documentation Truth-Pass**: Cleaned and synchronized all public-facing docs (README, AI Guide, Runbook, Automation) for 100% alignment with the CLI contract.
+- Adopted `v0.2.0-beta` as the truthful build/version string for the current v2 local/self-host beta repo state.
+- Rewrote operator-facing v2 docs to match the implemented local/self-host path and current runtime truth.
+- Clarified that the relay is the public remote HTTP/MCP surface and the daemon-local `/mcp/call` endpoint is only a local compatibility/admin surface.
+- Documented current self-host beta boundaries explicitly: best-effort abort, bounded artifact transport, static-token auth, and relay routing that now probes only authorized online shared instances before failing closed.
+- Removed duplicate public connector/relay binary surfaces in favor of the canonical `codencer-connectord` and `codencer-relayd` entrypoints.
+- Tightened abort reporting so Codencer only reports success when the active step really reaches `cancelled`.
+- Removed committed extension dependency/build output directories and kept only the extension manifests plus source.
+- Added relay admin/status routes, connector local status snapshots, and a practical self-host smoke flow for daily operator use.
+- **Unified Documentation Truth-Pass**: Cleaned and synchronized current public-facing docs (README, AI Guide, Runbook, Automation) for alignment with the implemented CLI and relay surfaces.
- Expanded automation documentation to make the shell-planner story explicit and machine-oriented.
- Clarified that ordered task execution in v1 is wrapper-based and not a native workflow engine.
- Hardened smoke/example guidance around strict JSON parsing and machine-safe CLI usage.
+- Clarified the public test-track boundaries so local, relay/runtime, cloud, planner/client, and provider testing route to the right docs without mixing surfaces.
+- Parameterized the Docker cloud image build version through the compose environment instead of hardcoding it only inside the Dockerfile.
## [0.1.0-beta] - 2026-03-28
@@ -49,3 +75,4 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
---
[0.1.0-beta]: https://github.com/lookmanrays/codencer/releases/tag/v0.1.0-beta
+[0.2.0-beta]: https://github.com/lookmanrays/codencer/releases/tag/v0.2.0-beta
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f5f2e2c..c9e6fe3 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
# Contributing to Codencer
-Thank you for your interest in contributing to Codencer! As a **Public Beta (v0.1.0-beta)** project, we are actively looking for feedback on our orchestration protocols, CLI ergonomics, and adapter reliability.
+Thank you for your interest in contributing to Codencer! As an **open-source beta (`v0.2.0-beta`)** project, we are actively looking for feedback on the current local/self-host execution path, CLI ergonomics, and adapter reliability.
## ๐ The Relay Philosophy
Before contributing, please remember that Codencer is a **Defensive Relay**, not a "Brain." We prioritize:
diff --git a/Makefile b/Makefile
index 645826e..8dd24fe 100644
--- a/Makefile
+++ b/Makefile
@@ -1,10 +1,39 @@
+VERSION ?= v0.2.0-beta
+LDFLAGS := -X agent-bridge/internal/app.Version=$(VERSION)
+
all: lint test build
+build-supported: build build-cloud build-mcp-sdk-smoke
+
build:
+ @mkdir -p bin
@echo "==> Building orchestratord..."
- @go build -ldflags "-X agent-bridge/internal/app.Version=v1.0-release-candidate" -o bin/orchestratord ./cmd/orchestratord
+ @go build -ldflags "$(LDFLAGS)" -o bin/orchestratord ./cmd/orchestratord
@echo "==> Building orchestratorctl..."
- @go build -ldflags "-X agent-bridge/internal/app.Version=v1.0-release-candidate" -o bin/orchestratorctl ./cmd/orchestratorctl
+ @go build -ldflags "$(LDFLAGS)" -o bin/orchestratorctl ./cmd/orchestratorctl
+ @echo "==> Building codencer-connectord..."
+ @go build -ldflags "$(LDFLAGS)" -o bin/codencer-connectord ./cmd/codencer-connectord
+ @echo "==> Building codencer-relayd..."
+ @go build -ldflags "$(LDFLAGS)" -o bin/codencer-relayd ./cmd/codencer-relayd
+
+build-cloud:
+ @mkdir -p bin
+ @echo "==> Building codencer-cloudctl..."
+ @go build -ldflags "$(LDFLAGS)" -o bin/codencer-cloudctl ./cmd/codencer-cloudctl
+ @echo "==> Building codencer-cloudd..."
+ @go build -ldflags "$(LDFLAGS)" -o bin/codencer-cloudd ./cmd/codencer-cloudd
+ @echo "==> Building codencer-cloudworkerd..."
+ @go build -ldflags "$(LDFLAGS)" -o bin/codencer-cloudworkerd ./cmd/codencer-cloudworkerd
+
+build-broker:
+ @mkdir -p bin
+ @echo "==> Building agent-broker (nested module)..."
+ @cd cmd/broker && go build -o ../../bin/agent-broker ./...
+
+build-mcp-sdk-smoke:
+ @mkdir -p bin
+ @echo "==> Building mcp-sdk-smoke (official MCP SDK proof helper)..."
+ @go build -o bin/mcp-sdk-smoke ./cmd/mcp-sdk-smoke
test:
@echo "==> Running tests..."
@@ -116,6 +145,38 @@ smoke: build
@echo "==> Running automated smoke test..."
@./scripts/smoke_test.sh
+self-host-smoke: build
+ @echo "==> Running self-host relay/connector smoke test..."
+ @./scripts/self_host_smoke.sh
+
+self-host-smoke-all: build build-mcp-sdk-smoke
+ @echo "==> Running self-host relay/connector smoke test with all optional scenarios..."
+ @SMOKE_SCENARIOS=all ./scripts/self_host_smoke.sh
+
+self-host-smoke-mcp: build build-mcp-sdk-smoke
+ @echo "==> Running self-host relay/connector smoke test with MCP coverage..."
+ @SMOKE_SCENARIOS=status,audit,mcp,mcp-sdk ./scripts/self_host_smoke.sh
+
+cloud-smoke: build-cloud
+ @echo "==> Running cloud control-plane smoke test..."
+ @./scripts/cloud_smoke.sh
+
+cloud-stack-config:
+ @ENV_FILE=deploy/cloud/.env; \
+ if [ ! -f "$$ENV_FILE" ]; then ENV_FILE=deploy/cloud/.env.example; fi; \
+ echo "==> Validating docker compose cloud stack with $$ENV_FILE..."; \
+ docker compose --env-file "$$ENV_FILE" -f deploy/cloud/docker-compose.yml config > /dev/null
+
+cloud-stack-smoke:
+ @echo "==> Running docker-compose cloud stack smoke test..."
+ @./deploy/cloud/smoke.sh
+
+verify-beta: build-supported
+ @./scripts/verify_beta.sh
+
+verify-beta-docker: build-supported
+ @./scripts/verify_beta.sh --docker
+
validate: build
@echo "==> Running Codex validation scenario (Internal Version Bump)..."
@./bin/orchestratorctl run start validation-run-01 validation-project || true
diff --git a/README.md b/README.md
index 921826d..48fe1dc 100644
--- a/README.md
+++ b/README.md
@@ -1,41 +1,177 @@
-# Codencer: The Tactical Orchestration Bridge
+
-Codencer is a tactical orchestration bridge that manages execution, isolation, and high-fidelity audit trails for coding agents. It serves as the **system of record** between a high-level **Planner** (human or LLM) and tactical **Coding Agents** (Codex, Claude).
+# Codencer: The Planner-to-Executor Bridge
-Designed for **local-first, self-hosted developer toolchains**, Codencer provides the missing "relay" layer that ensures every task attempt is isolated, provisioned, and validated before it ever reaches your production branch.
+Codencer is a planner-to-executor bridge (Tactical Orchestration Bridge) for coding work that needs an honest system of record.
+Its core visible asset is the persisted execution trail: runs, steps, attempts, artifacts, validations, and gates.
+
+Instead of planning for the operator, Codencer keeps execution local, isolates each attempt, records the state machine truth, and exposes the evidence a planner or human needs to decide what happens next.
> [!IMPORTANT]
-> **Project Status: Public Beta (v1.0-release-candidate)**.
-> Codencer is a hardened, production-oriented local orchestration bridge. While the core engine is stable and has been verified through rigorous internal audit paths, the API and protocols are finalized for the v1.0 release.
+> **Project Status: Open-source beta for the v2 local/self-host path (`v0.2.0-beta`)**.
+> Codencer is publicly testable for the supported local, relay/runtime, cloud, planner/client, and provider tracks documented in [docs/BETA_TESTING.md](docs/BETA_TESTING.md). Compatibility-only and deferred surfaces remain explicitly outside that beta promise.
+
+## Supported Beta Test Tracks
+
+Use [docs/BETA_TESTING.md](docs/BETA_TESTING.md) as the repo-level tester guide. The quick chooser is:
+
+| Track | Start doc | Build | Proof command | Current boundary |
+| --- | --- | --- | --- | --- |
+| Local-only daemon + CLI | [docs/SETUP.md](docs/SETUP.md) | `make build` | `./scripts/smoke_test_v1.sh` then `make smoke` | Canonical local proof is simulation-first; live adapter claims stay narrow. |
+| Self-host relay / runtime | [docs/SELF_HOST_REFERENCE.md](docs/SELF_HOST_REFERENCE.md) | `make build` | `PLANNER_TOKEN= make self-host-smoke-mcp` | Canonical remote self-host path. |
+| Self-host cloud control plane | [docs/CLOUD_SELF_HOST.md](docs/CLOUD_SELF_HOST.md) | `make build-cloud` | `make cloud-smoke` | Docker baseline and binary-native composed proof are separate. |
+| Planner / client integrations | [docs/mcp/integrations.md](docs/mcp/integrations.md) | `make build build-cloud build-mcp-sdk-smoke` | self-host or cloud smoke with MCP/SDK enabled | ChatGPT-style and Claude-style product paths stay compatibility-only. |
+| Provider connectors | [docs/CLOUD_CONNECTORS.md](docs/CLOUD_CONNECTORS.md) | `make build-cloud` | `make cloud-smoke` plus provider tests | Slack is strongest; Jira is polling-first; the rest remain narrower. |
+
+For a supported non-Docker repo pass:
+
+```bash
+make build-supported
+make verify-beta
+```
+
+For the Docker-backed cloud baseline on a Docker-capable host:
+
+```bash
+make verify-beta-docker
+```
---
## ๐ The Bridge Doctrine
-Codencer is a **Tactical Orchestration Bridge**, not a strategic planner. It handles the **Execution Layer** (isolation, provisioning, monitoring, and evidence) while the **Brain Layer** (human or LLM) handles strategy and decision-making.
+Codencer keeps the planner-to-executor bridge boundary explicit:
+
+- the planner decides what to do
+- the executor does the work
+- Codencer owns the persistent execution truth between them
+
+The state-of-record model is the product surface:
+
+| Record | What it answers |
+| --- | --- |
+| Run | What larger unit of work is being tracked? |
+| Step | What concrete task was submitted? |
+| Attempt | What isolated execution instance tried to satisfy that step? |
+| Artifact | What file, diff, prompt, or result payload was produced? |
+| Validation | What test, lint, or policy check ran, and what happened? |
+| Gate | Where does an operator need to approve, reject, or intervene? |
-- **What it is**: A system of record, a workspace isolator, a validator, and a provider of immutable artifacts.
-- **What it is not**: A planner, a chat UI, a cloud service, or an AI "agent" that thinks about what to do next.
+- **What it is**: A local control plane, state machine, validator, and evidence store for coding execution.
+- **What it is not**: A planner, a chat UI, a workflow brain, or a generic remote shell surface.
```text
-[ Planner (Brain) ] <---------- (ResultSpec) ---------+
- | |
- (TaskSpec) [ Bridge (Codencer) ]
- | |
- +-------------------> [ Agent (Worker) ] <-----+
- (File Edits)
+[ Planner ] ---- submit task ----> [ Codencer ] ---- execute ----> [ Adapter / Executor ]
+ ^ |
+ | v
+ +----- results, artifacts, validations, gates ---- persisted state of record
```
### Core Roles
-- **Planner (Brain)**: You, a Chat UI, or an agentic planner. Decides **what** to do.
-- **Bridge (Codencer)**: Receives the `TaskSpec`, manages workspace isolation (Git Worktrees), enforces policies, and monitors execution.
-- **Coding Agent (Worker)**: The tactical tool performing the actual work (e.g., `codex-agent`, `claude`).
+- **Planner**: Human, script, chat UI, or remote client that decides what to do next.
+- **Codencer**: Accepts the task, creates or locates the run and step, provisions the attempt workspace, enforces the runtime contract, and persists evidence.
+- **Adapter / Executor**: Local execution path such as `codex`, `claude`, `qwen`, `antigravity*`, or `openclaw-acpx`.
+
+## V2 Remote Path
+
+The v2 path keeps the same control-plane split while adding a self-hostable remote surface:
+
+```text
+Planner / Chat
+ -> Relay MCP / Planner API
+ -> Relay Server
+ -> Authenticated Connector (outbound websocket)
+ -> Local Codencer Daemon
+ -> Local Adapter / Executor
+```
+
+Key constraints remain unchanged:
+- planning stays outside Codencer
+- execution stays local
+- the relay is transport and audit, not a planner
+- the connector exposes only a narrow allowlisted proxy to the local daemon
+- no raw remote shell or arbitrary filesystem surface is exposed
+
+### New Binaries
+
+- `bin/codencer-connectord`: enroll with a relay and maintain the outbound authenticated connector session
+- `bin/codencer-relayd`: run the self-hostable relay server, planner-facing API, connector websocket endpoint, and relay-side MCP surface
+- `bin/codencer-cloudctl`: admin CLI for cloud bootstrap, status, org/workspace/project, token, installation, runtime-connector, runtime-instance, event, and audit flows
+- `bin/codencer-cloudd`: cloud control-plane server; can optionally start an internal relay runtime bridge for tenant-scoped Codencer runtime control
+- `bin/codencer-cloudworkerd`: cloud worker for background connector maintenance; Jira is polling-first in the current beta track
+- `bin/agent-broker`: build separately with `make build-broker` when you need the Windows-side agent-broker; it lives under the nested `cmd/broker` module
+
+### Self-Host Quickstart
+
+1. Build the main binaries with `make build`.
+2. Build the Windows-side `agent-broker` separately with `make build-broker` if you need the Windows bridge.
+3. Create a relay config and local planner token:
+ `./bin/codencer-relayd planner-token create --config .codencer/relay/config.json --write-config --name operator --scope '*'`
+4. Start the relay:
+ `./bin/codencer-relayd --config .codencer/relay/config.json`
+5. Start the local daemon near the repo with `make start` or `make start-sim`.
+6. Mint a one-time enrollment token from the running relay:
+ `./bin/codencer-relayd enrollment-token create --config .codencer/relay/config.json --label local-dev --json`
+7. Enroll and run the connector in WSL/Linux next to the daemon:
+ `./bin/codencer-connectord enroll --relay-url http://127.0.0.1:8090 --daemon-url http://127.0.0.1:8085 --enrollment-token <token>`
+ `./bin/codencer-connectord run`
+8. Inspect and control sharing explicitly:
+ `./bin/codencer-connectord discover --config .codencer/connector/config.json`
+ `./bin/codencer-connectord list`
+ `./bin/codencer-connectord share --daemon-url http://127.0.0.1:8085`
+ `./bin/codencer-connectord unshare --instance-id <instance-id>`
+ `./bin/codencer-connectord config`
+9. Inspect relay status, connectors, and advertised instances:
+ `./bin/codencer-relayd status --config .codencer/relay/config.json`
+ `./bin/codencer-relayd connectors --config .codencer/relay/config.json`
+ `./bin/codencer-relayd instances --config .codencer/relay/config.json`
+ `./bin/codencer-relayd audit --config .codencer/relay/config.json --limit 20`
+10. Run the documented smoke path with `make self-host-smoke`, `make self-host-smoke-mcp`, or `make self-host-smoke-all` once the daemon and relay are already running. `self-host-smoke-mcp` includes the official MCP SDK proof helper; `self-host-smoke-all` adds share-control and multi-instance coverage.
+
+Planner-facing relay routes live under `/api/v2`, and the relay-hosted MCP entrypoint is `/mcp` with `/mcp/call` kept as a compatibility path.
+The connector now persists a local Ed25519 identity, `connector_id`, `machine_id`, and an explicit shared-instance allowlist under `.codencer/connector/config.json`.
+The connector also persists a local `.codencer/connector/status.json` snapshot so operators can inspect session state, last heartbeat, and the currently shared instance set without contacting the relay.
+Direct relay lookups for steps, artifacts, and gates now probe only authorized online instances and persist the discovered route, so planner HTTP and MCP flows do not depend on prior observation of those IDs.
+Planner evidence retrieval through the relay now covers result, validations, logs, artifact lists, and artifact content.
+For the end-to-end self-host flow and operating notes, see [docs/SELF_HOST_REFERENCE.md](docs/SELF_HOST_REFERENCE.md), [docs/CONNECTOR.md](docs/CONNECTOR.md), [docs/RELAY.md](docs/RELAY.md), and [docs/mcp/relay_tools.md](docs/mcp/relay_tools.md).
+
+Daemon discovery and evidence notes:
+- `GET /api/v1/instance` now exposes stable repo-local daemon identity plus manifest-backed discovery metadata.
+- The daemon writes a repo-local instance manifest under `.codencer/instance.json` on startup and after Antigravity bind changes.
+- `PATCH /api/v1/runs/{id}` remains best-effort abort. It returns success only when the active step actually reaches `cancelled`; otherwise Codencer leaves an explicit non-cancelled outcome and returns an error instead of claiming a hard kill.
+
+### Cloud Control Plane (Beta Track)
+
+The cloud surface is subordinate to the core bridge model. It adds tenancy, control-plane state, and provider-installation operations; it does not replace the local daemon, the relay bridge, or the run/step/attempt evidence path.
+
+- Build the cloud binaries with `make build-cloud`.
+- Start the cloud server with `./bin/codencer-cloudd --config .codencer/cloud/config.json`.
+- Start it with `--relay-config` when you want cloud to claim and control Codencer runtime connectors and shared instances through the internal relay bridge.
+- Use `./bin/codencer-cloudctl bootstrap` to seed org, workspace, project, membership, and API token state directly in the cloud store.
+- Use `./bin/codencer-cloudctl status|orgs|workspaces|projects|memberships|tokens|install|runtime-connectors|runtime-instances|events|audit` for remote control-plane operations.
+- Run `./bin/codencer-cloudworkerd` only when you have connector installations that need background polling. Jira is polling-first and requires `config.jql` or `config.project_key`; webhook ingest remains deferred in the current beta track.
+- When cloud is running with the relay bridge, the cloud-scoped remote surface is:
+ - HTTP under `/api/cloud/v1/runtime/*`
+ - MCP under `/api/cloud/v1/mcp` with `/api/cloud/v1/mcp/call` kept as a compatibility alias
+- Relay `/mcp` remains the self-host relay MCP surface. Cloud `/api/cloud/v1/mcp` is the tenant-scoped cloud contract.
+- A Docker-based self-host baseline now lives under `deploy/cloud/` and can be smoke-checked with `make cloud-stack-smoke`.
+
+For the cloud docs and status matrix, see [docs/CLOUD.md](docs/CLOUD.md), [docs/CLOUD_SELF_HOST.md](docs/CLOUD_SELF_HOST.md), and [docs/CLOUD_CONNECTORS.md](docs/CLOUD_CONNECTORS.md).
---
## ๐ The Canonical "Day 0" Path (Human-First)
-The standard sequence for performing an audited tactical task:
+The shortest honest path is:
+
+1. Clone a real git checkout and build the binaries.
+2. Start the daemon in simulation or real mode.
+3. Start a run.
+4. Submit a step.
+5. Wait for the step result.
+6. Inspect artifacts, validations, and gates before deciding what happens next.
+
+Concrete sequence:
1. **Clone & Build**: `git clone` the repo โ `make setup build`.
2. **Start the Bridge**: `make start-sim` (for testing) or `make start` (for real agents).
@@ -56,11 +192,11 @@ The standard sequence for performing an audited tactical task:
### Core Guarantees
-- **Step-Isolation**: Each step executes in its own git worktree, preventing cross-task interference.
+- **Step Isolation**: Each step executes in its own git worktree, preventing cross-task interference.
- **Immutable Evidence**: All logs, results, and artifacts are namespaced by Run, Step, and Attempt ID under `.codencer/artifacts/<run_id>/<step_id>/<attempt_id>/`, ensuring full auditability of repeated attempts.
-- **Workspace Provisioning**: Automatically prepares attempt worktree environments (copying `.env`, symlinking `node_modules`, running `post_create` hooks). Codencer includes an **optional Grove-compatible subset** for environment preparation; it does not depend on the Grove CLI and is designed to coexist with existing `.groverc.json` or `grove.yaml` files.
- - *Inspiration*: This layer was inspired in part by [Grove](https://github.com/verbaux/grove).
- - *Thanks*: Special thanks to [@verbaux](https://github.com/verbaux) for the conceptual foundation of local workspace preparation.
+- **Workspace Provisioning**: Codencer prepares attempt worktrees by copying configured files, wiring allowed symlinks, and running optional hooks.
+- **Deterministic Submission Normalization**: Direct input is normalized into a canonical `TaskSpec`, and both the original input and normalized task are preserved as attempt artifacts.
+- **Operator Control**: Gates, retries, approvals, and best-effort abort outcomes are explicit state transitions rather than hidden side effects.
> **Execution Path Note**: Codencer depends on Git Worktrees for isolating task attempts. Therefore, cloning the repository via `git clone` is the **only supported execution path**. Downloading a ZIP source archive will fail during targeted execution.
@@ -68,7 +204,7 @@ The standard sequence for performing an audited tactical task:
## ⚡️ Quickstart: Local Setup
-Get up and running in simulation mode to verify the orchestrator logic.
+Use this path when you want the canonical local beta flow and its evidence surfaces.
### 1. Build & Setup
```bash
@@ -92,10 +228,12 @@ make start
For Claude, Codencer invokes the installed CLI as `claude -p --output-format json`, sends the step prompt on `stdin`, and runs from the isolated attempt workspace as the process `cwd`.
-Current support level for the Claude adapter is **Supported (Beta)**: the wrapper contract is implemented and covered by prompt, normalization, lifecycle, fake-binary integration, and simulation conformance tests, but the repo test suite does not run a live authenticated Claude service call.
+The Claude adapter wrapper path is implemented and test-covered in this repo: prompt shaping, normalization, lifecycle behavior, fake-binary integration, and simulation conformance are exercised, but the repo test suite does not run a live authenticated Claude service call. Treat `/api/v1/compatibility` plus your actual runtime environment as the source of truth for local adapter readiness.
+
+`/api/v1/compatibility` is a runtime diagnostic surface, not a beta-support certificate. It reports current binary availability, simulation mode, and local binding state; it does not promote an adapter into the beta promise by itself.
### 3. Run Your First Tactical Task
-Submit a task and wait for the bridge to report results. For the full auditing sequence, see the **[Canonical Local Runbook](docs/EXAMPLES.md)**.
+Submit a task, wait for the step, then inspect the recorded result. For the full local operator sequence, see the **[Canonical Local Runbook](docs/EXAMPLES.md)**.
```bash
# 1. Start a new mission (System of Record)
@@ -112,6 +250,14 @@ Submit a task and wait for the bridge to report results. For the full auditing s
./bin/orchestratorctl step result <step-id>
```
+For the repo-proven legacy same-run local parity path, run the six-input smoke directly. If no daemon is already reachable, the script auto-starts a temporary simulation daemon and exercises the current local wait/result contract end to end:
+
+```bash
+./scripts/smoke_test_v1.sh
+./scripts/smoke_test_v1.sh
+make smoke
+```
+
### 3.2 Standard Submission Flows
Codencer supports both structured and convenience input via terminal:
@@ -120,8 +266,8 @@ Codencer supports both structured and convenience input via terminal:
Ideal for large, human-readable prompts without creating a file:
```bash
cat <<'EOF' | ./bin/orchestratorctl submit run-01 --stdin --title "Fix Lints" --adapter codex --wait --json
-Fix all lint errors in the internal/app package.
-Exclude the test files.
+Fix all lint errors in the internal/app package.
+Exclude the test files.
Use the 'go-lint' tool.
EOF
```
@@ -133,7 +279,7 @@ echo '{"version":"v1","goal":"Update README"}' | ./bin/orchestratorctl submit ru
```
#### C. Broker-Backed Execution
-Directly target an IDE-bound agent via the Antigravity Broker using direct input:
+Directly target an IDE-bound agent via the agent-broker bridge using direct input:
```bash
./bin/orchestratorctl submit run-01 --goal "Check UI" --adapter antigravity-broker --wait --json
```
@@ -151,11 +297,11 @@ Relay tasks to an OpenClaw-compatible executor via the standardized ACP bridge.
## ๐ The Audit Trail (Authoritative Evidence)
-Codencer ensures that every tactical execution is backed by high-fidelity evidence. Follow the **Canonical Sequence** in `EXAMPLES.md` to audit your task:
+The fastest way to understand Codencer is to inspect the evidence surfaces in order:
-1. **Authoritative Summary**: `step result ` (Start here).
-2. **Raw Execution Trail**: `step logs ` (The agent's brain).
-3. **Audit Evidence**: `step artifacts ` and `step validations ` (The proof).
+1. **Authoritative Summary**: `step result` shows the persisted run/step outcome.
+2. **Raw Execution Trail**: `step logs` shows the executor output captured for that step attempt.
+3. **Audit Evidence**: `step artifacts` and `step validations` show the files and checks Codencer recorded.
- **`completed`**: Goal met, all tests passed.
- **`completed_with_warnings`**: Success, but with non-critical issues (lint/tests).
@@ -183,7 +329,7 @@ For Claude attempts specifically, the standard evidence set is:
Codencer supports two submit styles:
1. **Canonical TaskSpec**: submit a full YAML or JSON task definition when you need rich structure.
-2. **Direct convenience input**: submit a prompt/goal directly and let the CLI deterministically normalize it into `TaskSpec`.
+2. **Direct convenience input**: submit a prompt or goal directly and let the CLI deterministically normalize it into `TaskSpec`.
Direct input is intentionally narrow. It does not plan, decompose work, merge multiple sources, or invent strategy.
@@ -237,7 +383,7 @@ Official wrapper examples live in [examples/automation](examples/automation):
- [run_tasks.ps1](examples/automation/run_tasks.ps1)
- [run_tasks.py](examples/automation/run_tasks.py)
-This keeps Codencer sharp and narrow as a bridge rather than a workflow brain.
+This keeps Codencer narrow as a bridge rather than a workflow brain.
For a deeper dive into agent installation and advanced configuration, see the **[Environmental Reference Guide](docs/SETUP.md)**.
@@ -245,54 +391,48 @@ For a deeper dive into agent installation and advanced configuration, see the **
## ๐ก Why Codencer?
-Agent-driven coding is non-deterministic. Codencer provides the guardrails:
+Codencer exists to keep tactical coding work inspectable and recoverable:
1. **Workspace Safety**: Agents run in isolated Git Worktrees. Diffs are captured and validated before any commit.
2. **Audit-Proof Ledger**: Every attempt is recorded in a local SQLite database (embedded via CGO) with SHA-256 hashes of all artifacts.
-3. **Idempotency**: Interrupted tasks can be resumed or securely analyzed post-crash.
-4. **Validation-First**: Tasks only "complete" when your defined validation commands (tests, linters) pass.
+3. **Idempotent Recovery Posture**: Interrupted tasks can be retried or analyzed from persisted run, step, and attempt state.
+4. **Validation-First Outcomes**: Tasks only complete when the recorded validation contract passes.
+5. **Remote Control Without Remote Execution**: Relay and cloud surfaces expose narrow planner APIs while execution and artifacts remain on the daemon side.
---
-## โ ๏ธ Known Limitations (Beta/MVP)
+## ⚠️ Known Limitations (Public Beta)
-As a local-first Beta/MVP, Codencer has the following constraints:
-- **Relay Only**: The bridge does not "think" or plan; it only executes what the Planner instructs.
-- **Single-User**: Designed for local development; no multi-user or cloud concurrency.
-- **Static Extension Routing**: The experimental VS Code extension assumes the daemon binds at `127.0.0.1:8085`. Dynamic connection configuration for running instances on multiple ports is not yet natively surfaced in the IDE client.
-- **Agent Dependency**: "Real Mode" efficacy is strictly bound to the quality of the underlying agent (Codex, Claude, etc.).
-- **Manual Decisions**: The bridge reports terminal states; all recovery or retry decisions remain with the human operator or external planner.
-- **No Native Workflow Engine**: Ordered task lists are handled by wrappers/scripts outside Codencer core in v1.
+Codencer is beta-track ready within the documented surfaces, but the repo keeps several boundaries explicit:
+- **No Planner In Core**: Codencer never decomposes, prioritizes, or decides strategy. The planner still owns those decisions.
+- **Best-Effort Abort**: `PATCH /api/v1/runs/{id}` and relay abort flows are honest but not universal hard-kill guarantees. A run is only reported cancelled when the adapter actually stops.
+- **Opportunistic Remote Routing**: Relay step, gate, and artifact routing still learns and persists route hints opportunistically, but direct remote lookups also probe authorized online shared instances before failing closed.
+- **Bounded Artifact Transport**: Connector transport rejects oversized artifact bodies instead of turning the relay into a bulk file tunnel. Large binary transfer is intentionally limited.
+- **Static Self-Host Auth**: Planner auth is static bearer-token based, suitable for narrow self-host beta use but not enterprise IAM.
+- **Ordered Execution Is Wrapper-Based**: Sequential workflows remain wrapper- or planner-driven outside Codencer core.
----
+For the consolidated operator-facing boundary list, see [docs/KNOWN_LIMITATIONS.md](docs/KNOWN_LIMITATIONS.md).
-### ๐ Maturity & Capability Matrix
+### Runtime Capability Truth
-Codencer is in **Public Beta (v1.0-release-candidate)**. Use this to understand the hard stability contract versus experimental value-adds.
+Adapter availability is runtime-derived, not a hardcoded support matrix. The source of truth is:
+- `GET /api/v1/compatibility`
+- `GET /api/v1/instance`
+- `./bin/orchestratorctl instance --json`
-#### ๐ v1 Stable Core (The Release Contract)
-| Feature Area | Status | Description |
-| :--- | :--- | :--- |
-| **Local Bridge Core** | โ
**Stable** | Persistence, state machine, Git Worktrees. |
-| **Provisioning Layer**| โ
**Stable** | Native copy/symlink layer; optional Grove subset. |
-| **Codex Adapter** | โ
**Stable** | Primary high-fidelity relay for `codex-agent`. |
-| **Antigravity Metadata** | โ
**Stable** | Broker-backed context, task IDs, and provenance. |
-| **Antigravity Broker** | โ
**Stable** | Cross-side (WSL/Windows) bridge for IDE instances. |
-| **OpenClaw ACPX** | ๐งช **Experimental (Alpha)** | Standardized ACP bridge to OpenClaw ecosystem. |
-| **Simulation Mode** | โ
**Stable** | Stub-based validation (Bridge-only smoke tests). |
-
-#### ๐งช Experimental Extensions (Outside v1 Contract)
-| Feature Area | Status | Description |
-| :--- | :--- | :--- |
-| **IDE Chat Bridge** | ๐งช **Prototype** | Proxy-mediated file access via VS Code. |
-| **Cloud / Multi-User** | ๐ซ **Non-Goal** | Codencer is strictly local-first and self-hosted. |
+Those surfaces reflect actual registered adapters, simulation mode, binary availability, and Antigravity binding state at runtime.
+
+### WSL / Windows / Antigravity
+
+The practical cross-side model is:
+- daemon, repos, worktrees, and artifacts in WSL/Linux
+- connector on the same side as the daemon by default
+- agent-broker and IDE on Windows when needed
+- relay as a separate remote control plane only
+Use `orchestratorctl antigravity bind <instance-id>` to bind this repo to an active Antigravity instance. Binding selects the repo-scoped target, but execution still stays local and still depends on the chosen adapter profile.
-### ๐ Direct-Local Antigravity Integration
-The `antigravity` adapter uses a **direct-local** model to control active Antigravity instances via RPC (Connect over HTTPS).
-- **Primary Model**: Codencer and Antigravity usually run on the **same OS side** (e.g., both in Linux or both in Windows).
-- **WSL โ Windows (Experimental)**: Cross-side communication is supported via the shared loopback (`127.0.0.1`). Codencer in WSL can discover Windows-side instances if the host's `.gemini` directory is reachable (e.g., via `/mnt/c`).
-- **Binding**: Use `orchestratorctl antigravity bind ` to link this repository to an active Antigravity process. Binding establishes repo-scoped target identity and connectivity; execution still depends on the task's explicit `adapter_profile`.
+For the full trust-boundary and topology guidance, see [docs/WSL_WINDOWS_ANTIGRAVITY.md](docs/WSL_WINDOWS_ANTIGRAVITY.md).
### ๐ Terminal Step States
Codencer distinguishes between different failure modes to help you recover faster:
@@ -308,22 +448,37 @@ Codencer distinguishes between different failure modes to help you recover faste
## ๐งช Simulation vs. Real Execution
-1. **Simulation Mode** (`make start-sim`): Only validates the **Orchestrator**. It tests if the ledger, state machine, and CLI are working. It does **not** test if the agent can actually code.
-2. **Real Mode**: Tests the full end-to-end loop with real agents. **Codex-agent** is the primary supported path; others are in early beta.
+1. **Simulation Mode** (`make start-sim`): Validates the daemon, state machine, CLI, and evidence path without requiring a live executor.
+2. **Real Mode**: Exercises the end-to-end loop with real agents. `codex` is the primary intended local beta adapter, but the checked-in repo proof remains simulation-heavy; other adapters stay narrower unless your runtime proves them ready.
+
+Current local adapter proof is intentionally narrow:
+- `codex`: primary intended local beta adapter, but current repo proof is still simulation-heavy rather than live-binary proven.
+- `claude`: strongest adapter-specific wrapper proof in repo, but still fake-binary and non-authenticated.
+- `qwen`: simulation/conformance proof only; kept as secondary.
+- `antigravity` and `antigravity-broker`: secondary, environment-specific proof only.
+- `openclaw-acpx` and `ide-chat`: experimental/deferred, not part of the local beta promise.
+- daemon-local `/mcp/call`: compatibility/admin bridge only, not the public planner MCP contract.
---
## ๐ Documentation
-Review the following guides to get started with Codencer.
+Use the following docs to choose the right supported surface quickly.
### โก๏ธ User Guidance (Start Here)
+- **[Public Beta Test Tracks](docs/BETA_TESTING.md)** — Fastest way to choose the right supported test lane and command set.
+- **[Environmental Reference](docs/SETUP.md)** — Setup hub, platform chooser, native Linux guidance, and track routing.
+- **[Known Limitations](docs/KNOWN_LIMITATIONS.md)** — Consolidated operator-grade beta boundaries from current repo truth.
- **[Operator Runbook](docs/OPERATOR_RUNBOOK.md)** โ The canonical "Day 0" flow for humans.
- **[AI Operator Guide](docs/AI_OPERATOR_GUIDE.md)** โ Canonical rules for AI planners and assistants.
- **[CLI Automation Patterns](docs/CLI_AUTOMATION.md)** โ Machine-safe JSON mode and sequential loops.
-- **[Environmental Reference](docs/SETUP.md)** โ Prerequisites, configuration, and daemon management.
+- **[Self-Host Relay / Runtime Guide](docs/SELF_HOST_REFERENCE.md)** — End-to-end relay/connector operator flow.
+- **[Self-Host Cloud Control Plane Guide](docs/CLOUD_SELF_HOST.md)** — Bootstrap, smoke, and composed cloud runtime guidance.
+- **[Planner / Client Integration Notes](docs/mcp/integrations.md)** — Relay/cloud HTTP + MCP compatibility matrix.
+- **[Cloud Connector Matrix](docs/CLOUD_CONNECTORS.md)** — Per-provider install/test depth and limitations.
- **[Troubleshooting Guide](docs/TROUBLESHOOTING.md)** โ Resolving infrastructure vs goal failures.
-- **[Architecture Overview](docs/02_architecture.md)** โ Deep dive into the Bridge model.
+- **[Architecture Overview](docs/02_architecture.md)** — Current daemon, connector, relay, and trust-boundary model.
+- **[WSL / Windows / Antigravity Topology](docs/WSL_WINDOWS_ANTIGRAVITY.md)** — Practical cross-side deployment guidance.
### ๐ Project Governance & Maintenance (Internal)
- **[Gap Audit & Roadmap](docs/internal/GAP_AUDIT.md)** โ Current V1 release blockers and debt.
@@ -334,7 +489,11 @@ Review the following guides to get started with Codencer.
---
## โ License
+
+Codencer is released under the **MIT License**. See the [LICENSE](LICENSE) file for the full text.
+
## ๐ One-Repo-One-Instance Model
+
Codencer is designed around an explicit, repo-bound execution model:
- **1 Git Clone = 1 Daemon Instance**: Each repository checkout manages its own ledger and workspaces.
- **Explicit Targeting**: Start the daemon with `--repo-root ` to anchor all relative state (DB, artifacts) to that project, regardless of the startup directory.
@@ -346,5 +505,3 @@ Codencer is designed around an explicit, repo-bound execution model:
- **Identity Verification**: Always use `./bin/orchestratorctl instance --json` to verify which repository and port a daemon is serving before submitting tasks.
For more details, see **[Setup & Multi-Instance Workflows](docs/SETUP.md)**.
-
-Codencer is released under the **MIT License**. See the [LICENSE](LICENSE) file for the full text.
diff --git a/cmd/broker/README.md b/cmd/broker/README.md
index 7008ff0..894ee8d 100644
--- a/cmd/broker/README.md
+++ b/cmd/broker/README.md
@@ -1,6 +1,6 @@
-# Antigravity Discovery Broker
+# Agent Broker
-A lightweight same-side discovery and execution service for bridging Codencer (WSL) and Antigravity (Windows). The broker acts as a headless, local-only proxy that handles LS discovery, workspace binding, and cascade execution.
+A lightweight same-side discovery and execution service for bridging Codencer (WSL) and Windows IDE-side agent contexts. The agent-broker acts as a headless, local-only proxy that handles LS discovery, workspace binding, and cascade execution.
## Configuration
@@ -12,10 +12,10 @@ The broker is configured via environment variables:
## Build & Run
### 1. Build (Windows Host)
-It is recommended to run the broker natively on the Windows host where the IDE is running.
+It is recommended to run the agent-broker natively on the Windows host where the IDE is running.
```powershell
# In PowerShell (Windows)
-go build -o agent-broker.exe main.go
+make build-broker
.\agent-broker.exe
```
@@ -25,15 +25,20 @@ Point Codencer to the broker's endpoint:
export CODENCER_ANTIGRAVITY_BROKER_URL=http://localhost:8088
```
+Practical placement:
+- keep the repo, daemon, connector, worktrees, and artifacts in WSL/Linux
+- keep the broker on the Windows/IDE side
+- keep the relay separate; it is not the broker
+
## API Reference
### Health & Discovery
- `GET /health`: Basic health check.
- `GET /version`: Version info.
-- `GET /instances`: Lists all discovered Antigravity instances on the host.
+- `GET /instances`: Lists all discovered IDE-side agent instances on the host.
### Binding Management
-Binding is repo-specific. Each repository on the guest machine can be bound to a separate Antigravity instance.
+Binding is repo-specific. Each repository on the guest machine can be bound to a separate IDE-side instance.
- `GET /binding?repo_root=`: Returns the active service instance for the repo.
- `POST /binding`: Bind repo to an instance (JSON: `{"pid": , "repo_root": ""}`).
@@ -49,4 +54,4 @@ Binding is repo-specific. Each repository on the guest machine can be bound to a
## Persistence
Binding state is persisted to `~/.gemini/antigravity/broker_binding.json` on the host machine.
-Task sessions are currently kept in-memory; restarting the broker will orphan active tasks.
+Task sessions are currently kept in-memory; restarting the agent-broker will orphan active tasks.
diff --git a/cmd/broker/agent-broker b/cmd/broker/agent-broker
deleted file mode 100755
index b512c8f..0000000
Binary files a/cmd/broker/agent-broker and /dev/null differ
diff --git a/cmd/codencer-cloudctl/main.go b/cmd/codencer-cloudctl/main.go
new file mode 100644
index 0000000..9f1b149
--- /dev/null
+++ b/cmd/codencer-cloudctl/main.go
@@ -0,0 +1,849 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+
+ "agent-bridge/internal/cloud"
+ cloudconnectors "agent-bridge/internal/cloud/connectors"
+)
+
+func main() {
+ if err := run(os.Args[1:], os.Stdout, os.Stderr); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+func run(args []string, stdout, stderr io.Writer) error {
+ if len(args) == 0 {
+ return fmt.Errorf("usage: codencer-cloudctl [flags]")
+ }
+ switch args[0] {
+ case "bootstrap":
+ return runBootstrap(args[1:], stdout, stderr)
+ case "status":
+ target, asJSON, err := parseSimpleTarget("status", args[1:], stderr)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/cloud/v1/status", asJSON, stdout)
+ case "orgs":
+ return runResource("orgs", "/api/cloud/v1/orgs", args[1:], stdout, stderr)
+ case "workspaces":
+ return runResource("workspaces", "/api/cloud/v1/workspaces", args[1:], stdout, stderr)
+ case "projects":
+ return runResource("projects", "/api/cloud/v1/projects", args[1:], stdout, stderr)
+ case "memberships":
+ return runMemberships(args[1:], stdout, stderr)
+ case "tokens":
+ return runTokens(args[1:], stdout, stderr)
+ case "install":
+ return runInstall(args[1:], stdout, stderr)
+ case "runtime-connectors":
+ return runRuntimeConnectors(args[1:], stdout, stderr)
+ case "runtime-instances":
+ return runRuntimeInstances(args[1:], stdout, stderr)
+ case "events":
+ return runEvents(args[1:], stdout, stderr)
+ case "audit":
+ return runAudit(args[1:], stdout, stderr)
+ default:
+ return fmt.Errorf("unknown cloudctl command %q", args[0])
+ }
+}
+
+func runBootstrap(args []string, stdout, stderr io.Writer) error {
+ fs := flag.NewFlagSet("bootstrap", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ configPath := fs.String("config", "", "Cloud config path")
+ orgSlug := fs.String("org-slug", "default-org", "Organization slug")
+ orgName := fs.String("org-name", "Default Org", "Organization name")
+ workspaceSlug := fs.String("workspace-slug", "default-workspace", "Workspace slug")
+ workspaceName := fs.String("workspace-name", "Default Workspace", "Workspace name")
+ projectSlug := fs.String("project-slug", "default-project", "Project slug")
+ projectName := fs.String("project-name", "Default Project", "Project name")
+ tokenName := fs.String("token-name", "operator", "Bootstrap token name")
+ memberName := fs.String("member-name", "Bootstrap Owner", "Bootstrap membership display name")
+ memberEmail := fs.String("member-email", "", "Bootstrap membership email")
+ asJSON := fs.Bool("json", false, "Print JSON output")
+ var scopes multiFlag
+ fs.Var(&scopes, "scope", "Token scope; repeatable")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+ if len(scopes) == 0 {
+ scopes = append(scopes,
+ "cloud:admin", "cloud:read",
+ "orgs:read", "orgs:write",
+ "workspaces:read", "workspaces:write",
+ "projects:read", "projects:write",
+ "memberships:read", "memberships:write",
+ "tokens:read", "tokens:write",
+ "installations:read", "installations:write",
+ "runtime_connectors:read", "runtime_connectors:write",
+ "runtime_instances:read",
+ "runs:read", "runs:write",
+ "steps:read", "steps:write",
+ "artifacts:read",
+ "gates:read", "gates:write",
+ "events:read", "audit:read",
+ )
+ }
+
+ cfg, err := cloud.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ store, err := cloud.OpenStore(cfg.DBPath, cfg.MasterKey)
+ if err != nil {
+ return err
+ }
+ defer store.Close()
+
+ ctx := context.Background()
+
+ org, err := store.CreateOrg(ctx, cloud.Org{Slug: *orgSlug, Name: *orgName})
+ if err != nil && !strings.Contains(err.Error(), "UNIQUE") {
+ return err
+ }
+ if org == nil {
+ org, err = findOrCreateOrg(ctx, store, *orgSlug)
+ if err != nil {
+ return err
+ }
+ }
+ workspace, err := store.CreateWorkspace(ctx, cloud.Workspace{OrgID: org.ID, Slug: *workspaceSlug, Name: *workspaceName})
+ if err != nil && !strings.Contains(err.Error(), "UNIQUE") {
+ return err
+ }
+ if workspace == nil {
+ workspace, err = findOrCreateWorkspace(ctx, store, org.ID, *workspaceSlug)
+ if err != nil {
+ return err
+ }
+ }
+ project, err := store.CreateProject(ctx, cloud.Project{OrgID: org.ID, WorkspaceID: workspace.ID, Slug: *projectSlug, Name: *projectName})
+ if err != nil && !strings.Contains(err.Error(), "UNIQUE") {
+ return err
+ }
+ if project == nil {
+ project, err = findOrCreateProject(ctx, store, workspace.ID, *projectSlug)
+ if err != nil {
+ return err
+ }
+ }
+ membership, err := store.CreateMembership(ctx, cloud.Membership{
+ OrgID: org.ID,
+ WorkspaceID: workspace.ID,
+ ProjectID: project.ID,
+ Name: *memberName,
+ Email: *memberEmail,
+ Role: cloud.RoleOrgOwner,
+ })
+ if err != nil {
+ return err
+ }
+ rawToken, err := cloud.GenerateAPIToken()
+ if err != nil {
+ return err
+ }
+ token, err := store.CreateAPIToken(ctx, cloud.APIToken{
+ OrgID: org.ID,
+ WorkspaceID: workspace.ID,
+ ProjectID: project.ID,
+ MembershipID: membership.ID,
+ Name: *tokenName,
+ SubjectType: "membership",
+ SubjectName: membership.Name,
+ Scopes: append([]string(nil), scopes...),
+ }, rawToken)
+ if err != nil {
+ return err
+ }
+ payload := map[string]any{
+ "org": org,
+ "workspace": workspace,
+ "project": project,
+ "membership": membership,
+ "token": rawToken,
+ "record": token,
+ }
+ return printOutput(stdout, payload, *asJSON)
+}
+
+func runResource(name, path string, args []string, stdout, stderr io.Writer) error {
+ if len(args) == 0 || args[0] == "list" {
+ target, query, asJSON, err := parseTargetWithFilter(name, trimListArgs(args), stderr)
+ if err != nil {
+ return err
+ }
+ return target.get(path+query, asJSON, stdout)
+ }
+ if args[0] != "create" {
+ return fmt.Errorf("usage: codencer-cloudctl %s [list|create] [flags]", name)
+ }
+ target, asJSON, body, err := parseCreateResourceTarget(name, args[1:], stderr)
+ if err != nil {
+ return err
+ }
+ return target.post(path, body, asJSON, stdout)
+}
+
+func runTokens(args []string, stdout, stderr io.Writer) error {
+ if len(args) == 0 || args[0] == "list" {
+ target, query, asJSON, err := parseTargetWithFilter("tokens", trimListArgs(args), stderr)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/cloud/v1/tokens"+query, asJSON, stdout)
+ }
+ switch args[0] {
+ case "create":
+ target, asJSON, body, err := parseCreateTokenTarget(args[1:], stderr)
+ if err != nil {
+ return err
+ }
+ return target.post("/api/cloud/v1/tokens", body, asJSON, stdout)
+ case "revoke":
+ fs := flag.NewFlagSet("tokens revoke", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ tokenID := fs.String("token-id", "", "Token record id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(*tokenID) == "" {
+ return fmt.Errorf("--token-id is required")
+ }
+ return target.post("/api/cloud/v1/tokens/"+*tokenID+"/revoke", nil, asJSON, stdout)
+ default:
+ return fmt.Errorf("usage: codencer-cloudctl tokens [list|create|revoke]")
+ }
+}
+
+func runMemberships(args []string, stdout, stderr io.Writer) error {
+ if len(args) == 0 || args[0] == "list" {
+ target, query, asJSON, err := parseTargetWithFilter("memberships", trimListArgs(args), stderr)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/cloud/v1/memberships"+query, asJSON, stdout)
+ }
+ switch args[0] {
+ case "create":
+ target, asJSON, body, err := parseCreateMembershipTarget(args[1:], stderr)
+ if err != nil {
+ return err
+ }
+ return target.post("/api/cloud/v1/memberships", body, asJSON, stdout)
+ case "get":
+ fs := flag.NewFlagSet("memberships get", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ membershipID := fs.String("membership-id", "", "Membership id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(*membershipID) == "" {
+ return fmt.Errorf("--membership-id is required")
+ }
+ return target.get("/api/cloud/v1/memberships/"+*membershipID, asJSON, stdout)
+ case "enable", "disable":
+ fs := flag.NewFlagSet("memberships "+args[0], flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ membershipID := fs.String("membership-id", "", "Membership id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(*membershipID) == "" {
+ return fmt.Errorf("--membership-id is required")
+ }
+ return target.post("/api/cloud/v1/memberships/"+*membershipID+"/"+args[0], nil, asJSON, stdout)
+ default:
+ return fmt.Errorf("usage: codencer-cloudctl memberships [list|create|get|enable|disable]")
+ }
+}
+
+func runInstall(args []string, stdout, stderr io.Writer) error {
+ if len(args) == 0 || args[0] == "list" {
+ target, query, asJSON, err := parseTargetWithFilter("install", trimListArgs(args), stderr)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/cloud/v1/installations"+query, asJSON, stdout)
+ }
+ switch args[0] {
+ case "create":
+ target, asJSON, body, err := parseCreateInstallTarget(args[1:], stderr)
+ if err != nil {
+ return err
+ }
+ return target.post("/api/cloud/v1/installations", body, asJSON, stdout)
+ case "get":
+ fs := flag.NewFlagSet("install get", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ installationID := fs.String("installation-id", "", "Installation id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if *installationID == "" {
+ return fmt.Errorf("--installation-id is required")
+ }
+ return target.get("/api/cloud/v1/installations/"+*installationID, asJSON, stdout)
+ case "validate":
+ fs := flag.NewFlagSet("install validate", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ installationID := fs.String("installation-id", "", "Installation id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if *installationID == "" {
+ return fmt.Errorf("--installation-id is required")
+ }
+ return target.post("/api/cloud/v1/installations/"+*installationID+"/validate", nil, asJSON, stdout)
+ case "enable", "disable":
+ fs := flag.NewFlagSet("install "+args[0], flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ installationID := fs.String("installation-id", "", "Installation id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if *installationID == "" {
+ return fmt.Errorf("--installation-id is required")
+ }
+ return target.post("/api/cloud/v1/installations/"+*installationID+"/"+args[0], nil, asJSON, stdout)
+ case "action":
+ target, installationID, asJSON, body, err := parseInstallActionTarget(args[1:], stderr)
+ if err != nil {
+ return err
+ }
+ return target.post("/api/cloud/v1/installations/"+installationID+"/actions", body, asJSON, stdout)
+ default:
+ return fmt.Errorf("usage: codencer-cloudctl install [list|create|get|validate|enable|disable|action]")
+ }
+}
+
+func runRuntimeConnectors(args []string, stdout, stderr io.Writer) error {
+ if len(args) == 0 || args[0] == "list" {
+ target, query, asJSON, err := parseTargetWithFilter("runtime-connectors", trimListArgs(args), stderr)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/cloud/v1/runtime/connectors"+query, asJSON, stdout)
+ }
+ switch args[0] {
+ case "claim":
+ target, asJSON, body, err := parseRuntimeConnectorClaimTarget(args[1:], stderr)
+ if err != nil {
+ return err
+ }
+ return target.post("/api/cloud/v1/runtime/connectors", body, asJSON, stdout)
+ case "get":
+ fs := flag.NewFlagSet("runtime-connectors get", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ runtimeConnectorID := fs.String("runtime-connector-id", "", "Cloud runtime connector record id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(*runtimeConnectorID) == "" {
+ return fmt.Errorf("--runtime-connector-id is required")
+ }
+ return target.get("/api/cloud/v1/runtime/connectors/"+*runtimeConnectorID, asJSON, stdout)
+ case "enable", "disable", "sync":
+ fs := flag.NewFlagSet("runtime-connectors "+args[0], flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ runtimeConnectorID := fs.String("runtime-connector-id", "", "Cloud runtime connector record id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(*runtimeConnectorID) == "" {
+ return fmt.Errorf("--runtime-connector-id is required")
+ }
+ return target.post("/api/cloud/v1/runtime/connectors/"+*runtimeConnectorID+"/"+args[0], nil, asJSON, stdout)
+ case "instances":
+ fs := flag.NewFlagSet("runtime-connectors instances", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ runtimeConnectorID := fs.String("runtime-connector-id", "", "Cloud runtime connector record id")
+ includeUnshared := fs.Bool("include-unshared", false, "Include remembered but currently unshared instances")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(*runtimeConnectorID) == "" {
+ return fmt.Errorf("--runtime-connector-id is required")
+ }
+ query := ""
+ if *includeUnshared {
+ query = "?include_unshared=true"
+ }
+ return target.get("/api/cloud/v1/runtime/connectors/"+*runtimeConnectorID+"/instances"+query, asJSON, stdout)
+ default:
+ return fmt.Errorf("usage: codencer-cloudctl runtime-connectors [list|claim|get|enable|disable|sync|instances]")
+ }
+}
+
+func runRuntimeInstances(args []string, stdout, stderr io.Writer) error {
+ if len(args) == 0 || args[0] == "list" {
+ target, query, asJSON, err := parseRuntimeInstanceListTarget(trimListArgs(args), stderr)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/cloud/v1/runtime/instances"+query, asJSON, stdout)
+ }
+ if args[0] != "get" {
+ return fmt.Errorf("usage: codencer-cloudctl runtime-instances [list|get]")
+ }
+ fs := flag.NewFlagSet("runtime-instances get", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ instanceID := fs.String("instance-id", "", "Runtime instance id")
+ target, asJSON, err := parseHTTPFlags(fs, args[1:])
+ if err != nil {
+ return err
+ }
+ if strings.TrimSpace(*instanceID) == "" {
+ return fmt.Errorf("--instance-id is required")
+ }
+ return target.get("/api/cloud/v1/runtime/instances/"+*instanceID, asJSON, stdout)
+}
+
+func runEvents(args []string, stdout, stderr io.Writer) error {
+ target, query, asJSON, err := parseTargetWithFilter("events", trimListArgs(args), stderr)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/cloud/v1/events"+query, asJSON, stdout)
+}
+
+func runAudit(args []string, stdout, stderr io.Writer) error {
+ target, query, asJSON, err := parseTargetWithFilter("audit", trimListArgs(args), stderr)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/cloud/v1/audit"+query, asJSON, stdout)
+}
+
+type target struct {
+ cloudURL string
+ token string
+}
+
+func (t target) get(path string, asJSON bool, stdout io.Writer) error {
+ req, err := http.NewRequest(http.MethodGet, strings.TrimRight(t.cloudURL, "/")+path, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+t.token)
+ return t.do(req, asJSON, stdout)
+}
+
+func parseSimpleTarget(name string, args []string, stderr io.Writer) (target, bool, error) {
+ fs := flag.NewFlagSet(name, flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ return parseHTTPFlags(fs, args)
+}
+
+func (t target) post(path string, body []byte, asJSON bool, stdout io.Writer) error {
+ req, err := http.NewRequest(http.MethodPost, strings.TrimRight(t.cloudURL, "/")+path, bytes.NewReader(body))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+t.token)
+ if body != nil {
+ req.Header.Set("Content-Type", "application/json")
+ }
+ return t.do(req, asJSON, stdout)
+}
+
+// do executes the request and renders the response body to stdout.
+// Status >= 400 becomes an error carrying the trimmed response text.
+// With --json the raw body is printed; otherwise the body is pretty-printed,
+// falling back to the raw bytes when the payload is not valid JSON.
+func (t target) do(req *http.Request, asJSON bool, stdout io.Writer) error {
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ data, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode >= 400 {
+ return fmt.Errorf("%s: %s", resp.Status, strings.TrimSpace(string(data)))
+ }
+ if asJSON {
+ _, err = fmt.Fprintln(stdout, string(data))
+ return err
+ }
+ var pretty bytes.Buffer
+ if err := json.Indent(&pretty, data, "", " "); err != nil {
+ // Not JSON (or malformed) — emit the body verbatim instead of failing.
+ _, err = fmt.Fprintln(stdout, string(data))
+ return err
+ }
+ _, err = fmt.Fprintln(stdout, pretty.String())
+ return err
+}
+
+// parseHTTPFlags registers the flags shared by every cloudctl subcommand
+// (--cloud-url, --token, --json) on fs, parses args, and returns the resolved
+// target plus the raw-JSON preference. A blank/whitespace token is rejected.
+func parseHTTPFlags(fs *flag.FlagSet, args []string) (target, bool, error) {
+ cloudURL := fs.String("cloud-url", "http://127.0.0.1:8190", "Cloud base URL")
+ token := fs.String("token", "", "Cloud API bearer token")
+ asJSON := fs.Bool("json", false, "Print raw JSON")
+ if err := fs.Parse(args); err != nil {
+ return target{}, false, err
+ }
+ if strings.TrimSpace(*token) == "" {
+ return target{}, false, fmt.Errorf("--token is required")
+ }
+ return target{cloudURL: *cloudURL, token: *token}, *asJSON, nil
+}
+
+// parseTargetWithFilter parses the common HTTP flags plus the shared scope
+// filters (org/workspace/project/installation id, limit) and returns the
+// target together with a ready-to-append query string ("" when no filters
+// were given, otherwise "?k=v&..."). Filter values are not URL-escaped here;
+// presumably ids are URL-safe — confirm against the id format.
+func parseTargetWithFilter(name string, args []string, stderr io.Writer) (target, string, bool, error) {
+ fs := flag.NewFlagSet(name, flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ orgID := fs.String("org-id", "", "Organization id")
+ workspaceID := fs.String("workspace-id", "", "Workspace id")
+ projectID := fs.String("project-id", "", "Project id")
+ installationID := fs.String("installation-id", "", "Installation id")
+ limit := fs.Int("limit", 0, "List limit")
+ target, asJSON, err := parseHTTPFlags(fs, args)
+ if err != nil {
+ return target, "", false, err
+ }
+ query := []string{}
+ if *orgID != "" {
+ query = append(query, "org_id="+*orgID)
+ }
+ if *workspaceID != "" {
+ query = append(query, "workspace_id="+*workspaceID)
+ }
+ if *projectID != "" {
+ query = append(query, "project_id="+*projectID)
+ }
+ if *installationID != "" {
+ query = append(query, "installation_id="+*installationID)
+ }
+ if *limit > 0 {
+ query = append(query, fmt.Sprintf("limit=%d", *limit))
+ }
+ if len(query) == 0 {
+ return target, "", asJSON, nil
+ }
+ return target, "?" + strings.Join(query, "&"), asJSON, nil
+}
+
+// parseCreateResourceTarget parses flags for the generic "<name> create"
+// commands (orgs/workspaces/projects) and marshals a create payload.
+// Unset flags are sent as empty strings; presumably the server treats empty
+// fields as absent — confirm against the API handler.
+func parseCreateResourceTarget(name string, args []string, stderr io.Writer) (target, bool, []byte, error) {
+ fs := flag.NewFlagSet(name+" create", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ id := fs.String("id", "", "Optional id")
+ orgID := fs.String("org-id", "", "Organization id")
+ workspaceID := fs.String("workspace-id", "", "Workspace id")
+ slug := fs.String("slug", "", "Slug")
+ displayName := fs.String("name", "", "Display name")
+ target, asJSON, err := parseHTTPFlags(fs, args)
+ if err != nil {
+ return target, false, nil, err
+ }
+ payload := map[string]string{
+ "id": *id,
+ "org_id": *orgID,
+ "workspace_id": *workspaceID,
+ "slug": *slug,
+ "name": *displayName,
+ }
+ data, err := json.Marshal(payload)
+ return target, asJSON, data, err
+}
+
+// parseCreateTokenTarget parses flags for "tokens create" and marshals the
+// token-creation payload; --scope may be repeated and is collected into a
+// string slice.
+func parseCreateTokenTarget(args []string, stderr io.Writer) (target, bool, []byte, error) {
+ fs := flag.NewFlagSet("tokens create", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ orgID := fs.String("org-id", "", "Organization id")
+ workspaceID := fs.String("workspace-id", "", "Workspace id")
+ projectID := fs.String("project-id", "", "Project id")
+ membershipID := fs.String("membership-id", "", "Membership id")
+ name := fs.String("name", "", "Token name")
+ kind := fs.String("kind", "", "Token kind")
+ var scopes multiFlag
+ fs.Var(&scopes, "scope", "Token scope; repeatable")
+ target, asJSON, err := parseHTTPFlags(fs, args)
+ if err != nil {
+ return target, false, nil, err
+ }
+ payload := map[string]any{
+ "org_id": *orgID,
+ "workspace_id": *workspaceID,
+ "project_id": *projectID,
+ "membership_id": *membershipID,
+ "name": *name,
+ "kind": *kind,
+ "scopes": []string(scopes),
+ }
+ data, err := json.Marshal(payload)
+ return target, asJSON, data, err
+}
+
+// parseCreateInstallTarget parses flags for "install create" and marshals the
+// installation payload; --config and --secret are repeatable key=value pairs
+// collected into maps.
+func parseCreateInstallTarget(args []string, stderr io.Writer) (target, bool, []byte, error) {
+ fs := flag.NewFlagSet("install create", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ orgID := fs.String("org-id", "", "Organization id")
+ workspaceID := fs.String("workspace-id", "", "Workspace id")
+ projectID := fs.String("project-id", "", "Project id")
+ connectorKey := fs.String("connector", "", "Connector key")
+ name := fs.String("name", "", "Installation name")
+ externalInstallationID := fs.String("external-installation-id", "", "External installation id")
+ externalAccount := fs.String("external-account", "", "External account")
+ var configs kvFlag
+ var secrets kvFlag
+ fs.Var(&configs, "config", "Config key=value; repeatable")
+ fs.Var(&secrets, "secret", "Secret key=value; repeatable")
+ target, asJSON, err := parseHTTPFlags(fs, args)
+ if err != nil {
+ return target, false, nil, err
+ }
+ payload := map[string]any{
+ "org_id": *orgID,
+ "workspace_id": *workspaceID,
+ "project_id": *projectID,
+ "connector_key": *connectorKey,
+ "name": *name,
+ "external_installation_id": *externalInstallationID,
+ "external_account": *externalAccount,
+ "config": map[string]string(configs),
+ "secrets": map[string]string(secrets),
+ }
+ data, err := json.Marshal(payload)
+ return target, asJSON, data, err
+}
+
+// parseCreateMembershipTarget parses flags for "memberships create" and
+// marshals the membership payload. --role defaults to the project-operator
+// role constant from the cloud package.
+func parseCreateMembershipTarget(args []string, stderr io.Writer) (target, bool, []byte, error) {
+ fs := flag.NewFlagSet("memberships create", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ orgID := fs.String("org-id", "", "Organization id")
+ workspaceID := fs.String("workspace-id", "", "Workspace id")
+ projectID := fs.String("project-id", "", "Project id")
+ name := fs.String("name", "", "Member name")
+ email := fs.String("email", "", "Member email")
+ role := fs.String("role", cloud.RoleProjectOperator, "Membership role")
+ target, asJSON, err := parseHTTPFlags(fs, args)
+ if err != nil {
+ return target, false, nil, err
+ }
+ payload := map[string]any{
+ "org_id": *orgID,
+ "workspace_id": *workspaceID,
+ "project_id": *projectID,
+ "name": *name,
+ "email": *email,
+ "role": *role,
+ }
+ data, err := json.Marshal(payload)
+ return target, asJSON, data, err
+}
+
+// parseInstallActionTarget parses flags for "install action" commands. It
+// returns the target, the required installation id (second return), and the
+// marshaled connector ActionRequest body. Only --installation-id is
+// validated here; field-level validation is presumably done server-side by
+// the connector handler — confirm.
+func parseInstallActionTarget(args []string, stderr io.Writer) (target, string, bool, []byte, error) {
+ fs := flag.NewFlagSet("install action", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ installationID := fs.String("installation-id", "", "Installation id")
+ action := fs.String("action", "", "Connector action")
+ repository := fs.String("repository", "", "Repository (owner/name or namespaced project)")
+ project := fs.String("project", "", "Project identifier")
+ issueNumber := fs.Int("issue-number", 0, "Issue number")
+ issueKey := fs.String("issue-key", "", "Issue key")
+ issueID := fs.String("issue-id", "", "Issue id")
+ channel := fs.String("channel", "", "Slack channel id")
+ threadTS := fs.String("thread-ts", "", "Slack thread ts")
+ messageTS := fs.String("message-ts", "", "Slack message ts")
+ teamID := fs.String("team-id", "", "Linear team id")
+ transitionID := fs.String("transition-id", "", "Jira transition id")
+ title := fs.String("title", "", "Title")
+ description := fs.String("description", "", "Description")
+ body := fs.String("body", "", "Body")
+ target, asJSON, err := parseHTTPFlags(fs, args)
+ if err != nil {
+ return target, "", false, nil, err
+ }
+ if *installationID == "" {
+ return target, "", false, nil, fmt.Errorf("--installation-id is required")
+ }
+ payload := cloudconnectors.ActionRequest{
+ Action: cloudconnectors.ActionName(*action),
+ Repository: *repository,
+ Project: *project,
+ IssueNumber: *issueNumber,
+ IssueKey: *issueKey,
+ IssueID: *issueID,
+ Channel: *channel,
+ ThreadTS: *threadTS,
+ MessageTS: *messageTS,
+ TeamID: *teamID,
+ TransitionID: *transitionID,
+ Title: *title,
+ Description: *description,
+ Body: *body,
+ }
+ data, err := json.Marshal(payload)
+ return target, *installationID, asJSON, data, err
+}
+
+// parseRuntimeConnectorClaimTarget parses flags for "runtime-connectors
+// claim" and marshals the claim payload scoping a relay connector to an
+// org/workspace/project.
+func parseRuntimeConnectorClaimTarget(args []string, stderr io.Writer) (target, bool, []byte, error) {
+ fs := flag.NewFlagSet("runtime-connectors claim", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ orgID := fs.String("org-id", "", "Organization id")
+ workspaceID := fs.String("workspace-id", "", "Workspace id")
+ projectID := fs.String("project-id", "", "Project id")
+ connectorID := fs.String("connector-id", "", "Relay connector id")
+ target, asJSON, err := parseHTTPFlags(fs, args)
+ if err != nil {
+ return target, false, nil, err
+ }
+ payload := map[string]string{
+ "org_id": *orgID,
+ "workspace_id": *workspaceID,
+ "project_id": *projectID,
+ "connector_id": *connectorID,
+ }
+ data, err := json.Marshal(payload)
+ return target, asJSON, data, err
+}
+
+// parseRuntimeInstanceListTarget parses flags for "runtime-instances list"
+// and builds the query string ("" when no filters are set). The boolean
+// --include-unshared flag is serialized as include_unshared=true only when
+// enabled.
+func parseRuntimeInstanceListTarget(args []string, stderr io.Writer) (target, string, bool, error) {
+ fs := flag.NewFlagSet("runtime-instances list", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ orgID := fs.String("org-id", "", "Organization id")
+ workspaceID := fs.String("workspace-id", "", "Workspace id")
+ projectID := fs.String("project-id", "", "Project id")
+ runtimeConnectorID := fs.String("runtime-connector-id", "", "Cloud runtime connector record id")
+ includeUnshared := fs.Bool("include-unshared", false, "Include remembered but currently unshared instances")
+ target, asJSON, err := parseHTTPFlags(fs, args)
+ if err != nil {
+ return target, "", false, err
+ }
+ query := []string{}
+ if *orgID != "" {
+ query = append(query, "org_id="+*orgID)
+ }
+ if *workspaceID != "" {
+ query = append(query, "workspace_id="+*workspaceID)
+ }
+ if *projectID != "" {
+ query = append(query, "project_id="+*projectID)
+ }
+ if *runtimeConnectorID != "" {
+ query = append(query, "runtime_connector_id="+*runtimeConnectorID)
+ }
+ if *includeUnshared {
+ query = append(query, "include_unshared=true")
+ }
+ if len(query) == 0 {
+ return target, "", asJSON, nil
+ }
+ return target, "?" + strings.Join(query, "&"), asJSON, nil
+}
+
+// trimListArgs drops an optional leading "list" verb so commands accept both
+// "events" and "events list" forms.
+func trimListArgs(args []string) []string {
+ if len(args) > 0 && args[0] == "list" {
+ return args[1:]
+ }
+ return args
+}
+
+// multiFlag is a flag.Value collecting a repeatable string flag into a slice.
+// Whitespace-only values are silently dropped.
+type multiFlag []string
+
+// String renders the collected values comma-separated (flag.Value contract).
+func (m *multiFlag) String() string { return strings.Join(*m, ",") }
+
+// Set appends a trimmed, non-empty value.
+func (m *multiFlag) Set(value string) error {
+ value = strings.TrimSpace(value)
+ if value != "" {
+ *m = append(*m, value)
+ }
+ return nil
+}
+
+// kvFlag is a flag.Value collecting repeatable key=value pairs into a map.
+// Keys are trimmed; values are stored verbatim (leading/trailing spaces and
+// any further "=" characters are kept in the value).
+type kvFlag map[string]string
+
+// String renders the map as JSON (flag.Value contract; errors are ignored).
+func (k *kvFlag) String() string {
+ if k == nil {
+ return ""
+ }
+ data, _ := json.Marshal(*k)
+ return string(data)
+}
+
+// Set parses one "key=value" argument, rejecting inputs without "=" or with
+// a blank key. Repeated keys overwrite earlier values.
+func (k *kvFlag) Set(value string) error {
+ if *k == nil {
+ *k = map[string]string{}
+ }
+ key, val, ok := strings.Cut(value, "=")
+ if !ok || strings.TrimSpace(key) == "" {
+ return fmt.Errorf("expected key=value, got %q", value)
+ }
+ (*k)[strings.TrimSpace(key)] = val
+ return nil
+}
+
+// printOutput marshals payload and writes it to stdout: compact JSON when
+// asJSON is set, otherwise two-space indented. If indenting fails the
+// compact form is written instead of returning an error.
+func printOutput(stdout io.Writer, payload any, asJSON bool) error {
+ data, err := json.Marshal(payload)
+ if err != nil {
+ return err
+ }
+ if asJSON {
+ _, err = fmt.Fprintln(stdout, string(data))
+ return err
+ }
+ var pretty bytes.Buffer
+ if err := json.Indent(&pretty, data, "", " "); err != nil {
+ _, err = fmt.Fprintln(stdout, string(data))
+ return err
+ }
+ _, err = fmt.Fprintln(stdout, pretty.String())
+ return err
+}
+
+// findOrCreateOrg returns the org with the given slug from the store.
+// NOTE(review): despite the name, no create happens in this function — the
+// error text suggests the caller attempts the create before this lookup;
+// confirm against the bootstrap flow.
+func findOrCreateOrg(ctx context.Context, store *cloud.Store, slug string) (*cloud.Org, error) {
+ orgs, err := store.ListOrgs(ctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, org := range orgs {
+ if org.Slug == slug {
+ return &org, nil
+ }
+ }
+ return nil, fmt.Errorf("org %q not found after create attempt", slug)
+}
+
+// findOrCreateWorkspace returns the workspace with the given slug inside
+// orgID. NOTE(review): lookup only — the create is presumably attempted by
+// the caller first; confirm.
+func findOrCreateWorkspace(ctx context.Context, store *cloud.Store, orgID, slug string) (*cloud.Workspace, error) {
+ workspaces, err := store.ListWorkspaces(ctx, orgID)
+ if err != nil {
+ return nil, err
+ }
+ for _, workspace := range workspaces {
+ if workspace.Slug == slug {
+ return &workspace, nil
+ }
+ }
+ return nil, fmt.Errorf("workspace %q not found after create attempt", slug)
+}
+
+// findOrCreateProject returns the project with the given slug inside
+// workspaceID. NOTE(review): lookup only — the create is presumably attempted
+// by the caller first; confirm.
+func findOrCreateProject(ctx context.Context, store *cloud.Store, workspaceID, slug string) (*cloud.Project, error) {
+ projects, err := store.ListProjects(ctx, workspaceID)
+ if err != nil {
+ return nil, err
+ }
+ for _, project := range projects {
+ if project.Slug == slug {
+ return &project, nil
+ }
+ }
+ return nil, fmt.Errorf("project %q not found after create attempt", slug)
+}
diff --git a/cmd/codencer-cloudctl/main_test.go b/cmd/codencer-cloudctl/main_test.go
new file mode 100644
index 0000000..c6e678a
--- /dev/null
+++ b/cmd/codencer-cloudctl/main_test.go
@@ -0,0 +1,159 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "agent-bridge/internal/cloud"
+)
+
+// TestRunBootstrapCreatesScopedStoreAndToken runs the bootstrap subcommand
+// against a temp config/db, then verifies the emitted org/workspace/project
+// scope, the "cct_"-prefixed token, and that the token was persisted in the
+// store. Env vars are cleared first so the config file alone drives behavior.
+func TestRunBootstrapCreatesScopedStoreAndToken(t *testing.T) {
+ t.Setenv("CODENCER_CLOUD_DB_PATH", "")
+ t.Setenv("CODENCER_CLOUD_HOST", "")
+ t.Setenv("CODENCER_CLOUD_PORT", "")
+ t.Setenv("CODENCER_CLOUD_MASTER_KEY", "")
+ t.Setenv("CODENCER_CLOUD_RELAY_CONFIG", "")
+
+ tempDir := t.TempDir()
+ cfgPath := filepath.Join(tempDir, "cloud.json")
+ cfg := cloud.DefaultConfig()
+ cfg.DBPath = filepath.Join(tempDir, "cloud.db")
+ cfg.MasterKey = "cloud-master-key"
+ data, err := json.Marshal(cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := os.WriteFile(cfgPath, data, 0o600); err != nil {
+ t.Fatal(err)
+ }
+
+ var stdout, stderr bytes.Buffer
+ args := []string{
+ "bootstrap",
+ "--config", cfgPath,
+ "--org-slug", "acme",
+ "--workspace-slug", "platform",
+ "--project-slug", "core",
+ "--token-name", "operator",
+ "--scope", "cloud:read",
+ "--json",
+ }
+ if err := run(args, &stdout, &stderr); err != nil {
+ t.Fatalf("bootstrap failed: %v stderr=%s", err, stderr.String())
+ }
+
+ var payload struct {
+ Org cloud.Org `json:"org"`
+ Workspace cloud.Workspace `json:"workspace"`
+ Project cloud.Project `json:"project"`
+ Token string `json:"token"`
+ Record cloud.APIToken `json:"record"`
+ }
+ if err := json.Unmarshal(stdout.Bytes(), &payload); err != nil {
+ t.Fatalf("decode bootstrap payload: %v body=%s", err, stdout.String())
+ }
+ if payload.Org.Slug != "acme" || payload.Workspace.Slug != "platform" || payload.Project.Slug != "core" {
+ t.Fatalf("unexpected bootstrap scope: %+v", payload)
+ }
+ if !strings.HasPrefix(payload.Token, "cct_") {
+ t.Fatalf("expected generated cloud token, got %q", payload.Token)
+ }
+ if payload.Record.Name != "operator" {
+ t.Fatalf("unexpected token record: %+v", payload.Record)
+ }
+
+ // Re-open the store independently to prove the token really persisted.
+ store, err := cloud.OpenStore(cfg.DBPath, cfg.MasterKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer store.Close()
+
+ found, err := store.LookupAPIToken(context.Background(), payload.Token)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if found == nil || found.ID == "" || found.Name != "operator" {
+ t.Fatalf("expected persisted bootstrap token, got %+v", found)
+ }
+}
+
+// TestRunStatusUsesAuthAndCloudURL runs "status" against a stub server and
+// asserts the method, path, bearer header, and pass-through of the raw body
+// under --json.
+func TestRunStatusUsesAuthAndCloudURL(t *testing.T) {
+ var seenMethod, seenPath, seenAuth string
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ seenMethod = r.Method
+ seenPath = r.URL.Path
+ seenAuth = r.Header.Get("Authorization")
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"ok":true,"relay_composed":false}`))
+ }))
+ defer srv.Close()
+
+ var stdout, stderr bytes.Buffer
+ if err := run([]string{"status", "--cloud-url", srv.URL, "--token", "tok", "--json"}, &stdout, &stderr); err != nil {
+ t.Fatalf("status command failed: %v stderr=%s", err, stderr.String())
+ }
+ if seenMethod != http.MethodGet || seenPath != "/api/cloud/v1/status" {
+ t.Fatalf("unexpected request: method=%s path=%s", seenMethod, seenPath)
+ }
+ if seenAuth != "Bearer tok" {
+ t.Fatalf("unexpected authorization header: %q", seenAuth)
+ }
+ if got := strings.TrimSpace(stdout.String()); got != `{"ok":true,"relay_composed":false}` {
+ t.Fatalf("unexpected status output: %s", got)
+ }
+}
+
+// TestRunRuntimeConnectorCommandsUseCloudRuntimePaths exercises the
+// runtime-connectors list/claim and runtime-instances list subcommands
+// against a stub server and asserts the exact sequence of request lines
+// (method, URI incl. query, auth header) they produce.
+func TestRunRuntimeConnectorCommandsUseCloudRuntimePaths(t *testing.T) {
+ var seen []string
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ seen = append(seen, r.Method+" "+r.URL.RequestURI()+" "+r.Header.Get("Authorization"))
+ w.Header().Set("Content-Type", "application/json")
+ switch {
+ case r.Method == http.MethodGet && r.URL.Path == "/api/cloud/v1/runtime/connectors":
+ _, _ = w.Write([]byte(`[]`))
+ case r.Method == http.MethodPost && r.URL.Path == "/api/cloud/v1/runtime/connectors":
+ _, _ = w.Write([]byte(`{"id":"rconn_1","connector_id":"conn-1"}`))
+ case r.Method == http.MethodGet && r.URL.Path == "/api/cloud/v1/runtime/instances":
+ _, _ = w.Write([]byte(`[]`))
+ default:
+ http.NotFound(w, r)
+ }
+ }))
+ defer srv.Close()
+
+ var stdout, stderr bytes.Buffer
+ if err := run([]string{"runtime-connectors", "list", "--cloud-url", srv.URL, "--token", "tok", "--org-id", "org-1", "--json"}, &stdout, &stderr); err != nil {
+ t.Fatalf("runtime-connectors list failed: %v stderr=%s", err, stderr.String())
+ }
+ stdout.Reset()
+ stderr.Reset()
+ if err := run([]string{"runtime-connectors", "claim", "--cloud-url", srv.URL, "--token", "tok", "--org-id", "org-1", "--workspace-id", "ws-1", "--project-id", "proj-1", "--connector-id", "conn-1", "--json"}, &stdout, &stderr); err != nil {
+ t.Fatalf("runtime-connectors claim failed: %v stderr=%s", err, stderr.String())
+ }
+ stdout.Reset()
+ stderr.Reset()
+ if err := run([]string{"runtime-instances", "list", "--cloud-url", srv.URL, "--token", "tok", "--runtime-connector-id", "rconn_1", "--include-unshared", "--json"}, &stdout, &stderr); err != nil {
+ t.Fatalf("runtime-instances list failed: %v stderr=%s", err, stderr.String())
+ }
+
+ expected := []string{
+ "GET /api/cloud/v1/runtime/connectors?org_id=org-1 Bearer tok",
+ "POST /api/cloud/v1/runtime/connectors Bearer tok",
+ "GET /api/cloud/v1/runtime/instances?runtime_connector_id=rconn_1&include_unshared=true Bearer tok",
+ }
+ if len(seen) != len(expected) {
+ t.Fatalf("unexpected request count: got %v want %v", seen, expected)
+ }
+ for i := range expected {
+ if seen[i] != expected[i] {
+ t.Fatalf("unexpected request %d: got %q want %q", i, seen[i], expected[i])
+ }
+ }
+}
diff --git a/cmd/codencer-cloudd/main.go b/cmd/codencer-cloudd/main.go
new file mode 100644
index 0000000..b505a82
--- /dev/null
+++ b/cmd/codencer-cloudd/main.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+ "context"
+ "flag"
+ "log"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "agent-bridge/internal/cloud"
+ cloudconnectors "agent-bridge/internal/cloud/connectors"
+ "agent-bridge/internal/relay"
+)
+
+// main runs the cloud daemon and exits non-zero via log.Fatal on error.
+func main() {
+ if err := run(os.Args[1:]); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// run starts the cloud server. It loads the cloud config, opens the cloud
+// store, optionally composes an embedded relay (when a relay config path is
+// given on the command line or in the cloud config), and blocks in
+// server.Start until SIGINT/SIGTERM.
+func run(args []string) error {
+ fs := flag.NewFlagSet("codencer-cloudd", flag.ContinueOnError)
+ configPath := fs.String("config", "", "Cloud config path")
+ relayConfigPath := fs.String("relay-config", "", "Relay config path to compose under the cloud service")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := cloud.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ // The flag takes precedence; fall back to the path from the cloud config.
+ if *relayConfigPath == "" {
+ *relayConfigPath = cfg.RelayConfigPath
+ }
+
+ store, err := cloud.OpenStore(cfg.DBPath, cfg.MasterKey)
+ if err != nil {
+ return err
+ }
+ defer store.Close()
+
+ var relayRuntime *cloud.RelayRuntime
+ var relayStore *relay.Store
+ var relayServer *relay.Server
+ if *relayConfigPath != "" {
+ relayCfg, err := relay.LoadConfig(*relayConfigPath)
+ if err != nil {
+ return err
+ }
+ relayStore, err = relay.OpenStore(relayCfg.DBPath)
+ if err != nil {
+ return err
+ }
+ defer relayStore.Close()
+ relayServer = relay.NewServer(relayCfg, relayStore)
+ relayRuntime = &cloud.RelayRuntime{
+ Server: relayServer,
+ Store: relayStore,
+ }
+ }
+
+ server := cloud.NewServer(cfg, store, cloudconnectors.NewRegistry(), relayRuntime)
+ ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+ defer cancel()
+ return server.Start(ctx)
+}
diff --git a/cmd/codencer-cloudworkerd/main.go b/cmd/codencer-cloudworkerd/main.go
new file mode 100644
index 0000000..c1b99d1
--- /dev/null
+++ b/cmd/codencer-cloudworkerd/main.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ "agent-bridge/internal/cloud"
+)
+
+// main runs the worker daemon, printing errors to stderr and exiting 1.
+func main() {
+ if err := run(os.Args[1:]); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+// run starts the cloud worker: load config, open the store, then either do a
+// single pass (--once) or poll every --interval until SIGINT/SIGTERM.
+func run(args []string) error {
+ fs := flag.NewFlagSet("codencer-cloudworkerd", flag.ContinueOnError)
+ configPath := fs.String("config", "", "Cloud config path")
+ interval := fs.Duration("interval", 2*time.Minute, "Polling interval for cloud worker jobs")
+ pollLimit := fs.Int("limit", 50, "Maximum provider records to poll per installation pass")
+ once := fs.Bool("once", false, "Run one worker pass and exit")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := cloud.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ store, err := cloud.OpenStore(cfg.DBPath, cfg.MasterKey)
+ if err != nil {
+ return err
+ }
+ defer store.Close()
+
+ // Second argument (connectors registry?) is nil here — the worker
+ // presumably falls back to a default; confirm against cloud.NewWorker.
+ worker := cloud.NewWorker(store, nil, *pollLimit)
+ ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
+ defer cancel()
+
+ if *once {
+ return worker.RunOnce(ctx)
+ }
+ return worker.Run(ctx, *interval)
+}
diff --git a/cmd/codencer-connectord/main.go b/cmd/codencer-connectord/main.go
new file mode 100644
index 0000000..1a188c0
--- /dev/null
+++ b/cmd/codencer-connectord/main.go
@@ -0,0 +1,424 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "os/signal"
+ "strings"
+ "syscall"
+
+ "agent-bridge/internal/connector"
+)
+
+// defaultConnectorConfigPath is the config location used by every subcommand
+// when --config is not given, relative to the working directory.
+const defaultConnectorConfigPath = ".codencer/connector/config.json"
+
+// main dispatches CLI args to run and exits non-zero via log.Fatal on error.
+func main() {
+ if err := run(context.Background(), os.Args[1:], os.Stdout, os.Stderr); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// run dispatches the first argument to the matching subcommand handler,
+// passing the remaining args through. Missing or unknown subcommands are
+// reported as errors.
+func run(ctx context.Context, args []string, stdout, stderr io.Writer) error {
+ if len(args) < 1 {
+ // List the actual subcommands so the usage message is actionable;
+ // the tool never runs with flags alone.
+ return fmt.Errorf("usage: codencer-connectord <enroll|run|status|list|discover|share|unshare|config> [flags]")
+ }
+
+ switch args[0] {
+ case "enroll":
+ return runEnroll(ctx, args[1:], stdout, stderr)
+ case "run":
+ return runConnector(ctx, args[1:], stderr)
+ case "status":
+ return runStatus(args[1:], stdout, stderr)
+ case "list":
+ return runList(args[1:], stdout, stderr)
+ case "discover":
+ return runDiscover(ctx, args[1:], stdout, stderr)
+ case "share":
+ return runShare(ctx, args[1:], stdout, stderr)
+ case "unshare":
+ return runUnshare(args[1:], stdout, stderr)
+ case "config":
+ return runConfig(args[1:], stdout, stderr)
+ default:
+ return fmt.Errorf("unknown connector command %s", args[0])
+ }
+}
+
+// runEnroll performs one-time enrollment against the relay: it exchanges the
+// enrollment token for connector credentials (via connector.Enroll, which
+// also writes the config file) and prints the new connector/machine ids.
+func runEnroll(ctx context.Context, args []string, stdout, stderr io.Writer) error {
+ fs := newFlagSet("enroll", stderr)
+ relayURL := fs.String("relay-url", "http://127.0.0.1:8090", "Relay base URL")
+ daemonURL := fs.String("daemon-url", "http://127.0.0.1:8085", "Local Codencer daemon URL")
+ enrollmentToken := fs.String("enrollment-token", "", "Relay enrollment token")
+ configPath := fs.String("config", defaultConnectorConfigPath, "Connector config path")
+ label := fs.String("label", "", "Optional connector label")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := connector.Enroll(ctx, *relayURL, *daemonURL, *enrollmentToken, *label, *configPath)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintf(stdout, "Connector enrolled: %s machine=%s\n", cfg.ConnectorID, cfg.MachineID)
+ return err
+}
+
+// runConnector loads the connector config and runs the relay client until
+// the process receives SIGINT/SIGTERM. A cancellation-driven shutdown is
+// treated as success, not an error.
+func runConnector(ctx context.Context, args []string, stderr io.Writer) error {
+ fs := newFlagSet("run", stderr)
+ configPath := fs.String("config", defaultConnectorConfigPath, "Connector config path")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := connector.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ client := connector.NewClient(cfg)
+
+ runCtx, cancel := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM)
+ defer cancel()
+
+ // errors.Is (rather than ==) also matches context.Canceled when the
+ // client wraps it, so a clean shutdown never surfaces as a failure.
+ if err := client.Run(runCtx); err != nil && !errors.Is(err, context.Canceled) {
+ return err
+ }
+ return nil
+}
+
+// runStatus prints the connector's persisted status. With --json it dumps
+// the status file verbatim; otherwise it renders a human-readable summary of
+// session state, timestamps, currently-shared instances, and the configured
+// instance entries (plus last_error when present).
+func runStatus(args []string, stdout, stderr io.Writer) error {
+ fs := newFlagSet("status", stderr)
+ configPath := fs.String("config", defaultConnectorConfigPath, "Connector config path")
+ jsonOutput := fs.Bool("json", false, "Print raw connector status JSON")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ status, err := connector.LoadStatus(*configPath)
+ if err != nil {
+ return err
+ }
+
+ if *jsonOutput {
+ // Emit the on-disk status file verbatim rather than re-marshaling.
+ data, err := os.ReadFile(connector.StatusPathForConfig(*configPath))
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(stdout, string(data))
+ return err
+ }
+
+ cfg, err := connector.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ entries := connector.EffectiveSharedInstances(cfg)
+
+ if _, err := fmt.Fprintf(stdout, "connector=%s machine=%s relay=%s state=%s\n",
+ status.ConnectorID,
+ status.MachineID,
+ status.RelayURL,
+ status.SessionState,
+ ); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprintf(stdout, "last_connect=%s last_disconnect=%s last_heartbeat=%s\n",
+ blankOrValue(status.LastConnectAt),
+ blankOrValue(status.LastDisconnectAt),
+ blankOrValue(status.LastHeartbeatAt),
+ ); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprintf(stdout, "shared_now=%s\n", formatList(status.SharedInstances)); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprintf(stdout, "configured_instances=%d shared_config=%d unshared_config=%d\n",
+ len(entries),
+ countInstances(entries, true),
+ countInstances(entries, false),
+ ); err != nil {
+ return err
+ }
+ if status.LastError != "" {
+ if _, err := fmt.Fprintf(stdout, "last_error=%s\n", status.LastError); err != nil {
+ return err
+ }
+ }
+ for _, entry := range entries {
+ if _, err := fmt.Fprintf(stdout, "%s\n", formatInstanceLine(entry)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// runList prints the configured shared-instance entries, either as JSON
+// (--json) or one formatted line per entry; an explicit message is printed
+// when there are none.
+func runList(args []string, stdout, stderr io.Writer) error {
+ fs := newFlagSet("list", stderr)
+ configPath := fs.String("config", defaultConnectorConfigPath, "Connector config path")
+ jsonOutput := fs.Bool("json", false, "Print shared instance config as JSON")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := connector.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ entries := connector.EffectiveSharedInstances(cfg)
+ if *jsonOutput {
+ return writeJSON(stdout, entries)
+ }
+ if len(entries) == 0 {
+ _, err := fmt.Fprintln(stdout, "no configured connector instances")
+ return err
+ }
+ for _, entry := range entries {
+ if _, err := fmt.Fprintln(stdout, formatInstanceLine(entry)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// runDiscover scans the configured discovery roots (plus any extra --root
+// flags, repeatable) for connector instances and prints them as JSON or one
+// formatted line per entry.
+func runDiscover(ctx context.Context, args []string, stdout, stderr io.Writer) error {
+ fs := newFlagSet("discover", stderr)
+ configPath := fs.String("config", defaultConnectorConfigPath, "Connector config path")
+ jsonOutput := fs.Bool("json", false, "Print discovered instance view as JSON")
+ var roots multiStringFlag
+ fs.Var(&roots, "root", "Additional discovery root to scan (repeatable)")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := connector.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ entries, err := connector.DiscoverInstances(ctx, cfg, roots.Values(), nil)
+ if err != nil {
+ return err
+ }
+ if *jsonOutput {
+ return writeJSON(stdout, entries)
+ }
+ if len(entries) == 0 {
+ _, err := fmt.Fprintln(stdout, "no connector instances discovered")
+ return err
+ }
+ for _, entry := range entries {
+ if _, err := fmt.Fprintln(stdout, formatDiscoverLine(entry)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// runShare marks an instance (selected by id, daemon URL, or manifest path)
+// as shared, persists the updated config, syncs the status store, and prints
+// the resulting entry.
+func runShare(ctx context.Context, args []string, stdout, stderr io.Writer) error {
+ fs := newFlagSet("share", stderr)
+ configPath := fs.String("config", defaultConnectorConfigPath, "Connector config path")
+ instanceID := fs.String("instance-id", "", "Instance ID to share")
+ daemonURL := fs.String("daemon-url", "", "Daemon URL to share")
+ manifestPath := fs.String("manifest-path", "", "Manifest path to share")
+ jsonOutput := fs.Bool("json", false, "Print the updated entry as JSON")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := connector.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ entry, err := connector.ShareInstance(ctx, cfg, connector.InstanceSelector{
+ InstanceID: *instanceID,
+ DaemonURL: *daemonURL,
+ ManifestPath: *manifestPath,
+ }, nil)
+ if err != nil {
+ return err
+ }
+ // Persist the mutated config, then mirror it into the status file.
+ if err := connector.SaveConfig(*configPath, cfg); err != nil {
+ return err
+ }
+ if err := connector.NewStatusStore(*configPath).SyncConfig(cfg); err != nil {
+ return err
+ }
+ if *jsonOutput {
+ return writeJSON(stdout, entry)
+ }
+ _, err = fmt.Fprintf(stdout, "shared %s\n", formatInstanceLine(entry))
+ return err
+}
+
+// runUnshare is the inverse of runShare: it marks the selected instance as
+// unshared, persists the config, syncs the status store, and prints the
+// resulting entry.
+func runUnshare(args []string, stdout, stderr io.Writer) error {
+ fs := newFlagSet("unshare", stderr)
+ configPath := fs.String("config", defaultConnectorConfigPath, "Connector config path")
+ instanceID := fs.String("instance-id", "", "Instance ID to unshare")
+ daemonURL := fs.String("daemon-url", "", "Daemon URL to unshare")
+ manifestPath := fs.String("manifest-path", "", "Manifest path to unshare")
+ jsonOutput := fs.Bool("json", false, "Print the updated entry as JSON")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := connector.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ entry, err := connector.UnshareInstance(cfg, connector.InstanceSelector{
+ InstanceID: *instanceID,
+ DaemonURL: *daemonURL,
+ ManifestPath: *manifestPath,
+ })
+ if err != nil {
+ return err
+ }
+ if err := connector.SaveConfig(*configPath, cfg); err != nil {
+ return err
+ }
+ if err := connector.NewStatusStore(*configPath).SyncConfig(cfg); err != nil {
+ return err
+ }
+ if *jsonOutput {
+ return writeJSON(stdout, entry)
+ }
+ _, err = fmt.Fprintf(stdout, "unshared %s\n", formatInstanceLine(entry))
+ return err
+}
+
+// runConfig prints the connector configuration. Sensitive values are
+// redacted unless --show-secrets is given; --json emits the marshaled
+// config, otherwise a line-oriented summary plus one line per instance.
+func runConfig(args []string, stdout, stderr io.Writer) error {
+ fs := newFlagSet("config", stderr)
+ configPath := fs.String("config", defaultConnectorConfigPath, "Connector config path")
+ jsonOutput := fs.Bool("json", false, "Print connector config as JSON")
+ showSecrets := fs.Bool("show-secrets", false, "Include sensitive config values in output")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+
+ cfg, err := connector.LoadConfig(*configPath)
+ if err != nil {
+ return err
+ }
+ if *jsonOutput {
+ data, err := connector.MarshalConfig(cfg, *showSecrets)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(stdout, string(data))
+ return err
+ }
+
+ safeCfg := connector.RedactedConfig(cfg, *showSecrets)
+ if _, err := fmt.Fprintf(stdout, "relay=%s websocket=%s connector=%s machine=%s label=%s heartbeat_seconds=%d\n",
+ safeCfg.RelayURL,
+ blankOrValue(safeCfg.WebsocketURL),
+ blankOrValue(safeCfg.ConnectorID),
+ blankOrValue(safeCfg.MachineID),
+ blankOrValue(safeCfg.Label),
+ safeCfg.HeartbeatIntervalSeconds,
+ ); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprintf(stdout, "private_key=%s public_key=%s\n",
+ blankOrValue(safeCfg.PrivateKey),
+ blankOrValue(safeCfg.PublicKey),
+ ); err != nil {
+ return err
+ }
+ if _, err := fmt.Fprintf(stdout, "discovery_roots=%s\n", formatList(safeCfg.DiscoveryRoots)); err != nil {
+ return err
+ }
+ for _, entry := range connector.EffectiveSharedInstances(safeCfg) {
+ if _, err := fmt.Fprintln(stdout, formatInstanceLine(entry)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// newFlagSet builds a ContinueOnError flag set whose usage/error output goes
+// to the supplied stderr writer.
+func newFlagSet(name string, stderr io.Writer) *flag.FlagSet {
+ fs := flag.NewFlagSet(name, flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ return fs
+}
+
+// countInstances counts entries whose Share field equals share.
+func countInstances(entries []connector.SharedInstanceConfig, share bool) int {
+ count := 0
+ for _, entry := range entries {
+ if entry.Share == share {
+ count++
+ }
+ }
+ return count
+}
+
+// blankOrValue substitutes "-" for empty strings so tabular output stays
+// aligned and unambiguous.
+func blankOrValue(value string) string {
+ if value == "" {
+ return "-"
+ }
+ return value
+}
+
+// formatList renders values comma-separated, or "-" when empty.
+func formatList(values []string) string {
+ if len(values) == 0 {
+ return "-"
+ }
+ return strings.Join(values, ",")
+}
+
+// formatInstanceLine renders one configured instance as a key=value line,
+// mapping Share to "shared"/"unshared" and blanks to "-".
+func formatInstanceLine(entry connector.SharedInstanceConfig) string {
+ state := "unshared"
+ if entry.Share {
+ state = "shared"
+ }
+ return fmt.Sprintf("state=%s instance_id=%s daemon_url=%s manifest_path=%s",
+ state,
+ blankOrValue(entry.InstanceID),
+ blankOrValue(entry.DaemonURL),
+ blankOrValue(entry.ManifestPath),
+ )
+}
+
+// formatDiscoverLine renders one discovered instance as a key=value line
+// with "-" substituted for blank fields.
+func formatDiscoverLine(entry connector.DiscoverEntry) string {
+ return fmt.Sprintf("state=%s instance_id=%s repo_root=%s daemon_url=%s manifest_path=%s",
+ blankOrValue(entry.State),
+ blankOrValue(entry.InstanceID),
+ blankOrValue(entry.RepoRoot),
+ blankOrValue(entry.DaemonURL),
+ blankOrValue(entry.ManifestPath),
+ )
+}
+
+// multiStringFlag is a flag.Value collecting a repeatable flag into a slice;
+// unlike cloudctl's multiFlag it keeps values verbatim (no trimming).
+type multiStringFlag []string
+
+// String renders the values comma-separated (flag.Value contract).
+func (f *multiStringFlag) String() string {
+ if f == nil || len(*f) == 0 {
+ return ""
+ }
+ return strings.Join(*f, ",")
+}
+
+// Set appends the value as-is.
+func (f *multiStringFlag) Set(value string) error {
+ *f = append(*f, value)
+ return nil
+}
+
+// Values returns a defensive copy so callers cannot mutate the flag state.
+func (f *multiStringFlag) Values() []string {
+ if f == nil {
+ return nil
+ }
+ return append([]string(nil), (*f)...)
+}
+
+// writeJSON marshals value with two-space indentation and writes it to
+// stdout followed by a newline.
+func writeJSON(stdout io.Writer, value any) error {
+ data, err := json.MarshalIndent(value, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(stdout, string(data))
+ return err
+}
diff --git a/cmd/codencer-connectord/main_test.go b/cmd/codencer-connectord/main_test.go
new file mode 100644
index 0000000..59bef41
--- /dev/null
+++ b/cmd/codencer-connectord/main_test.go
@@ -0,0 +1,250 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "agent-bridge/internal/connector"
+ "agent-bridge/internal/domain"
+)
+
// TestRunShareUnshareListAndConfig drives the connector CLI through
// share → unshare → list → config against a temp config file and a
// fake daemon, asserting on the key=value (and redacted JSON) output.
func TestRunShareUnshareListAndConfig(t *testing.T) {
	configPath := filepath.Join(t.TempDir(), "connector.json")
	// Seed a config that already knows one shared instance plus key
	// material that the config command must redact.
	cfg := &connector.Config{
		RelayURL:       "http://relay.invalid",
		ConnectorID:    "connector-1",
		MachineID:      "machine-1",
		PrivateKey:     "secret-key",
		PublicKey:      "public-key",
		ConfigPath:     configPath,
		Instances:      []connector.SharedInstanceConfig{{InstanceID: "inst-known", DaemonURL: "http://127.0.0.1:8085", Share: true}},
		DiscoveryRoots: []string{"/repos"},
	}
	if err := connector.SaveConfig(configPath, cfg); err != nil {
		t.Fatal(err)
	}

	// Fake daemon answering the instance-info endpoint that "share"
	// probes; it reports itself as inst-new.
	var daemon *httptest.Server
	daemon = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/api/v1/instance" {
			http.NotFound(w, r)
			return
		}
		_ = json.NewEncoder(w).Encode(domain.InstanceInfo{
			ID:           "inst-new",
			BaseURL:      daemon.URL,
			ManifestPath: "/repo/.codencer/instance.json",
		})
	}))
	defer daemon.Close()

	var stdout bytes.Buffer
	var stderr bytes.Buffer

	// share by daemon URL must resolve and report the new instance.
	if err := run(context.Background(), []string{"share", "--config", configPath, "--daemon-url", daemon.URL}, &stdout, &stderr); err != nil {
		t.Fatalf("share failed: %v stderr=%s", err, stderr.String())
	}
	if !strings.Contains(stdout.String(), "state=shared") || !strings.Contains(stdout.String(), "instance_id=inst-new") {
		t.Fatalf("unexpected share output: %s", stdout.String())
	}

	// unshare flips the same instance back.
	stdout.Reset()
	stderr.Reset()
	if err := run(context.Background(), []string{"unshare", "--config", configPath, "--instance-id", "inst-new"}, &stdout, &stderr); err != nil {
		t.Fatalf("unshare failed: %v stderr=%s", err, stderr.String())
	}
	if !strings.Contains(stdout.String(), "state=unshared") {
		t.Fatalf("unexpected unshare output: %s", stdout.String())
	}

	// list shows both the seeded and the newly added instance.
	stdout.Reset()
	stderr.Reset()
	if err := run(context.Background(), []string{"list", "--config", configPath}, &stdout, &stderr); err != nil {
		t.Fatalf("list failed: %v stderr=%s", err, stderr.String())
	}
	output := stdout.String()
	if !strings.Contains(output, "instance_id=inst-known") || !strings.Contains(output, "instance_id=inst-new") {
		t.Fatalf("expected list output to include both known instances, got %s", output)
	}
	if !strings.Contains(output, "state=unshared") {
		t.Fatalf("expected list output to include unshared entries, got %s", output)
	}

	// config --json must never leak the private key.
	stdout.Reset()
	stderr.Reset()
	if err := run(context.Background(), []string{"config", "--config", configPath, "--json"}, &stdout, &stderr); err != nil {
		t.Fatalf("config failed: %v stderr=%s", err, stderr.String())
	}
	if strings.Contains(stdout.String(), "secret-key") {
		t.Fatalf("expected redacted config output, got %s", stdout.String())
	}
	if !strings.Contains(stdout.String(), "[redacted]") {
		t.Fatalf("expected redacted marker in config output, got %s", stdout.String())
	}
}
+
// TestRunShareByInstanceIDRejectsUnresolvableEntryWithoutMutatingConfig
// verifies that sharing an unknown instance id fails loudly and leaves
// the on-disk config untouched.
func TestRunShareByInstanceIDRejectsUnresolvableEntryWithoutMutatingConfig(t *testing.T) {
	configPath := filepath.Join(t.TempDir(), "connector.json")
	// Config with no instances, so "inst-missing" cannot resolve.
	cfg := &connector.Config{
		RelayURL:    "http://relay.invalid",
		ConnectorID: "connector-1",
		MachineID:   "machine-1",
		ConfigPath:  configPath,
	}
	if err := connector.SaveConfig(configPath, cfg); err != nil {
		t.Fatal(err)
	}

	var stdout bytes.Buffer
	var stderr bytes.Buffer
	err := run(context.Background(), []string{"share", "--config", configPath, "--instance-id", "inst-missing"}, &stdout, &stderr)
	if err == nil {
		t.Fatal("expected share to fail for unresolved instance id")
	}
	if !strings.Contains(err.Error(), "did not resolve to a local daemon url") {
		t.Fatalf("unexpected share error: %v", err)
	}

	// A failed share must not write anything back to disk.
	savedCfg, loadErr := connector.LoadConfig(configPath)
	if loadErr != nil {
		t.Fatal(loadErr)
	}
	if len(savedCfg.Instances) != 0 {
		t.Fatalf("expected failed share to leave config unchanged, got %+v", savedCfg.Instances)
	}
}
+
// TestRunStatusTextStaysInformativeAndStatusJSONPassesThrough checks
// that the human-readable status view aggregates configured instances
// while status --json remains the raw status-file contents.
func TestRunStatusTextStaysInformativeAndStatusJSONPassesThrough(t *testing.T) {
	configPath := filepath.Join(t.TempDir(), "connector.json")
	// One shared and one unshared instance to exercise both counters.
	cfg := &connector.Config{
		RelayURL:    "http://relay.invalid",
		ConnectorID: "connector-1",
		MachineID:   "machine-1",
		ConfigPath:  configPath,
		Instances: []connector.SharedInstanceConfig{
			{InstanceID: "inst-shared", Share: true},
			{InstanceID: "inst-hidden", Share: false},
		},
	}
	if err := connector.SaveConfig(configPath, cfg); err != nil {
		t.Fatal(err)
	}
	// Seed the status store and mark a connected session that only
	// advertises the shared instance.
	store := connector.NewStatusStore(configPath)
	if err := store.Seed(cfg); err != nil {
		t.Fatal(err)
	}
	if err := store.MarkConnected(cfg, []string{"inst-shared"}, time.Unix(10, 0)); err != nil {
		t.Fatal(err)
	}

	var stdout bytes.Buffer
	var stderr bytes.Buffer
	if err := run(context.Background(), []string{"status", "--config", configPath}, &stdout, &stderr); err != nil {
		t.Fatalf("status failed: %v stderr=%s", err, stderr.String())
	}
	output := stdout.String()
	if !strings.Contains(output, "configured_instances=2") || !strings.Contains(output, "unshared_config=1") {
		t.Fatalf("expected richer status output, got %s", output)
	}
	if !strings.Contains(output, "state=unshared instance_id=inst-hidden") {
		t.Fatalf("expected status output to include configured unshared instance, got %s", output)
	}

	// --json must pass the raw status file through: the session state
	// appears, config-only instances do not.
	stdout.Reset()
	stderr.Reset()
	if err := run(context.Background(), []string{"status", "--config", configPath, "--json"}, &stdout, &stderr); err != nil {
		t.Fatalf("status --json failed: %v stderr=%s", err, stderr.String())
	}
	if !strings.Contains(stdout.String(), "\"session_state\": \"connected\"") {
		t.Fatalf("expected raw status json output, got %s", stdout.String())
	}
	if strings.Contains(stdout.String(), "inst-hidden") {
		t.Fatalf("expected status --json to remain raw status file output, got %s", stdout.String())
	}
}
+
// TestRunDiscoverUsesConfigRootsAndOverridesWithoutMutatingShareState
// verifies that discover merges configured roots with --root overrides,
// classifies instances as shared / known_unshared / discovered_only,
// and never rewrites the config file.
func TestRunDiscoverUsesConfigRootsAndOverridesWithoutMutatingShareState(t *testing.T) {
	configRoot := t.TempDir()
	overrideRoot := t.TempDir()
	configPath := filepath.Join(t.TempDir(), "connector.json")

	// writeManifest drops a minimal instance manifest under
	// root/repo/.codencer/instance.json and returns its path.
	writeManifest := func(root, repo, id, daemonURL string) string {
		t.Helper()
		manifestPath := filepath.Join(root, repo, ".codencer", "instance.json")
		if err := os.MkdirAll(filepath.Dir(manifestPath), 0755); err != nil {
			t.Fatal(err)
		}
		data, err := json.Marshal(domain.InstanceInfo{
			ID:           id,
			RepoRoot:     filepath.Join(root, repo),
			ManifestPath: manifestPath,
			BaseURL:      daemonURL,
		})
		if err != nil {
			t.Fatal(err)
		}
		if err := os.WriteFile(manifestPath, data, 0644); err != nil {
			t.Fatal(err)
		}
		return manifestPath
	}

	sharedManifest := writeManifest(configRoot, "repo-shared", "inst-shared", "http://127.0.0.1:8085")
	discoveredOnlyManifest := writeManifest(overrideRoot, "repo-discovered", "inst-discovered", "http://127.0.0.1:8086")

	// The config knows a shared instance under its own root and an
	// unshared one whose manifest does not exist on disk.
	cfg := &connector.Config{
		RelayURL:       "http://relay.invalid",
		ConnectorID:    "connector-1",
		MachineID:      "machine-1",
		ConfigPath:     configPath,
		DiscoveryRoots: []string{configRoot},
		Instances: []connector.SharedInstanceConfig{
			{InstanceID: "inst-shared", Share: true},
			{InstanceID: "inst-hidden", ManifestPath: filepath.Join(configRoot, "repo-hidden", ".codencer", "instance.json"), Share: false},
		},
	}
	if err := connector.SaveConfig(configPath, cfg); err != nil {
		t.Fatal(err)
	}

	var stdout bytes.Buffer
	var stderr bytes.Buffer
	if err := run(context.Background(), []string{"discover", "--config", configPath, "--root", overrideRoot}, &stdout, &stderr); err != nil {
		t.Fatalf("discover failed: %v stderr=%s", err, stderr.String())
	}
	output := stdout.String()
	if !strings.Contains(output, "state=shared instance_id=inst-shared") {
		t.Fatalf("expected shared discovered output, got %s", output)
	}
	if !strings.Contains(output, "state=known_unshared instance_id=inst-hidden") {
		t.Fatalf("expected known_unshared output, got %s", output)
	}
	if !strings.Contains(output, "state=discovered_only instance_id=inst-discovered") {
		t.Fatalf("expected discovered_only output, got %s", output)
	}
	if !strings.Contains(output, "repo_root="+filepath.Join(configRoot, "repo-shared")) {
		t.Fatalf("expected repo_root for shared instance, got %s", output)
	}
	if !strings.Contains(output, "manifest_path="+sharedManifest) || !strings.Contains(output, "manifest_path="+discoveredOnlyManifest) {
		t.Fatalf("expected manifest paths in discover output, got %s", output)
	}

	// Discovery is read-only with respect to the config file.
	savedCfg, err := connector.LoadConfig(configPath)
	if err != nil {
		t.Fatal(err)
	}
	if len(savedCfg.Instances) != 2 {
		t.Fatalf("expected discover to avoid mutating config, got %+v", savedCfg.Instances)
	}
	if savedCfg.Instances[0].Share != true || savedCfg.Instances[1].Share != false {
		t.Fatalf("expected discover to preserve share state, got %+v", savedCfg.Instances)
	}
}
diff --git a/cmd/codencer-relayd/cli.go b/cmd/codencer-relayd/cli.go
new file mode 100644
index 0000000..da3bc29
--- /dev/null
+++ b/cmd/codencer-relayd/cli.go
@@ -0,0 +1,428 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "os/signal"
+ "strings"
+ "syscall"
+
+ "agent-bridge/internal/relay"
+)
+
// run dispatches the relay CLI. With no arguments (or a leading flag)
// it defaults to serving; otherwise the first argument selects a
// subcommand and the remaining args are passed through to it.
func run(args []string) error {
	// Bare invocation or "-flag ..." keeps backward compatibility with
	// a serve-only binary.
	if len(args) == 0 || strings.HasPrefix(args[0], "-") {
		return runServe(args)
	}

	switch args[0] {
	case "serve":
		return runServe(args[1:])
	case "status":
		return runStatus(args[1:])
	case "connectors":
		return runConnectors(args[1:])
	case "instances":
		return runInstances(args[1:])
	case "audit":
		return runAudit(args[1:])
	case "enrollment-token":
		return runEnrollmentToken(args[1:])
	case "planner-token":
		return runPlannerToken(args[1:])
	case "connector":
		return runConnectorAdmin(args[1:])
	default:
		return fmt.Errorf("unknown relay command %q", args[0])
	}
}
+
// runServe loads the relay config, opens the backing store, and runs
// the relay server until SIGINT/SIGTERM.
func runServe(args []string) error {
	fs := flag.NewFlagSet("serve", flag.ContinueOnError)
	configPath := fs.String("config", "", "Relay config path")
	if err := fs.Parse(args); err != nil {
		return err
	}

	cfg, err := relay.LoadConfig(*configPath)
	if err != nil {
		return err
	}
	store, err := relay.OpenStore(cfg.DBPath)
	if err != nil {
		return err
	}
	defer store.Close()

	server := relay.NewServer(cfg, store)
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()
	// A canceled context is the normal shutdown path, not a failure.
	err = server.Start(ctx)
	if err != nil && !errors.Is(err, context.Canceled) {
		return err
	}
	return nil
}
+
// multiFlag collects repeated occurrences of a string flag, trimming
// whitespace and dropping blank values.
type multiFlag []string

// String renders the collected values as a comma-separated list.
//
// Fix: the original dereferenced the receiver unconditionally, so a
// nil *multiFlag panicked; the sibling multiStringFlag type already
// guards nil — this now matches that behavior.
func (m *multiFlag) String() string {
	if m == nil {
		return ""
	}
	return strings.Join(*m, ",")
}

// Set records one occurrence of the flag. Blank (or all-whitespace)
// values are ignored rather than stored.
func (m *multiFlag) Set(value string) error {
	value = strings.TrimSpace(value)
	if value == "" {
		return nil
	}
	*m = append(*m, value)
	return nil
}
+
// adminTarget captures where and how to send authenticated planner
// requests to a relay.
type adminTarget struct {
	relayURL string // base URL, no trailing slash
	token    string // planner bearer token
	asJSON   bool   // emit the raw response body instead of pretty-printing
}
+
+func runStatus(args []string) error {
+ target, err := parseAdminTarget("status", args)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/v2/status")
+}
+
+func runConnectors(args []string) error {
+ target, err := parseAdminTarget("connectors", args)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/v2/connectors")
+}
+
+func runInstances(args []string) error {
+ target, err := parseAdminTarget("instances", args)
+ if err != nil {
+ return err
+ }
+ return target.get("/api/v2/instances")
+}
+
// runAudit fetches recent audit events from the relay, bounded by
// --limit, and prints them.
func runAudit(args []string) error {
	fs := flag.NewFlagSet("audit", flag.ContinueOnError)
	configPath := fs.String("config", "", "Relay config path")
	relayURL := fs.String("relay-url", "", "Relay base URL")
	token := fs.String("token", "", "Planner bearer token")
	plannerName := fs.String("planner-name", "", "Planner token name from config")
	asJSON := fs.Bool("json", false, "Print JSON response")
	limit := fs.Int("limit", 100, "Maximum number of audit events to return")
	if err := fs.Parse(args); err != nil {
		return err
	}

	target, err := resolveAdminTarget(*configPath, *relayURL, *token, *plannerName, *asJSON)
	if err != nil {
		return err
	}
	return target.get(fmt.Sprintf("/api/v2/audit?limit=%d", *limit))
}
+
// runEnrollmentToken handles "enrollment-token create": it mints a
// short-lived connector enrollment token via the relay admin API.
func runEnrollmentToken(args []string) error {
	// Only the "create" verb exists today.
	if len(args) == 0 || args[0] != "create" {
		return fmt.Errorf("usage: codencer-relayd enrollment-token create [flags]")
	}
	fs := flag.NewFlagSet("enrollment-token create", flag.ContinueOnError)
	configPath := fs.String("config", "", "Relay config path")
	relayURL := fs.String("relay-url", "", "Relay base URL")
	token := fs.String("token", "", "Planner bearer token")
	plannerName := fs.String("planner-name", "", "Planner token name from config")
	label := fs.String("label", "local-dev", "Enrollment token label")
	expiresInSeconds := fs.Int("expires-in-seconds", 600, "Enrollment token lifetime in seconds")
	asJSON := fs.Bool("json", false, "Print JSON response")
	if err := fs.Parse(args[1:]); err != nil {
		return err
	}

	target, err := resolveAdminTarget(*configPath, *relayURL, *token, *plannerName, *asJSON)
	if err != nil {
		return err
	}
	body, err := json.Marshal(map[string]any{
		"label":              *label,
		"expires_in_seconds": *expiresInSeconds,
	})
	if err != nil {
		return err
	}
	return target.post("/api/v2/connectors/enrollment-tokens", body)
}
+
// runPlannerToken handles "planner-token create": it generates a random
// planner bearer token locally and optionally writes it into the relay
// config file. The relay must be restarted to pick up the new token.
func runPlannerToken(args []string) error {
	// Only the "create" verb exists today.
	if len(args) == 0 || args[0] != "create" {
		return fmt.Errorf("usage: codencer-relayd planner-token create [flags]")
	}
	fs := flag.NewFlagSet("planner-token create", flag.ContinueOnError)
	configPath := fs.String("config", "", "Relay config path for optional write-back")
	name := fs.String("name", "operator", "Planner token name")
	writeConfig := fs.Bool("write-config", false, "Write the generated token into the relay config file")
	asJSON := fs.Bool("json", false, "Print JSON output")
	entropyBytes := fs.Int("entropy-bytes", 32, "Random entropy bytes before base64url encoding")
	var scopes multiFlag
	var instanceIDs multiFlag
	fs.Var(&scopes, "scope", "Planner scope; repeat to add more")
	fs.Var(&instanceIDs, "instance", "Planner-scoped instance ID; repeat to add more")
	if err := fs.Parse(args[1:]); err != nil {
		return err
	}

	// Refuse weak tokens; default scope is the wildcard.
	if *entropyBytes < 16 {
		return fmt.Errorf("entropy-bytes must be at least 16")
	}
	if len(scopes) == 0 {
		scopes = append(scopes, "*")
	}
	token, err := randomBearerToken(*entropyBytes)
	if err != nil {
		return err
	}

	// Copy the flag slices so later flag mutation cannot alias the entry.
	entry := relay.PlannerTokenConfig{
		Name:        *name,
		Token:       token,
		Scopes:      append([]string(nil), scopes...),
		InstanceIDs: append([]string(nil), instanceIDs...),
	}

	if *writeConfig {
		if *configPath == "" {
			return fmt.Errorf("--config is required with --write-config")
		}
		cfg, err := loadRawRelayConfig(*configPath)
		if err != nil {
			return err
		}
		// Migrate a legacy single planner_token into the token list so
		// the new entry does not clobber it.
		if cfg.PlannerToken != "" {
			cfg.PlannerTokens = append([]relay.PlannerTokenConfig{{
				Name:   "default",
				Token:  cfg.PlannerToken,
				Scopes: []string{"*"},
			}}, cfg.PlannerTokens...)
			cfg.PlannerToken = ""
		}
		cfg.PlannerTokens = upsertPlannerToken(cfg.PlannerTokens, entry)
		if err := relay.SaveConfig(*configPath, cfg); err != nil {
			return err
		}
	}

	output := map[string]any{
		"name":             entry.Name,
		"token":            entry.Token,
		"scopes":           entry.Scopes,
		"instance_ids":     entry.InstanceIDs,
		"config_entry":     entry,
		"config_path":      *configPath,
		"write_config":     *writeConfig,
		"restart_required": true,
	}
	return printOutput(output, *asJSON)
}
+
+func runConnectorAdmin(args []string) error {
+ if len(args) < 2 {
+ return fmt.Errorf("usage: codencer-relayd connector [flags]")
+ }
+ action := args[0]
+ if action != "enable" && action != "disable" {
+ return fmt.Errorf("unknown connector action %q", action)
+ }
+ connectorID := args[1]
+
+ fs := flag.NewFlagSet("connector "+action, flag.ContinueOnError)
+ configPath := fs.String("config", "", "Relay config path")
+ relayURL := fs.String("relay-url", "", "Relay base URL")
+ token := fs.String("token", "", "Planner bearer token")
+ plannerName := fs.String("planner-name", "", "Planner token name from config")
+ asJSON := fs.Bool("json", false, "Print JSON response")
+ if err := fs.Parse(args[2:]); err != nil {
+ return err
+ }
+
+ target, err := resolveAdminTarget(*configPath, *relayURL, *token, *plannerName, *asJSON)
+ if err != nil {
+ return err
+ }
+ return target.post(fmt.Sprintf("/api/v2/connectors/%s/%s", connectorID, action), nil)
+}
+
+func parseAdminTarget(command string, args []string) (*adminTarget, error) {
+ fs := flag.NewFlagSet(command, flag.ContinueOnError)
+ configPath := fs.String("config", "", "Relay config path")
+ relayURL := fs.String("relay-url", "", "Relay base URL")
+ token := fs.String("token", "", "Planner bearer token")
+ plannerName := fs.String("planner-name", "", "Planner token name from config")
+ asJSON := fs.Bool("json", false, "Print JSON response")
+ if err := fs.Parse(args); err != nil {
+ return nil, err
+ }
+ return resolveAdminTarget(*configPath, *relayURL, *token, *plannerName, *asJSON)
+}
+
// resolveAdminTarget combines explicit --relay-url/--token values with
// values derived from the relay config file. The config file is read
// only when one of the two is missing; a missing planner token is an
// error because every admin endpoint requires bearer auth.
func resolveAdminTarget(configPath, relayURL, token, plannerName string, asJSON bool) (*adminTarget, error) {
	target := &adminTarget{relayURL: strings.TrimRight(relayURL, "/"), token: token, asJSON: asJSON}
	if target.relayURL != "" && target.token != "" {
		return target, nil
	}

	cfg, err := relay.LoadConfig(configPath)
	if err != nil {
		return nil, err
	}
	if target.relayURL == "" {
		target.relayURL = fmt.Sprintf("http://%s:%d", cfg.Host, cfg.Port)
	}
	if target.token == "" {
		target.token = plannerTokenFromConfig(cfg, plannerName)
	}
	if target.token == "" {
		return nil, fmt.Errorf("planner bearer token is required; provide --token or configure planner_token(s)")
	}
	return target, nil
}
+
+func (t *adminTarget) get(path string) error {
+ req, err := http.NewRequest(http.MethodGet, t.relayURL+path, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+t.token)
+ return t.do(req)
+}
+
+func (t *adminTarget) post(path string, body []byte) error {
+ req, err := http.NewRequest(http.MethodPost, t.relayURL+path, bytes.NewReader(body))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Bearer "+t.token)
+ if len(body) > 0 {
+ req.Header.Set("Content-Type", "application/json")
+ }
+ return t.do(req)
+}
+
// do executes the request, fails on HTTP status >= 400 with the
// response body in the error, and otherwise prints the body — raw when
// --json was requested, pretty-printed when it parses as JSON.
func (t *adminTarget) do(req *http.Request) error {
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode >= 400 {
		return fmt.Errorf("%s %s failed (%d): %s", req.Method, req.URL.String(), resp.StatusCode, strings.TrimSpace(string(body)))
	}

	// --json passes the body through untouched.
	if t.asJSON {
		fmt.Println(string(body))
		return nil
	}

	// Best effort: re-indent valid JSON, fall back to the raw body.
	var payload any
	if err := json.Unmarshal(body, &payload); err == nil {
		pretty, err := json.MarshalIndent(payload, "", "  ")
		if err == nil {
			fmt.Println(string(pretty))
			return nil
		}
	}
	fmt.Println(string(body))
	return nil
}
+
+func plannerTokenFromConfig(cfg *relay.Config, name string) string {
+ if cfg == nil {
+ return ""
+ }
+ if cfg.PlannerToken != "" {
+ return cfg.PlannerToken
+ }
+ if len(cfg.PlannerTokens) == 0 {
+ return ""
+ }
+ if name == "" {
+ return cfg.PlannerTokens[0].Token
+ }
+ for _, candidate := range cfg.PlannerTokens {
+ if candidate.Name == name {
+ return candidate.Token
+ }
+ }
+ return ""
+}
+
+func randomBearerToken(entropyBytes int) (string, error) {
+ buf := make([]byte, entropyBytes)
+ if _, err := rand.Read(buf); err != nil {
+ return "", err
+ }
+ return base64.RawURLEncoding.EncodeToString(buf), nil
+}
+
+func upsertPlannerToken(values []relay.PlannerTokenConfig, next relay.PlannerTokenConfig) []relay.PlannerTokenConfig {
+ for i := range values {
+ if values[i].Name == next.Name {
+ values[i] = next
+ return values
+ }
+ }
+ return append(values, next)
+}
+
+func loadRawRelayConfig(path string) (*relay.Config, error) {
+ cfg := relay.DefaultConfig()
+ if path == "" {
+ return cfg, nil
+ }
+ data, err := os.ReadFile(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return cfg, nil
+ }
+ return nil, err
+ }
+ if err := json.Unmarshal(data, cfg); err != nil {
+ return nil, err
+ }
+ return cfg, nil
+}
+
// printOutput renders payload to stdout as JSON: compact when asJSON is
// set, pretty-printed otherwise.
func printOutput(payload any, asJSON bool) error {
	var (
		data []byte
		err  error
	)
	if asJSON {
		data, err = json.Marshal(payload)
	} else {
		data, err = json.MarshalIndent(payload, "", "  ")
	}
	if err != nil {
		return err
	}
	fmt.Println(string(data))
	return nil
}
diff --git a/cmd/codencer-relayd/cli_test.go b/cmd/codencer-relayd/cli_test.go
new file mode 100644
index 0000000..8cc60ac
--- /dev/null
+++ b/cmd/codencer-relayd/cli_test.go
@@ -0,0 +1,59 @@
+package main
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+)
+
// TestRunAuditUsesLimitQuery asserts that the audit subcommand sends
// the planner bearer token and --limit query parameter and
// pretty-prints the JSON response.
func TestRunAuditUsesLimitQuery(t *testing.T) {
	var gotAuth string
	var gotQuery string
	// Fake relay that records the request and returns two audit events.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotAuth = r.Header.Get("Authorization")
		gotQuery = r.URL.RawQuery
		_, _ = w.Write([]byte(`[{"action":"abort_run"},{"action":"disable_connector"}]`))
	}))
	defer server.Close()

	output := captureStdout(t, func() {
		if err := run([]string{"audit", "--relay-url", server.URL, "--token", "planner-token", "--limit", "2"}); err != nil {
			t.Fatalf("run audit: %v", err)
		}
	})

	if gotAuth != "Bearer planner-token" {
		t.Fatalf("expected bearer auth header, got %q", gotAuth)
	}
	if gotQuery != "limit=2" {
		t.Fatalf("expected limit query, got %q", gotQuery)
	}
	// The indented key form proves the body was pretty-printed.
	if !strings.Contains(output, `"action": "abort_run"`) {
		t.Fatalf("expected pretty audit output, got %s", output)
	}
}
+
// captureStdout redirects os.Stdout to a pipe while fn runs and
// returns everything fn wrote.
//
// Fixes over the original: the reader end was never closed (fd leak),
// and when fn failed the test (t.Fatal → runtime.Goexit) the writer
// stayed open. Restore and close are now deferred so both pipe ends are
// released on every exit path. Note the pipe is drained only after fn
// returns, so fn must write less than the OS pipe buffer (typically
// 64 KiB) or it will block.
func captureStdout(t *testing.T, fn func()) string {
	t.Helper()
	reader, writer, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	original := os.Stdout
	os.Stdout = writer
	defer func() {
		os.Stdout = original
		_ = writer.Close() // no-op if already closed below
		_ = reader.Close()
	}()

	fn()

	if err := writer.Close(); err != nil {
		t.Fatal(err)
	}
	data, err := io.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}
	return string(data)
}
diff --git a/cmd/codencer-relayd/main.go b/cmd/codencer-relayd/main.go
new file mode 100644
index 0000000..50a6c06
--- /dev/null
+++ b/cmd/codencer-relayd/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+ "log"
+ "os"
+)
+
// main delegates to run with the CLI arguments and exits non-zero via
// log.Fatal when a command fails.
func main() {
	if err := run(os.Args[1:]); err != nil {
		log.Fatal(err)
	}
}
diff --git a/cmd/mcp-sdk-smoke/main.go b/cmd/mcp-sdk-smoke/main.go
new file mode 100644
index 0000000..d47a429
--- /dev/null
+++ b/cmd/mcp-sdk-smoke/main.go
@@ -0,0 +1,330 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/modelcontextprotocol/go-sdk/mcp"
+)
+
+type authRoundTripper struct {
+ base http.RoundTripper
+ authorization string
+ origin string
+}
+
+func (rt authRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ base := rt.base
+ if base == nil {
+ base = http.DefaultTransport
+ }
+ cloned := req.Clone(req.Context())
+ cloned.Header = req.Header.Clone()
+ if rt.authorization != "" {
+ cloned.Header.Set("Authorization", rt.authorization)
+ }
+ if rt.origin != "" {
+ cloned.Header.Set("Origin", rt.origin)
+ }
+ return base.RoundTrip(cloned)
+}
+
// instanceRecord is the subset of codencer.list_instances output this
// smoke tool decodes.
type instanceRecord struct {
	InstanceID string `json:"instance_id"`
}

// stepRecord is the subset of codencer.submit_task output this smoke
// tool decodes.
type stepRecord struct {
	ID    string `json:"id"`
	State string `json:"state"`
}

// waitRecord is the subset of codencer.wait_step output this smoke
// tool decodes.
type waitRecord struct {
	StepID   string `json:"step_id"`
	State    string `json:"state"`
	Terminal bool   `json:"terminal"`
	TimedOut bool   `json:"timed_out"`
}
+
+type smokeOutput struct {
+ SessionID string `json:"session_id"`
+ ProtocolVersion string `json:"protocol_version"`
+ InstanceID string `json:"instance_id"`
+ RunID string `json:"run_id"`
+ StepID string `json:"step_id"`
+ StepState string `json:"step_state"`
+ ToolNames []string
+ Result any `json:"result,omitempty"`
+ Validations any `json:"validations,omitempty"`
+ Logs any `json:"logs,omitempty"`
+ RunGates any `json:"run_gates,omitempty"`
+ Artifacts any `json:"artifacts,omitempty"`
+ ArtifactContent any `json:"artifact_content,omitempty"`
+}
+
// main runs the smoke flow with OS-provided args and streams, exiting
// non-zero with a prefixed error message on failure.
func main() {
	if err := run(context.Background(), os.Args[1:], os.Stdout, os.Stderr); err != nil {
		fmt.Fprintf(os.Stderr, "mcp-sdk-smoke: %v\n", err)
		os.Exit(1)
	}
}
+
+func run(ctx context.Context, args []string, stdout, stderr io.Writer) error {
+ fs := flag.NewFlagSet("mcp-sdk-smoke", flag.ContinueOnError)
+ fs.SetOutput(stderr)
+ endpoint := fs.String("endpoint", "http://127.0.0.1:8090/mcp", "MCP endpoint")
+ token := fs.String("token", "", "Bearer token for the target MCP surface")
+ origin := fs.String("origin", "", "Optional Origin header for browser-style MCP requests")
+ instanceID := fs.String("instance-id", "", "Target instance id; defaults to the first shared instance")
+ runID := fs.String("run-id", fmt.Sprintf("sdk-smoke-%d", time.Now().Unix()), "Run id to create")
+ projectID := fs.String("project-id", "sdk-smoke-project", "Project id for the run")
+ goal := fs.String("goal", "Verify official Go SDK interoperability", "Task goal for submit_task")
+ adapterProfile := fs.String("adapter-profile", "", "Optional adapter profile for submit_task")
+ validationCommand := fs.String("validation-command", "go build ./...", "Optional validation command to attach; set empty to disable")
+ waitTimeoutMS := fs.Int("wait-timeout-ms", 5000, "wait_step timeout in milliseconds")
+ waitIntervalMS := fs.Int("wait-interval-ms", 50, "wait_step poll interval in milliseconds")
+ jsonOutput := fs.Bool("json", true, "Print JSON output")
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+ if strings.TrimSpace(*token) == "" {
+ return errors.New("--token is required")
+ }
+
+ httpClient := &http.Client{
+ Transport: authRoundTripper{
+ authorization: "Bearer " + strings.TrimSpace(*token),
+ origin: strings.TrimSpace(*origin),
+ },
+ }
+ client := mcp.NewClient(&mcp.Implementation{
+ Name: "codencer-mcp-sdk-smoke",
+ Version: "1.0.0",
+ }, nil)
+ session, err := client.Connect(ctx, &mcp.StreamableClientTransport{
+ Endpoint: *endpoint,
+ HTTPClient: httpClient,
+ }, nil)
+ if err != nil {
+ return fmt.Errorf("connect to MCP endpoint: %w", err)
+ }
+ defer func() { _ = session.Close() }()
+
+ tools, err := session.ListTools(ctx, nil)
+ if err != nil {
+ return fmt.Errorf("list tools: %w", err)
+ }
+ toolNames := make([]string, 0, len(tools.Tools))
+ for _, tool := range tools.Tools {
+ toolNames = append(toolNames, tool.Name)
+ }
+
+ if strings.TrimSpace(*instanceID) == "" {
+ result, err := callTool(ctx, session, "codencer.list_instances", map[string]any{})
+ if err != nil {
+ return err
+ }
+ var instances []instanceRecord
+ if err := decodeStructured(result.StructuredContent, &instances); err != nil {
+ return fmt.Errorf("decode list_instances: %w", err)
+ }
+ if len(instances) == 0 || strings.TrimSpace(instances[0].InstanceID) == "" {
+ return errors.New("no shared instances were returned by the MCP surface")
+ }
+ *instanceID = instances[0].InstanceID
+ }
+
+ if _, err := callTool(ctx, session, "codencer.start_run", map[string]any{
+ "instance_id": *instanceID,
+ "payload": map[string]any{
+ "id": *runID,
+ "project_id": *projectID,
+ },
+ }); err != nil {
+ return err
+ }
+
+ task := map[string]any{
+ "version": "v1",
+ "goal": *goal,
+ }
+ if strings.TrimSpace(*adapterProfile) != "" {
+ task["adapter_profile"] = strings.TrimSpace(*adapterProfile)
+ }
+ if strings.TrimSpace(*validationCommand) != "" {
+ task["validations"] = []map[string]any{{
+ "name": "bridge-build",
+ "command": strings.TrimSpace(*validationCommand),
+ }}
+ }
+ submitted, err := callTool(ctx, session, "codencer.submit_task", map[string]any{
+ "instance_id": *instanceID,
+ "run_id": *runID,
+ "task": task,
+ })
+ if err != nil {
+ return err
+ }
+ var step stepRecord
+ if err := decodeStructured(submitted.StructuredContent, &step); err != nil {
+ return fmt.Errorf("decode submit_task response: %w", err)
+ }
+ if strings.TrimSpace(step.ID) == "" {
+ return errors.New("submit_task did not return a step id")
+ }
+
+ waited, err := callTool(ctx, session, "codencer.wait_step", map[string]any{
+ "instance_id": *instanceID,
+ "step_id": step.ID,
+ "timeout_ms": *waitTimeoutMS,
+ "interval_ms": *waitIntervalMS,
+ })
+ if err != nil {
+ return err
+ }
+ var waitInfo waitRecord
+ if err := decodeStructured(waited.StructuredContent, &waitInfo); err != nil {
+ return fmt.Errorf("decode wait_step response: %w", err)
+ }
+ if !waitInfo.Terminal {
+ return fmt.Errorf("wait_step did not reach a terminal state: %+v", waitInfo)
+ }
+
+ result, err := callTool(ctx, session, "codencer.get_step_result", map[string]any{
+ "instance_id": *instanceID,
+ "step_id": step.ID,
+ })
+ if err != nil {
+ return err
+ }
+ validations, err := callTool(ctx, session, "codencer.get_step_validations", map[string]any{
+ "instance_id": *instanceID,
+ "step_id": step.ID,
+ })
+ if err != nil {
+ return err
+ }
+ logs, err := callTool(ctx, session, "codencer.get_step_logs", map[string]any{
+ "instance_id": *instanceID,
+ "step_id": step.ID,
+ })
+ runGates, err := callTool(ctx, session, "codencer.list_run_gates", map[string]any{
+ "instance_id": *instanceID,
+ "run_id": *runID,
+ })
+ if err != nil {
+ return err
+ }
+ artifacts, err := callTool(ctx, session, "codencer.list_step_artifacts", map[string]any{
+ "instance_id": *instanceID,
+ "step_id": step.ID,
+ })
+ if err != nil {
+ return err
+ }
+
+ output := smokeOutput{
+ SessionID: session.ID(),
+ ProtocolVersion: session.InitializeResult().ProtocolVersion,
+ InstanceID: *instanceID,
+ RunID: *runID,
+ StepID: step.ID,
+ StepState: waitInfo.State,
+ ToolNames: toolNames,
+ Result: result.StructuredContent,
+ Validations: validations.StructuredContent,
+ Logs: toolContentOrSkip(logs, err),
+ RunGates: runGates.StructuredContent,
+ Artifacts: artifacts.StructuredContent,
+ }
+
+ var artifactList []map[string]any
+ if err := decodeStructured(artifacts.StructuredContent, &artifactList); err == nil && len(artifactList) > 0 {
+ if artifactID, _ := artifactList[0]["id"].(string); artifactID != "" {
+ artifactContent, err := callTool(ctx, session, "codencer.get_artifact_content", map[string]any{
+ "artifact_id": artifactID,
+ })
+ output.ArtifactContent = toolContentOrSkip(artifactContent, err)
+ }
+ }
+
+ if *jsonOutput {
+ return writeJSON(stdout, output)
+ }
+ _, err = fmt.Fprintf(stdout, "session_id=%s protocol=%s instance_id=%s run_id=%s step_id=%s step_state=%s\n",
+ output.SessionID,
+ output.ProtocolVersion,
+ output.InstanceID,
+ output.RunID,
+ output.StepID,
+ output.StepState,
+ )
+ return err
+}
+
+func callTool(ctx context.Context, session *mcp.ClientSession, name string, args any) (*mcp.CallToolResult, error) {
+ result, err := session.CallTool(ctx, &mcp.CallToolParams{
+ Name: name,
+ Arguments: args,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("%s failed: %w", name, err)
+ }
+ if result.IsError {
+ return nil, fmt.Errorf("%s failed: %s", name, resultErrorText(result))
+ }
+ return result, nil
+}
+
+func resultErrorText(result *mcp.CallToolResult) string {
+ if result == nil {
+ return "tool error"
+ }
+ var parts []string
+ for _, content := range result.Content {
+ if text, ok := content.(*mcp.TextContent); ok && strings.TrimSpace(text.Text) != "" {
+ parts = append(parts, strings.TrimSpace(text.Text))
+ }
+ }
+ if len(parts) == 0 {
+ return "tool returned isError=true"
+ }
+ return strings.Join(parts, "; ")
+}
+
// decodeStructured round-trips value through JSON into out, converting
// loosely-typed structured tool output into a concrete Go type.
func decodeStructured(value any, out any) error {
	encoded, err := json.Marshal(value)
	if err != nil {
		return err
	}
	return json.Unmarshal(encoded, out)
}
+
+func writeJSON(stdout io.Writer, value any) error {
+ data, err := json.MarshalIndent(value, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintln(stdout, string(data))
+ return err
+}
+
+func toolContentOrSkip(result *mcp.CallToolResult, err error) any {
+ if err != nil {
+ return map[string]any{
+ "skipped": true,
+ "reason": err.Error(),
+ }
+ }
+ if result == nil {
+ return nil
+ }
+ return result.StructuredContent
+}
diff --git a/cmd/orchestratorctl/main.go b/cmd/orchestratorctl/main.go
index e022d18..e25eb59 100644
--- a/cmd/orchestratorctl/main.go
+++ b/cmd/orchestratorctl/main.go
@@ -86,6 +86,7 @@ func printUsage() {
fmt.Println(" direct submissions persist original-input.* and normalized-task.json as attempt evidence")
fmt.Println(" step list [--json] List all task handles in a mission")
fmt.Println(" step state [--json] Check a specific UUID state")
+ fmt.Println("  step retry <uuid> [--wait] [--json]            Re-dispatch an existing UUID")
fmt.Println(" step wait [--interval d] [--timeout d] [--json] Poll a specific UUID until completion")
fmt.Println("\n3. Evidence & Inspection (The Truth):")
@@ -421,7 +422,7 @@ func runWait(runID string, interval, timeout time.Duration, asJSON bool) int {
func handleStepCommand(args []string) {
if len(args) < 1 {
- fmt.Println("Usage: orchestratorctl step [args]")
+ fmt.Println("Usage: orchestratorctl step <subcommand> [args]")
os.Exit(exitCodeUsage)
}
@@ -453,6 +454,12 @@ func handleStepCommand(args []string) {
os.Exit(exitCodeUsage)
}
stepState(subArgs[1], hasFlag(subArgs[2:], "--json"))
+ case "retry":
+ if len(subArgs) < 2 {
+ fmt.Println("Usage: orchestratorctl step retry <step-id> [--wait] [--json]")
+ os.Exit(exitCodeUsage)
+ }
+ stepRetry(subArgs[1], hasFlag(subArgs[2:], "--wait"), hasFlag(subArgs[2:], "--json"))
case "result":
if len(subArgs) < 2 {
fmt.Println("Usage: orchestratorctl step result [--json]")
@@ -816,6 +823,37 @@ func stepValidations(stepID string, asJSON bool) {
}
}
+func stepRetry(stepID string, shouldWait, asJSON bool) {
+ req, _ := http.NewRequest(http.MethodPost, orchestratordURL+"/api/v1/steps/"+stepID+"/retry", nil)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ failCLI(asJSON, exitCodeInfrastructure, "connecting to orchestratord", err.Error())
+ }
+ defer resp.Body.Close()
+
+ body, _ := io.ReadAll(resp.Body)
+ if resp.StatusCode >= 400 {
+ failHTTP(asJSON, resp.StatusCode, body)
+ }
+
+ if shouldWait {
+ fmt.Fprintf(os.Stderr, "==> Auto-waiting for retried Step %s...\n", stepID)
+ os.Exit(stepWait(stepID, 2*time.Second, 0, asJSON))
+ }
+
+ payload := map[string]any{
+ "step_id": stepID,
+ "action": "retry",
+ "status": "accepted",
+ }
+ if asJSON {
+ emitJSONDocument(payload)
+ return
+ }
+ fmt.Printf("Retry accepted for step %s\n", stepID)
+ fmt.Printf("[GUIDE] To monitor transition:\n ./bin/orchestratorctl step wait %s\n", stepID)
+}
+
func stepWait(stepID string, interval, timeout time.Duration, asJSON bool) int {
ticker := time.NewTicker(interval)
defer ticker.Stop()
@@ -840,27 +878,12 @@ func stepWait(stepID string, interval, timeout time.Duration, asJSON bool) int {
}
return exitCodeTimeout
default:
- resp, err := http.Get(orchestratordURL + "/api/v1/steps/" + stepID + "/result")
- if err != nil {
- failCLI(asJSON, exitCodeInfrastructure, "connecting to orchestratord", err.Error())
- }
-
- if resp.StatusCode >= 400 {
- body, _ := io.ReadAll(resp.Body)
- resp.Body.Close()
- failHTTP(asJSON, resp.StatusCode, body)
- }
-
- body, _ := io.ReadAll(resp.Body)
- resp.Body.Close()
+ step, _ := fetchStep(stepID, asJSON)
- var result domain.ResultSpec
- if err := json.Unmarshal(body, &result); err != nil {
- failCLI(asJSON, exitCodeInfrastructure, "parsing response", err.Error())
- }
-
- // Check for terminal or intervention-required states
- if result.State.IsTerminal() || result.State == domain.StepStateNeedsApproval || result.State == domain.StepStateNeedsManualAttention {
+ // Wait on the persisted step lifecycle, not an early attempt result snapshot.
+ // This keeps submit --wait from racing the local workspace unlock/finalization path.
+ if step.State.IsTerminal() || step.State == domain.StepStateNeedsApproval || step.State == domain.StepStateNeedsManualAttention {
+ result, body := fetchStepResult(stepID, asJSON)
fmt.Fprintf(os.Stderr, "\n[BRIDGE] Mission Handle %s reached terminal condition: %s\n", stepID, result.State)
switch result.State {
@@ -906,6 +929,44 @@ func stepWait(stepID string, interval, timeout time.Duration, asJSON bool) int {
}
}
+func fetchStep(stepID string, asJSON bool) (domain.Step, []byte) {
+ resp, err := http.Get(orchestratordURL + "/api/v1/steps/" + stepID)
+ if err != nil {
+ failCLI(asJSON, exitCodeInfrastructure, "connecting to orchestratord", err.Error())
+ }
+ defer resp.Body.Close()
+
+ body, _ := io.ReadAll(resp.Body)
+ if resp.StatusCode >= 400 {
+ failHTTP(asJSON, resp.StatusCode, body)
+ }
+
+ var step domain.Step
+ if err := json.Unmarshal(body, &step); err != nil {
+ failCLI(asJSON, exitCodeInfrastructure, "parsing response", err.Error())
+ }
+ return step, body
+}
+
+func fetchStepResult(stepID string, asJSON bool) (domain.ResultSpec, []byte) {
+ resp, err := http.Get(orchestratordURL + "/api/v1/steps/" + stepID + "/result")
+ if err != nil {
+ failCLI(asJSON, exitCodeInfrastructure, "connecting to orchestratord", err.Error())
+ }
+ defer resp.Body.Close()
+
+ body, _ := io.ReadAll(resp.Body)
+ if resp.StatusCode >= 400 {
+ failHTTP(asJSON, resp.StatusCode, body)
+ }
+
+ var result domain.ResultSpec
+ if err := json.Unmarshal(body, &result); err != nil {
+ failCLI(asJSON, exitCodeInfrastructure, "parsing response", err.Error())
+ }
+ return result, body
+}
+
func runDoctor() {
fmt.Println("==> Verifying local environment...")
@@ -1235,6 +1296,7 @@ func handleInstanceCommand(args []string) {
}
fmt.Printf("--- Codencer Instance Identity ---\n")
+ fmt.Printf("Instance ID: %s\n", info.ID)
fmt.Printf("Version: %s\n", info.Version)
fmt.Printf("Repo Root: %s\n", info.RepoRoot)
fmt.Printf("Base URL: %s\n", info.BaseURL)
diff --git a/cmd/orchestratorctl/main_test.go b/cmd/orchestratorctl/main_test.go
index 06e1f08..befb80b 100644
--- a/cmd/orchestratorctl/main_test.go
+++ b/cmd/orchestratorctl/main_test.go
@@ -48,6 +48,9 @@ func TestSubmitWaitJSONEmitsSingleTerminalPayload(t *testing.T) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusAccepted)
_, _ = w.Write([]byte(`{"id":"step-123","phase_id":"phase-execution-run-123","title":"T","goal":"G","adapter":"codex","created_at":"2026-04-06T00:00:00Z","updated_at":"2026-04-06T00:00:00Z"}`))
+ case r.Method == http.MethodGet && r.URL.Path == "/api/v1/steps/step-123":
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"id":"step-123","phase_id":"phase-execution-run-123","title":"T","goal":"G","adapter":"codex","state":"completed","created_at":"2026-04-06T00:00:00Z","updated_at":"2026-04-06T00:00:00Z"}`))
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/steps/step-123/result":
w.Header().Set("Content-Type", "application/json")
_, _ = w.Write([]byte(`{"version":"v1","run_id":"run-123","phase_id":"phase-execution-run-123","step_id":"step-123","state":"completed","summary":"done"}`))
@@ -303,6 +306,9 @@ func TestSubmitGoalWaitJSONEmitsSingleTerminalPayload(t *testing.T) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusAccepted)
_, _ = w.Write([]byte(`{"id":"step-123","phase_id":"phase-execution-run-123","title":"Direct task","goal":"Fix it","created_at":"2026-04-06T00:00:00Z","updated_at":"2026-04-06T00:00:00Z"}`))
+ case r.Method == http.MethodGet && r.URL.Path == "/api/v1/steps/step-123":
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"id":"step-123","phase_id":"phase-execution-run-123","title":"Direct task","goal":"Fix it","state":"completed","created_at":"2026-04-06T00:00:00Z","updated_at":"2026-04-06T00:00:00Z"}`))
case r.Method == http.MethodGet && r.URL.Path == "/api/v1/steps/step-123/result":
w.Header().Set("Content-Type", "application/json")
_, _ = w.Write([]byte(`{"version":"v1","run_id":"run-123","phase_id":"phase-execution-run-123","step_id":"step-123","state":"completed","summary":"done"}`))
@@ -362,12 +368,16 @@ func TestStepWaitJSONExitCodes(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path != "/api/v1/steps/step-123/result" {
+ switch r.URL.Path {
+ case "/api/v1/steps/step-123":
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"id":"step-123","phase_id":"phase-1","state":"` + tt.state + `","updated_at":"2026-04-06T00:00:00Z"}`))
+ case "/api/v1/steps/step-123/result":
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"run_id":"run-123","phase_id":"phase-1","step_id":"step-123","state":"` + tt.state + `","summary":"state test"}`))
+ default:
http.NotFound(w, r)
- return
}
- w.Header().Set("Content-Type", "application/json")
- _, _ = w.Write([]byte(`{"run_id":"run-123","phase_id":"phase-1","step_id":"step-123","state":"` + tt.state + `","summary":"state test"}`))
}))
defer server.Close()
@@ -383,6 +393,85 @@ func TestStepWaitJSONExitCodes(t *testing.T) {
}
}
+func TestStepWaitWaitsForPersistedStepStateBeforeReturningResult(t *testing.T) {
+ var stepCalls int
+ var resultCalls int
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/api/v1/steps/step-123":
+ stepCalls++
+ state := "running"
+ if stepCalls >= 3 {
+ state = "completed"
+ }
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"id":"step-123","phase_id":"phase-1","state":"` + state + `","updated_at":"2026-04-06T00:00:00Z"}`))
+ case "/api/v1/steps/step-123/result":
+ resultCalls++
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"run_id":"run-123","phase_id":"phase-1","step_id":"step-123","state":"completed","summary":"done"}`))
+ default:
+ http.NotFound(w, r)
+ }
+ }))
+ defer server.Close()
+
+ result := runBinary(t, server.URL, "step", "wait", "step-123", "--json", "--interval", "1ms")
+ if result.exitCode != exitCodeSuccess {
+ t.Fatalf("exit code = %d stderr=%s", result.exitCode, result.stderr)
+ }
+ if stepCalls < 3 {
+ t.Fatalf("expected step lifecycle polling before completion, got %d calls", stepCalls)
+ }
+ if resultCalls != 1 {
+ t.Fatalf("expected one terminal result fetch, got %d", resultCalls)
+ }
+ assertSingleJSONDocument(t, result.stdout)
+}
+
+func TestStepRetryJSONWaitsForRetriedStepToFinish(t *testing.T) {
+ var stepCalls int
+ var retryCalls int
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case r.Method == http.MethodPost && r.URL.Path == "/api/v1/steps/step-123/retry":
+ retryCalls++
+ w.WriteHeader(http.StatusAccepted)
+ case r.Method == http.MethodGet && r.URL.Path == "/api/v1/steps/step-123":
+ stepCalls++
+ state := "dispatching"
+ if stepCalls >= 3 {
+ state = "completed"
+ }
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"id":"step-123","phase_id":"phase-1","state":"` + state + `","updated_at":"2026-04-06T00:00:00Z"}`))
+ case r.Method == http.MethodGet && r.URL.Path == "/api/v1/steps/step-123/result":
+ w.Header().Set("Content-Type", "application/json")
+ _, _ = w.Write([]byte(`{"run_id":"run-123","phase_id":"phase-1","step_id":"step-123","state":"completed","summary":"retried"}`))
+ default:
+ http.NotFound(w, r)
+ }
+ }))
+ defer server.Close()
+
+ result := runBinary(t, server.URL, "step", "retry", "step-123", "--wait", "--json")
+ if result.exitCode != exitCodeSuccess {
+ t.Fatalf("exit code = %d stderr=%s stdout=%s", result.exitCode, result.stderr, result.stdout)
+ }
+ if retryCalls != 1 {
+ t.Fatalf("expected one retry request, got %d", retryCalls)
+ }
+ if stepCalls < 3 {
+ t.Fatalf("expected retried step to be polled to completion, got %d state calls", stepCalls)
+ }
+ assertSingleJSONDocument(t, result.stdout)
+ if !strings.Contains(result.stdout, "\"summary\": \"retried\"") {
+ t.Fatalf("expected terminal retry payload, got %s", result.stdout)
+ }
+}
+
func TestRunWaitJSONExitCodes(t *testing.T) {
tests := []struct {
name string
diff --git a/deploy/cloud/.env.example b/deploy/cloud/.env.example
new file mode 100644
index 0000000..07ea680
--- /dev/null
+++ b/deploy/cloud/.env.example
@@ -0,0 +1,20 @@
+# Published cloud HTTP port on the host.
+CODENCER_CLOUD_PORT=8190
+
+# Image build metadata. Override this when validating a different tagged build.
+CODENCER_VERSION=v0.2.0-beta
+
+# Cloud store configuration.
+CODENCER_CLOUD_DB_PATH=/var/lib/codencer/cloud/cloud.db
+CODENCER_CLOUD_MASTER_KEY=replace-with-a-long-random-master-key
+
+# Composed relay runtime bridge configuration.
+RELAY_DB_PATH=/var/lib/codencer/relay/relay.db
+RELAY_PLANNER_TOKEN=replace-with-a-long-random-planner-token
+RELAY_ENROLLMENT_SECRET=replace-with-a-long-random-enrollment-secret
+RELAY_PROXY_TIMEOUT_SECONDS=300
+RELAY_ALLOWED_ORIGINS=
+
+# Worker defaults.
+CLOUD_WORKER_INTERVAL=2m
+CLOUD_WORKER_LIMIT=50
diff --git a/deploy/cloud/Dockerfile b/deploy/cloud/Dockerfile
new file mode 100644
index 0000000..de94546
--- /dev/null
+++ b/deploy/cloud/Dockerfile
@@ -0,0 +1,36 @@
+FROM golang:1.25-bookworm AS builder
+
+WORKDIR /src
+
+ARG VERSION=v0.2.0-beta
+
+COPY go.mod go.sum ./
+RUN go mod download
+
+COPY . .
+
+RUN mkdir -p /out && \
+ go build -ldflags "-X agent-bridge/internal/app.Version=${VERSION}" -o /out/codencer-cloudd ./cmd/codencer-cloudd && \
+ go build -ldflags "-X agent-bridge/internal/app.Version=${VERSION}" -o /out/codencer-cloudctl ./cmd/codencer-cloudctl && \
+ go build -ldflags "-X agent-bridge/internal/app.Version=${VERSION}" -o /out/codencer-cloudworkerd ./cmd/codencer-cloudworkerd && \
+ go build -ldflags "-X agent-bridge/internal/app.Version=${VERSION}" -o /out/codencer-relayd ./cmd/codencer-relayd
+
+FROM debian:bookworm-slim
+
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends ca-certificates curl && \
+ rm -rf /var/lib/apt/lists/*
+
+WORKDIR /app
+
+COPY --from=builder /out/codencer-cloudd /usr/local/bin/codencer-cloudd
+COPY --from=builder /out/codencer-cloudctl /usr/local/bin/codencer-cloudctl
+COPY --from=builder /out/codencer-cloudworkerd /usr/local/bin/codencer-cloudworkerd
+COPY --from=builder /out/codencer-relayd /usr/local/bin/codencer-relayd
+
+RUN mkdir -p /var/lib/codencer/cloud /var/lib/codencer/relay /etc/codencer/cloud /etc/codencer/relay
+
+EXPOSE 8190
+
+ENTRYPOINT ["/usr/local/bin/codencer-cloudd"]
+CMD ["--config", "/etc/codencer/cloud/config.json"]
diff --git a/deploy/cloud/config/cloud.json b/deploy/cloud/config/cloud.json
new file mode 100644
index 0000000..9787cff
--- /dev/null
+++ b/deploy/cloud/config/cloud.json
@@ -0,0 +1,7 @@
+{
+ "host": "0.0.0.0",
+ "port": 8190,
+ "db_path": "/var/lib/codencer/cloud/cloud.db",
+ "master_key": "",
+ "relay_config_path": "/etc/codencer/relay/config.json"
+}
diff --git a/deploy/cloud/config/relay.json b/deploy/cloud/config/relay.json
new file mode 100644
index 0000000..38eb1eb
--- /dev/null
+++ b/deploy/cloud/config/relay.json
@@ -0,0 +1,9 @@
+{
+ "host": "127.0.0.1",
+ "port": 8090,
+ "db_path": "/var/lib/codencer/relay/relay.db",
+ "planner_token": "",
+ "enrollment_secret": "",
+ "proxy_timeout_seconds": 300,
+ "allowed_origins": []
+}
diff --git a/deploy/cloud/docker-compose.yml b/deploy/cloud/docker-compose.yml
new file mode 100644
index 0000000..ff6facb
--- /dev/null
+++ b/deploy/cloud/docker-compose.yml
@@ -0,0 +1,66 @@
+services:
+ cloud:
+ build:
+ context: ../..
+ dockerfile: deploy/cloud/Dockerfile
+ args:
+ VERSION: ${CODENCER_VERSION:-v0.2.0-beta}
+ environment:
+ CODENCER_CLOUD_HOST: ${CODENCER_CLOUD_HOST:-0.0.0.0}
+ CODENCER_CLOUD_PORT: ${CODENCER_CLOUD_INTERNAL_PORT:-8190}
+ CODENCER_CLOUD_DB_PATH: ${CODENCER_CLOUD_DB_PATH:-/var/lib/codencer/cloud/cloud.db}
+ CODENCER_CLOUD_MASTER_KEY: ${CODENCER_CLOUD_MASTER_KEY:?set CODENCER_CLOUD_MASTER_KEY}
+ CODENCER_CLOUD_RELAY_CONFIG: /etc/codencer/relay/config.json
+ RELAY_DB_PATH: ${RELAY_DB_PATH:-/var/lib/codencer/relay/relay.db}
+ RELAY_PLANNER_TOKEN: ${RELAY_PLANNER_TOKEN:?set RELAY_PLANNER_TOKEN}
+ RELAY_ENROLLMENT_SECRET: ${RELAY_ENROLLMENT_SECRET:?set RELAY_ENROLLMENT_SECRET}
+ RELAY_PROXY_TIMEOUT_SECONDS: ${RELAY_PROXY_TIMEOUT_SECONDS:-300}
+ RELAY_ALLOWED_ORIGINS: ${RELAY_ALLOWED_ORIGINS:-}
+ command:
+ - --config
+ - /etc/codencer/cloud/config.json
+ - --relay-config
+ - /etc/codencer/relay/config.json
+ ports:
+ - "${CODENCER_CLOUD_PORT:-8190}:8190"
+ volumes:
+ - cloud-data:/var/lib/codencer/cloud
+ - relay-data:/var/lib/codencer/relay
+ - ./config/cloud.json:/etc/codencer/cloud/config.json:ro
+ - ./config/relay.json:/etc/codencer/relay/config.json:ro
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "curl", "-fsS", "http://127.0.0.1:8190/healthz"]
+ interval: 10s
+ timeout: 5s
+ retries: 12
+ start_period: 10s
+
+ worker:
+ build:
+ context: ../..
+ dockerfile: deploy/cloud/Dockerfile
+ args:
+ VERSION: ${CODENCER_VERSION:-v0.2.0-beta}
+ entrypoint: ["/usr/local/bin/codencer-cloudworkerd"]
+ environment:
+ CODENCER_CLOUD_DB_PATH: ${CODENCER_CLOUD_DB_PATH:-/var/lib/codencer/cloud/cloud.db}
+ CODENCER_CLOUD_MASTER_KEY: ${CODENCER_CLOUD_MASTER_KEY:?set CODENCER_CLOUD_MASTER_KEY}
+ command:
+ - --config
+ - /etc/codencer/cloud/config.json
+ - --interval
+ - ${CLOUD_WORKER_INTERVAL:-2m}
+ - --limit
+ - ${CLOUD_WORKER_LIMIT:-50}
+ depends_on:
+ cloud:
+ condition: service_healthy
+ volumes:
+ - cloud-data:/var/lib/codencer/cloud
+ - ./config/cloud.json:/etc/codencer/cloud/config.json:ro
+ restart: unless-stopped
+
+volumes:
+ cloud-data:
+ relay-data:
diff --git a/deploy/cloud/smoke.sh b/deploy/cloud/smoke.sh
new file mode 100755
index 0000000..cbc92d4
--- /dev/null
+++ b/deploy/cloud/smoke.sh
@@ -0,0 +1,171 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
+STACK_DIR="$ROOT_DIR/deploy/cloud"
+COMPOSE_FILE="$STACK_DIR/docker-compose.yml"
+TMP_DIR="$(mktemp -d "${TMPDIR:-/tmp}/codencer-cloud-stack-smoke.XXXXXX")"
+ENV_FILE="$TMP_DIR/cloud-stack.env"
+BOOTSTRAP_JSON="$TMP_DIR/bootstrap.json"
+STATUS_JSON="$TMP_DIR/status.json"
+INSTALL_JSON="$TMP_DIR/install.json"
+AUDIT_JSON="$TMP_DIR/audit.json"
+
+CLOUD_PORT="${CODENCER_CLOUD_PORT:-18190}"
+COMPOSE_PROJECT_NAME="${COMPOSE_PROJECT_NAME:-codencer-cloud-smoke}"
+KEEP_CLOUD_STACK_SMOKE_STATE="${KEEP_CLOUD_STACK_SMOKE_STATE:-0}"
+CODENCER_VERSION="${CODENCER_VERSION:-v0.2.0-beta}"
+
+have_cmd() {
+ command -v "$1" >/dev/null 2>&1
+}
+
+json_get() {
+ local file="$1"
+ local expr="$2"
+ if have_cmd jq; then
+ jq -r "$expr" "$file"
+ return
+ fi
+ python3 - "$file" "$expr" <<'PY'
+import json
+import sys
+
+path = sys.argv[1]
+expr = sys.argv[2]
+with open(path, "r", encoding="utf-8") as handle:
+ payload = json.load(handle)
+
+value = payload
+for part in expr.strip(".").split("."):
+ if not part:
+ continue
+ if isinstance(value, dict):
+ value = value.get(part, "")
+ else:
+ value = ""
+ break
+if value is None:
+ value = ""
+print(value)
+PY
+}
+
+compose() {
+ docker compose \
+ --project-name "$COMPOSE_PROJECT_NAME" \
+ --env-file "$ENV_FILE" \
+ -f "$COMPOSE_FILE" \
+ "$@"
+}
+
+cleanup() {
+ compose down -v >/dev/null 2>&1 || true
+ if [[ "$KEEP_CLOUD_STACK_SMOKE_STATE" != "1" ]]; then
+ rm -rf "$TMP_DIR"
+ else
+ echo "cloud stack smoke temp dir kept at: $TMP_DIR" >&2
+ fi
+}
+
+trap cleanup EXIT
+
+if ! have_cmd docker; then
+ echo "ERROR: docker is required for deploy/cloud/smoke.sh" >&2
+ exit 1
+fi
+
+if have_cmd openssl; then
+ CLOUD_MASTER_KEY="$(openssl rand -hex 32)"
+ RELAY_PLANNER_TOKEN="planner_$(openssl rand -hex 24)"
+ RELAY_ENROLLMENT_SECRET="enroll_$(openssl rand -hex 24)"
+else
+ CLOUD_MASTER_KEY="$(LC_ALL=C tr -dc 'a-f0-9' </dev/urandom | head -c 64)"
+ RELAY_PLANNER_TOKEN="planner_$(LC_ALL=C tr -dc 'a-f0-9' </dev/urandom | head -c 48)"
+ RELAY_ENROLLMENT_SECRET="enroll_$(LC_ALL=C tr -dc 'a-f0-9' </dev/urandom | head -c 48)"
+fi
+
+cat > "$ENV_FILE" <<EOF
+CODENCER_CLOUD_PORT=$CLOUD_PORT
+CODENCER_VERSION=$CODENCER_VERSION
+CODENCER_CLOUD_MASTER_KEY=$CLOUD_MASTER_KEY
+RELAY_PLANNER_TOKEN=$RELAY_PLANNER_TOKEN
+RELAY_ENROLLMENT_SECRET=$RELAY_ENROLLMENT_SECRET
+EOF
+
+compose build cloud >/dev/null
+
+compose run --rm --entrypoint codencer-cloudctl cloud \
+ bootstrap \
+ --config /etc/codencer/cloud/config.json \
+ --org-slug smoke-org \
+ --org-name "Smoke Org" \
+ --workspace-slug smoke-workspace \
+ --workspace-name "Smoke Workspace" \
+ --project-slug smoke-project \
+ --project-name "Smoke Project" \
+ --token-name smoke-operator \
+ --json > "$BOOTSTRAP_JSON"
+
+BOOTSTRAP_TOKEN="$(json_get "$BOOTSTRAP_JSON" '.token')"
+ORG_ID="$(json_get "$BOOTSTRAP_JSON" '.org.id')"
+WORKSPACE_ID="$(json_get "$BOOTSTRAP_JSON" '.workspace.id')"
+PROJECT_ID="$(json_get "$BOOTSTRAP_JSON" '.project.id')"
+
+if [[ -z "$BOOTSTRAP_TOKEN" || -z "$ORG_ID" || -z "$WORKSPACE_ID" || -z "$PROJECT_ID" ]]; then
+ echo "ERROR: bootstrap output missing required identifiers" >&2
+ cat "$BOOTSTRAP_JSON" >&2
+ exit 1
+fi
+
+compose up -d cloud worker >/dev/null
+
+for _ in $(seq 1 30); do
+ if curl -fsS "http://127.0.0.1:$CLOUD_PORT/healthz" >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+done
+
+curl -fsS -H "Authorization: Bearer $BOOTSTRAP_TOKEN" \
+ "http://127.0.0.1:$CLOUD_PORT/api/cloud/v1/status" > "$STATUS_JSON"
+
+compose run --rm --entrypoint codencer-cloudctl cloud \
+ install create \
+ --cloud-url http://cloud:8190 \
+ --token "$BOOTSTRAP_TOKEN" \
+ --org-id "$ORG_ID" \
+ --workspace-id "$WORKSPACE_ID" \
+ --project-id "$PROJECT_ID" \
+ --connector slack \
+ --name "Slack stack smoke" \
+ --config api_base_url=https://slack.com \
+ --secret token=xoxb-stack-smoke \
+ --secret webhook_secret=stack-secret \
+ --json > "$INSTALL_JSON"
+
+compose run --rm --entrypoint codencer-cloudctl cloud \
+ audit \
+ --cloud-url http://cloud:8190 \
+ --token "$BOOTSTRAP_TOKEN" \
+ --json > "$AUDIT_JSON"
+
+if ! grep -q '"status":"ok"' "$STATUS_JSON"; then
+ echo "ERROR: cloud stack status did not report ok" >&2
+ cat "$STATUS_JSON" >&2
+ exit 1
+fi
+
+if ! grep -q '"connector_key":"slack"' "$INSTALL_JSON"; then
+ echo "ERROR: cloud stack installation create did not return slack installation" >&2
+ cat "$INSTALL_JSON" >&2
+ exit 1
+fi
+
+if ! grep -q '"action":"create_installation"' "$AUDIT_JSON"; then
+ echo "ERROR: cloud stack audit did not record installation creation" >&2
+ cat "$AUDIT_JSON" >&2
+ exit 1
+fi
+
+echo "cloud stack smoke passed on http://127.0.0.1:$CLOUD_PORT"
diff --git a/docs/02_architecture.md b/docs/02_architecture.md
index 4de92a5..b6f6563 100644
--- a/docs/02_architecture.md
+++ b/docs/02_architecture.md
@@ -1,192 +1,122 @@
-> [!NOTE]
-> This is a **design specification** and may not fully reflect the current implementation.
-> For the latest implementation status, see the [Gap Audit](internal/GAP_AUDIT.md).
-
# Architecture
-## High-level architecture
+This document describes the current Codencer v2 runtime architecture.
+
+## High-Level Model
```text
-Planner Chat
+Planner / Chat
|
- | MCP tools / local CLI
+ | Relay HTTP API or Relay MCP
v
-Local MCP Server
+Relay
|
+ | Authenticated outbound websocket
v
-Orchestrator Daemon
+Connector
|
- +--> Policy Engine
- +--> SQLite Run Ledger
- +--> Artifact Store (filesystem)
- +--> Validation Runner
- +--> Workspace / Git Manager
- +--> Adapter Manager
- |
- +--> Codex Adapter
- +--> Claude Adapter
- +--> Qwen Adapter
- +--> IDE Companion Adapter
- +--> IDE Chat Adapter (later)
+ | Narrow allowlisted local API proxy
+ v
+Local Codencer Daemon
+ |
+ +--> SQLite state and settings
+ +--> Artifact store
+ +--> Workspace / git manager
+ +--> Validation runner
+ +--> Adapter dispatch
+ +--> Gate and recovery services
```
-## Core components
+Execution stays local. Planning stays outside Codencer.
+
+## Core Runtime Roles
-### Orchestrator daemon
-System of record.
+### Local daemon
+
+The daemon is the local system of record.
Responsibilities:
-- manage run / phase / step lifecycle
-- dispatch steps
-- supervise processes
+- manage run, step, attempt, and gate lifecycle
- persist state
-- enforce policy
-- collect artifacts
-- expose state
-
-Recommended implementation:
-- Go
-- local HTTP or Unix socket
-- SQLite
-- filesystem artifacts
-
-### MCP server
-Thin bridge exposing safe orchestrator operations:
-- start run
-- start step
-- get status
-- get result
-- approve gate
-- reject gate
-- retry step
-- abort run
-
-### Adapter manager
-Provider-neutral execution contract.
-
-Each adapter should support:
-- Start
-- Poll
-- Cancel
-- CollectArtifacts
-- NormalizeResult
-- Capabilities
-
-### Policy engine
-Decides:
-- continue
-- retry
-- stop for approval
-- fail terminally
-
-Inputs:
-- validations
-- file changes
-- forbidden path touches
-- migrations
-- dependency changes
-- adapter-reported uncertainty
-- timeouts
-
-### Run ledger
-Persist:
+- dispatch adapters
+- collect artifacts and validations
+- expose local `/api/v1` and local compatibility/admin `/mcp/call`
+
+The daemon is not the public internet-facing MCP server.
+
+### Connector
+
+The connector is the outbound bridge between relay and local daemon.
+
+Responsibilities:
+- persist connector identity
+- enroll with relay
+- authenticate with signed challenge/response
+- advertise explicitly shared instances
+- proxy only a narrow local API surface
+
+The connector does not plan and does not execute work directly.
+
+### Relay
+
+The relay is the remote control plane.
+
+Responsibilities:
+- authenticate planners
+- authenticate connectors
+- track online connectors and advertised instances
+- route planner requests to the correct shared local instance
+- persist audit events
+- expose relay HTTP API and relay MCP
+
+The relay is not a planner and not an executor.
+
+## Public Surfaces
+
+### Remote/public
+
+- relay HTTP API under `/api/v2`
+- relay MCP under `/mcp`
+- relay MCP compatibility path `/mcp/call`
+- connector websocket under `/ws/connectors`
+
+### Local/private by default
+
+- daemon HTTP API under `/api/v1`
+- daemon-local `/mcp/call` compatibility/admin bridge
+
+## State And Evidence
+
+The current authoritative state lives in the daemon:
- runs
-- phases
- steps
- attempts
+- gates
- artifacts
- validations
-- gates
+
+The relay stores only the remote control-plane state it needs:
+- connector identity
+- instance advertisement records
+- resource routing hints
- audit events
+- enrollment/challenge state
-### Artifact store
-Deterministic structure like:
+## Trust Boundaries
-```text
-.artifacts/
- runs/
- run-0001/
- manifest.json
- phase-execution/
- step-01/
- attempt-01/
- input.json
- stdout.log
- stderr.log
- result.json
- diff.patch
- changed-files.json
- validations.json
-```
+- planner decides what to do
+- relay authenticates and routes
+- connector limits remote reach to a narrow allowlist
+- daemon executes and records truth locally
+- adapters do local work only
-### Validation runner
-Runs:
-- lint
-- tests
-- build
-- typecheck
-- formatting
-- custom commands from policy/task spec
+There is no raw shell or arbitrary filesystem browsing surface in the relay or connector.
-### Workspace / Git manager
-Responsibilities:
-- detect dirty repo
-- allocate isolated worktree when configured
-- capture diffs
-- cleanup safely
-- prevent overlapping writes
-
-## State machine
-
-### Run states
-- created
-- running
-- paused_for_gate
-- completed
-- failed
-- cancelled
-
-### Step states
-- pending
-- dispatching
-- running
-- collecting_artifacts
-- validating
-- completed
-- completed_with_warnings
-- needs_approval
-- failed_retryable
-- failed_terminal
-- cancelled
-
-## Design rules
-
-### Planner is not source of truth
-Planner suggests.
-Orchestrator owns actual state.
-
-### Adapter is not control plane
-Adapter executes.
-Orchestrator decides lifecycle.
-
-### IDE extension is not orchestrator
-Extension is only a control/visibility surface.
-
-## Why CLI-first
-
-CLI agents are easier for:
-- process supervision
-- stdout/stderr capture
-- timeouts
-- retries
-- cancellation
-- deterministic wrapping
-
-## Why IDE chat automation is later
-
-IDE AI chats are often implemented inside extension-owned webviews or custom panels.
-That makes generic automation brittle.
-So:
-- CLI path is primary
-- IDE companion comes later
-- IDE chat adapters are targeted and optional
+## WSL / Windows Model
+
+The practical default is:
+- daemon, connector, repo, worktrees, and artifacts in WSL/Linux
+- IDE and Antigravity broker on Windows when needed
+- relay wherever the operator hosts the remote control plane
+
+See [WSL / Windows / Antigravity Topology](WSL_WINDOWS_ANTIGRAVITY.md) for detailed placement guidance.
diff --git a/docs/05_dsl_and_mcp.md b/docs/05_dsl_and_mcp.md
index b55a1eb..d478216 100644
--- a/docs/05_dsl_and_mcp.md
+++ b/docs/05_dsl_and_mcp.md
@@ -1,169 +1,72 @@
-> [!NOTE]
-> This is a **design specification** and may not fully reflect the current implementation.
-> For the latest implementation status, see the [Gap Audit](internal/GAP_AUDIT.md).
-
# DSL and MCP
-## Why a DSL
-
-Without a DSL, the system collapses into ad hoc prompt passing.
-
-The DSL should make each step:
-- declarative
-- validated
-- policy-aware
-- provider-neutral
-
-## TaskSpec (Execution Request)
-
-The `TaskSpec` is the canonical contract sent by a **Planner** to the **Bridge**. It defines exactly WHAT needs to be done and the BOUNDARIES of that execution. The Bridge acts as a deterministic relay and executor, ensuring policies are enforced without making planning decisions.
-
-### TaskSpec example
-
-```yaml
-version: v1
-project_id: local-agent-bridge
-run_id: run-0001
-# phase_id: phase-execution-run-123 # Optional, auto-generated if omitted
-# step_id: step-01 # Optional, auto-generated if omitted
-title: Implement Codex adapter invocation and artifact capture
-goal: Build the first working Codex adapter that can execute a step, capture logs, and return a normalized result.
-context:
- summary: >
- This is the first provider adapter. Keep it minimal but production-oriented.
-constraints:
- - Do not introduce cloud functionality.
- - Do not bypass service boundaries.
-allowed_paths:
- - internal/adapters/codex/**
- - internal/service/**
- - internal/domain/**
-forbidden_paths:
- - internal/adapters/claude/**
- - internal/adapters/qwen/**
-validations:
- - name: unit-tests
- command: go test ./...
- - name: lint
- command: golangci-lint run
-acceptance:
- - Codex adapter implements common adapter interface.
- - Logs are captured to artifact storage.
- - Result is normalized into ResultSpec.
-stop_conditions:
- - Adapter interface must be redesigned.
- - State machine must be rewritten.
-policy_bundle: safe_refactor
-adapter_profile: codex
-timeout_seconds: 300
-is_simulation: false
-```
-
-## ResultSpec example
-
-```json
-{
- "version": "v1",
- "run_id": "run-0001",
- "phase_id": "phase-04-codex-adapter",
- "step_id": "step-01",
- "attempt_id": "attempt-01",
- "adapter": "codex",
- "state": "completed_with_warnings",
- "is_simulation": false,
- "summary": "Implemented Codex adapter invocation and result normalization.",
- "files_changed": [
- "internal/adapters/codex/adapter.go",
- "internal/adapters/codex/invoke.go"
- ],
- "validations": [
- {"name": "unit-tests", "state": "passed"},
- {"name": "lint", "state": "failed"}
- ],
- "needs_human_decision": false,
- "warnings": ["Lint failed due to an unused import."],
- "questions": [],
- "artifacts": {
- "stdout_log": ".artifacts/.../stdout.log",
- "stderr_log": ".artifacts/.../stderr.log",
- "diff_patch": ".artifacts/.../diff.patch"
- }
-}
-```
-
-## PolicySpec example
-
-```yaml
-version: v1
-name: safe_refactor
-continue_when:
- all_validations_pass: true
- max_changed_files: 12
- no_forbidden_paths_touched: true
- no_migrations_detected: true
-gate_when:
- any_validation_fails: true
- dependency_files_changed: true
- migrations_detected: true
- changed_files_over: 12
- unresolved_questions_present: true
-retry_when:
- adapter_process_failed: true
- timeout_once: true
-fail_when:
- timeout_count_over: 2
- artifact_persistence_failed: true
-```
-
-### Execution States
-The `state` property in the result payload follows strict relay semantics:
-
-| State | Who Decides? | Meaning |
-| :--- | :--- | :--- |
-| `pending` | Planner | Waiting for dispatch. |
-| `running` | Bridge | Active execution. |
-| `completed` | Bridge/Policy | Success criteria met. |
-| `completed_with_warnings` | Bridge/Policy | Success with minor issues. |
-| `failed_retryable` | Bridge | Transient failure, retry possible. |
-| `failed_terminal` | Bridge | Non-retryable failure. |
-| `timeout` | Bridge | Limit exceeded. |
-| `needs_approval` | Bridge/Policy | Policy gate hit. |
-| `needs_manual_attention`| Bridge | Intervention reported. |
-| `cancelled` | Planner | Aborted by user. |
-
-## Simulation Semantics
-
-Simulation mode provides a high-fidelity environment for testing the bridge's state machine without real adapter execution.
-
-> [!IMPORTANT]
-> Simulation is intended for development and automated testing only. It does not produce valid metrics for adapter benchmarking.
-
-It allows planners to:
-1. Verify the end-to-end orchestration state machine.
-2. Test policy enforcement without executing heavy local binaries.
-3. Validate UI and notification flows.
-
-**IMPORTANT**: Simulation results are produced by stub adapters and do NOT represent real agency. Telemetry from simulated runs is kept separate in the benchmark ledger to ensure historical performance data remains honest.
-
-## MCP tool surface
-
-Expose only safe orchestrator primitives:
-
-- `orchestrator.start_run`
-- `orchestrator.start_step`
-- `orchestrator.get_status`
-- `orchestrator.get_result`
-- `orchestrator.list_artifacts`
-- `orchestrator.approve_gate`
-- `orchestrator.reject_gate`
-- `orchestrator.retry_step`
-- `orchestrator.abort_run`
-- `orchestrator.run_validations`
-
-## MCP rules
+This document describes the current execution contract and MCP surfaces in Codencer v2.
+
+## TaskSpec
+
+`TaskSpec` is the canonical execution request contract.
+
+It defines:
+- goal
+- stable identifiers
+- adapter preference
+- allowed and forbidden paths
+- validations
+- acceptance criteria
+- timeout and simulation intent
+
+Current source of truth:
+- `internal/domain/task.go`
+- `schemas/task.schema.json`
+
+### Required minimum
+
+A valid task must at least include:
+- `version`
+- `goal`
+
+The daemon or local CLI may fill omitted `run_id`, `phase_id`, and `step_id` where the route already establishes that context.
+
+## ResultSpec
+
+`ResultSpec` is the normalized execution result contract returned by the daemon and relay-backed flows.
+
+Required truth fields include:
+- `version`
+- `run_id`
+- `step_id`
+- `state`
+- `summary`
+Current source of truth:
+- daemon result serialization
+- `schemas/result.schema.json`
+
+## Local MCP Surface
+
+The daemon still exposes a local `/mcp/call` compatibility/admin surface.
+
+That surface is useful for local orchestration/admin tooling, but it is not the public remote MCP surface.
+
+## Remote MCP Surface
+
+The relay exposes the public remote MCP surface:
+- `/mcp`
+- `/mcp/call` compatibility path
+
+Supported MCP methods:
+- `initialize`
+- `tools/list`
+- `tools/call`
+
+Supported relay tools are the `codencer.*` tools documented in [mcp/relay_tools.md](mcp/relay_tools.md).
+
+## MCP Safety Rules
+
+Both local and remote MCP surfaces keep the bridge narrow:
- no raw shell tool exposure
-- no raw DB mutation
- no unrestricted filesystem browsing
-- input validation on every call
-- stable machine-readable errors only
+- no bypass of daemon/relay auth rules
+- machine-readable errors only
+
+The relay MCP surface also preserves relay auth scopes and connector sharing boundaries.
diff --git a/docs/06_adapters_and_ide.md b/docs/06_adapters_and_ide.md
index db90d3a..78dadc7 100644
--- a/docs/06_adapters_and_ide.md
+++ b/docs/06_adapters_and_ide.md
@@ -28,7 +28,7 @@ Why:
- good contrast for adapter-neutral design
Status:
-- **Supported (Beta)**: the v1 CLI wrapper contract is implemented and covered by fake-binary integration tests, prompt/normalization unit tests, lifecycle tests, and simulation conformance tests.
+- **Implemented wrapper path (alpha-grade)**: the CLI wrapper contract is implemented and covered by fake-binary integration tests, prompt/normalization unit tests, lifecycle tests, and simulation conformance tests.
- **Not covered in repo tests**: live authenticated Claude service calls.
Current v1 contract:
diff --git a/docs/07_security_ops.md b/docs/07_security_ops.md
index 1e0485a..0b21593 100644
--- a/docs/07_security_ops.md
+++ b/docs/07_security_ops.md
@@ -1,78 +1,62 @@
-> [!NOTE]
-> This is a **design specification** and may not fully reflect the current implementation.
-> For the latest implementation status, see the [Gap Audit](internal/GAP_AUDIT.md).
-
# Security and Operations
-## Security principles
-
-### Least privilege
-Planner side gets only safe MCP/orchestrator tools.
-
-### Explicit project root
-Every run is bound to a configured project root.
+This document describes the current operational security model for Codencer v2.
-### Deterministic artifacts
-All artifacts stored under controlled artifact root.
+## Security Principles
-### Policy before power
-Destructive/risky changes require gates.
+- **Bridge Not Brain**: Codencer executes, waits, records, and reports. It does not plan.
+- **Local Execution**: adapters and artifacts stay local to the daemon side.
+- **Explicit Sharing**: connector discovery does not imply exposure; config is the allowlist.
+- **Narrow Remote Surface**: relay HTTP API and relay MCP expose only instance-scoped orchestration operations.
+- **Evidence First**: results, validations, and artifacts are recorded as local truth.
-## Gate triggers for MVP
+## Remote Surfaces
-- migration file created/changed
-- dependency manifest or lockfile changed
-- forbidden path touched
-- too many files changed
-- deletes over threshold
-- validations fail
-- adapter reports unresolved ambiguity
+The only intended remote control surfaces are:
+- relay planner API
+- relay MCP
+- connector outbound websocket
-## Repo safety
+The daemon is not intended to be internet-facing.
-- detect dirty repo before run
-- optional worktree isolation
-- run locks
-- diff capture before cleanup
-- safe cleanup path
+## What Is Not Exposed
-## Process safety
+Codencer v2 does not expose:
+- raw shell execution through relay or MCP
+- arbitrary filesystem browsing through relay or MCP
+- generic network tunneling through the connector
+- implicit repo sharing
+- unauthenticated remote control
-- child process supervision
-- timeout handling
-- cancellation
-- retry limits
-- duplicate execution prevention
-- crash recovery
+## Local Safety
-## Artifact safety
+The daemon preserves local safety by:
+- anchoring execution to an explicit repo root
+- isolating attempts with worktree and provisioning logic
+- persisting run, step, gate, and artifact truth locally
+- keeping abort semantics honest when cancellation is not confirmed
-Artifacts may contain code and logs.
+## Remote Safety
-Recommendations:
-- local only by default
-- retention policy later
-- no automatic upload
-- clear directory ownership
+The relay and connector preserve remote safety by:
+- authenticating planners with bearer tokens
+- authenticating connectors with enrollment plus signed challenge/response
+- allowing only explicitly shared instances to be advertised
+- routing only through the connector allowlist
+- persisting audit events for remote control actions
-## Required test strategy
+## Current Honest Limitations
-### Unit tests
-- state transitions
-- policy evaluation
-- schema validation
-- adapter normalization
+- planner auth is static-token based
+- relay resource routing for `step`, `gate`, and `artifact` ids depends on authorized online shared instances being reachable; the relay now probes for missing route hints and persists successful matches, but it still fails closed on offline or ambiguous matches
+- large artifact transfer is intentionally bounded
+- abort remains best-effort unless the adapter actually stops
+- current self-host auth model is alpha-grade, not enterprise IAM
-### Integration tests
-- SQLite storage
-- artifact store
-- daemon lifecycle
-- CLI behavior
+## Operator Guidance
-### E2E
-- run start
-- step execution
-- validation
-- gate creation
-- approve/retry
-- completion
+- keep the daemon on loopback or another trusted local boundary
+- expose the relay instead of exposing the daemon
+- keep the connector on the same side as the daemon when possible
+- inspect results, validations, and artifacts via CLI or API, not raw path assumptions
+- treat Antigravity broker and relay as separate trust domains
diff --git a/docs/10_implementation_prompts.md b/docs/10_implementation_prompts.md
index 7784627..cac533b 100644
--- a/docs/10_implementation_prompts.md
+++ b/docs/10_implementation_prompts.md
@@ -1,5 +1,9 @@
# Implementation Prompts
+> [!WARNING]
+> **HISTORICAL PLANNING ARTIFACT**: This file captures old bootstrap prompts and phase prompts from an earlier repo shape.
+> Do **not** use it as current release truth. Use the frozen beta docs under [docs/internal/BETA_FINALIZATION_PLAN.md](internal/BETA_FINALIZATION_PLAN.md), [docs/internal/BETA_SUPPORT_CLASSIFICATION.md](internal/BETA_SUPPORT_CLASSIFICATION.md), and [docs/internal/BETA_VERIFICATION_MATRIX.md](internal/BETA_VERIFICATION_MATRIX.md) instead.
+
Use these prompts in Antigravity IDE with Gemini 3.1 Thinking / Flash.
## Master bootstrap prompt
diff --git a/docs/AI_OPERATOR_GUIDE.md b/docs/AI_OPERATOR_GUIDE.md
index a0100af..61e64d9 100644
--- a/docs/AI_OPERATOR_GUIDE.md
+++ b/docs/AI_OPERATOR_GUIDE.md
@@ -12,6 +12,7 @@ Codencer is a **Tactical Orchestration Bridge**, not a strategic planner. It han
1. **Bridge, Not Brain**: Do not expect the bridge to plan your next move or recursively fix its own failures. It executes precisely what you submit in a `TaskSpec`.
2. **Rule of Discovery (MANDATORY)**: Always verify the daemon's identity and repository anchor before taking any action. Use `instance --json`.
3. **Atomic Evidence**: Every task attempt is isolated in a Git Worktree. Success or failure is reported as a terminal state with immutable artifacts.
+4. **Remote Planner Surface**: When operating remotely, target the relay HTTP API or relay MCP surface. The daemon-local `/mcp/call` endpoint is not the public remote planner surface.
---
@@ -27,7 +28,7 @@ Always verify the daemon's identity to ensure you are targeting the correct repo
**Expected JSON Response:**
```json
{
- "version": "v0.1.0-beta",
+ "version": "v0.2.0-beta",
"repo_root": "/home/user/my-project",
"execution_mode": "REAL",
"port": 8085
@@ -63,6 +64,26 @@ echo '{"version":"v1","goal":"Update README","title":"Update docs"}' | \
./bin/orchestratorctl submit my-run-id --task-json - --wait --json
```
+## 🌐 Remote Relay Path
+
+When operating across the self-host relay path:
+
+1. Discover the shared target instance through the relay, not by assuming it:
+ ```bash
+ ./bin/codencer-relayd instances --config .codencer/relay/config.json
+ ```
+2. Start or inspect runs through relay HTTP under `/api/v2`.
+3. Use relay MCP at `/mcp` only for the narrow `codencer.*` tool surface.
+4. Inspect result, validations, logs, artifacts, and gates through relay evidence routes before making the next planning decision.
+
+Remote planner checklist:
+- planner talks to relay
+- relay talks to authenticated connector
+- connector talks to local daemon
+- daemon remains the source of run, step, gate, and artifact truth
+
+For the separate cloud control-plane path, see [CLOUD.md](CLOUD.md) and [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md). Those docs cover bootstrap/status/install/list flows, connector installation management, and the Jira polling worker. Do not confuse that surface with the local relay bridge or the daemon-local execution path.
+
---
## 🔍 Phase 3: Auditing Terminal States
diff --git a/docs/BETA_TESTING.md b/docs/BETA_TESTING.md
new file mode 100644
index 0000000..4389228
--- /dev/null
+++ b/docs/BETA_TESTING.md
@@ -0,0 +1,167 @@
+# Public Beta Test Tracks
+
+Codencer is `v0.2.0-beta` overall. This guide freezes the **externally testable** public beta tracks and their exact proof boundaries.
+
+Use this page when you want to answer one practical question quickly: which of the following tracks should you test?
+
+- local only
+- self-host relay/runtime
+- self-host cloud
+- planner/client integrations
+- provider connectors
+
+## Prerequisites
+
+Common requirements for the supported tracks:
+
+- Git
+- Go `1.25.0+`
+- `cc` or `gcc` for the CGO SQLite build
+- `curl`
+- `jq` or Python 3 for shell automation helpers
+
+Additional requirement for the Docker self-host baseline:
+
+- Docker CLI plus a running Docker daemon
+
+## Choose Your Test Track
+
+| Track | Start here | Build | Proof command | Current boundary |
+| --- | --- | --- | --- | --- |
+| Local-only daemon + CLI | [SETUP.md](SETUP.md) | `make build` | `./scripts/smoke_test_v1.sh` then `make smoke` | Canonical local proof is simulation-first; live adapter proof stays narrow. |
+| Self-host relay + runtime connector | [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) | `make build` | `PLANNER_TOKEN=<your-token> make self-host-smoke-mcp` | Canonical remote self-host path; relay `/mcp` is the public MCP surface. |
+| Self-host cloud control plane | [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md) | `make build-cloud` | `make cloud-smoke` | Binary-native proof covers bootstrap, tenancy, provider installs, and optional composed runtime/MCP/SDK proof. |
+| Planner / client integrations | [mcp/integrations.md](mcp/integrations.md) | `make build build-cloud build-mcp-sdk-smoke` | self-host or cloud smoke with MCP/SDK enabled | ChatGPT-style and Claude-style paths remain compatibility-only, not direct product proof. |
+| Provider connectors | [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md) | `make build-cloud` | `make cloud-smoke` plus provider-focused tests | Slack is strongest today; Jira is polling-first; the rest stay narrow operator/package surfaces. |
+
+## Repo-Level Verification Commands
+
+For a supported non-Docker verification pass from a working checkout:
+
+```bash
+make build-supported
+make verify-beta
+```
+
+What `make verify-beta` covers:
+
+- main-module tests
+- local smoke
+- self-host relay/runtime smoke with MCP + official Go SDK proof
+- cloud binary smoke
+- Docker compose config validation
+
+For the Docker-backed cloud baseline on a Docker-capable host:
+
+```bash
+make verify-beta-docker
+```
+
+That adds:
+
+- `make cloud-stack-smoke`
+
+## Track Notes
+
+### Local-only
+
+- Start with [SETUP.md](SETUP.md).
+- The canonical local proof is:
+
+```bash
+./scripts/smoke_test_v1.sh
+./scripts/smoke_test_v1.sh
+make smoke
+```
+
+- `/api/v1/compatibility` is a runtime diagnostic surface, not a support certificate.
+- daemon-local `/mcp/call` is compatibility-only and not part of the public remote planner contract.
+
+### Self-host relay / runtime
+
+- Start with [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md).
+- The canonical public relay surfaces are:
+ - HTTP: `/api/v2/...`
+ - MCP: `/mcp`
+- `/mcp/call` remains a compatibility alias.
+- The strongest scripted relay proof is:
+
+```bash
+PLANNER_TOKEN=<your-token> make self-host-smoke-mcp
+```
+
+For broader relay proof:
+
+```bash
+PLANNER_TOKEN=<your-token> make self-host-smoke-all
+```
+
+### Self-host cloud
+
+- Start with [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md).
+- Use `make cloud-stack-smoke` for the **Docker baseline only**.
+- Use `make cloud-smoke` for the **binary-native cloud control-plane proof**.
+- Use composed-mode inputs with `make cloud-smoke` when you want runtime HTTP, cloud MCP, and official Go SDK proof:
+
+```bash
+make build-cloud build-mcp-sdk-smoke
+CLOUD_RELAY_CONFIG=.codencer/relay/config.json \
+CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:8085 \
+CLOUD_SMOKE_MCP=1 \
+CLOUD_SMOKE_SDK=1 \
+make cloud-smoke
+```
+
+The Docker compose stack does not create a usable runtime instance by itself. For cloud-scoped runtime control, you still need an external `orchestratord` plus `codencer-connectord`.
+
+### Planner / client integrations
+
+- Start with [mcp/integrations.md](mcp/integrations.md).
+- Repo-proven remote client surfaces are:
+ - relay HTTP
+ - relay MCP
+ - cloud HTTP
+ - cloud MCP
+ - official Go SDK to relay/cloud MCP
+- Generic MCP clients beyond the checked-in proof helpers remain expected-only.
+- ChatGPT-style and Claude-style product integrations remain compatibility-only.
+
+### Provider connectors
+
+- Start with [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md).
+- Current public truth:
+ - Slack is the strongest provider-shaped local test path.
+ - Jira is polling-first; webhook ingest remains deferred.
+ - GitHub, GitLab, and Linear are proven more narrowly through fixture and routed-package coverage.
+- `make cloud-smoke` proves the generic cloud/provider install-webhook-events-audit path.
+- Live vendor-account proof is not part of the current beta promise.
+
+## Clean-Checkout Notes
+
+When validating from a fresh checkout:
+
+1. Copy `.env.example` to `.env` only if you are testing the local daemon convenience flow.
+2. Build with `make build-supported`.
+3. Run `make verify-beta`.
+4. Run `make verify-beta-docker` only if Docker daemon access is available.
+
+If you only want one track, use the track-specific commands above instead of the full repo pass.
+
+## Known Public Boundaries
+
+- This guide reflects the repo-wide beta contract confirmed by the final verification pass.
+- `agent-broker`, the VS Code extension, daemon-local MCP, and secondary adapters remain outside the primary beta promise.
+- Provider connectors are beta-installable/testable within narrow claims, not marketplace-complete.
+- Product-specific ChatGPT, Claude Code, Claude Desktop, or vendor marketplace publication flows are not directly proven in this repo.
+
+## Filing Useful Test Reports
+
+Include all of the following when reporting a tester-facing issue:
+
+- the track you were testing
+- the exact command you ran
+- OS and shell
+- Go version
+- whether Docker daemon access was available
+- the failing log excerpt or artifact path
+- whether you were using simulation mode, relay self-host mode, or cloud composed mode
diff --git a/docs/CHECKLIST.md b/docs/CHECKLIST.md
index 781ff14..ea8204a 100644
--- a/docs/CHECKLIST.md
+++ b/docs/CHECKLIST.md
@@ -4,7 +4,7 @@ Follow these steps after a fresh `git clone` and `make setup` to verify that you
## 1. Build Verification
- [ ] Run `make build`.
-- [ ] Verify `bin/orchestratord` and `bin/orchestratorctl` exist.
+- [ ] Verify `bin/orchestratord`, `bin/orchestratorctl`, `bin/codencer-connectord`, and `bin/codencer-relayd` exist.
- [ ] Run `./bin/orchestratorctl doctor` and ensure Git, Go, and CC are **[OK]**.
## 2. Daemon & Explicit Targeting
@@ -33,10 +33,10 @@ Follow these steps after a fresh `git clone` and `make setup` to verify that you
- [ ] Verify the artifact directory exists in `.codencer/artifacts/`.
## 5. Antigravity Broker (Optional/Core)
-- [ ] If using WSL/Windows, start `agent-broker.exe` on Windows.
+- [ ] If using WSL/Windows, run `make build-broker` and start the resulting broker binary on Windows.
- [ ] Run `./bin/orchestratorctl antigravity list`.
- [ ] Verify that at least one IDE instance is discovered (or handle 'no instances' gracefully).
---
-**Status**: If steps 1-4 pass, your bridge is **Operational (v1.0-release-candidate)**.
+**Status**: If steps 1-4 pass, your local bridge baseline is **Operational (`v0.2.0-beta`)**.
diff --git a/docs/CLOUD.md b/docs/CLOUD.md
new file mode 100644
index 0000000..bb5520d
--- /dev/null
+++ b/docs/CLOUD.md
@@ -0,0 +1,184 @@
+# Codencer Cloud
+
+Codencer Cloud is the beta-track self-host cloud control plane for tenant-scoped provider integrations and tenant-scoped Codencer runtime control. It does not execute coding work, but it can now own the cloud-facing registry for claimed Codencer connectors and shared instances when started with an internal relay bridge.
+
+Use this page for the cloud scope and route contract.
+
+- Use [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md) for bootstrap, Docker baseline, and smoke order.
+- Use [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md) for per-provider install/test depth and limitations.
+- Use [mcp/integrations.md](mcp/integrations.md) for the cloud-vs-relay planner/client chooser.
+
+## What It Does
+
+- bootstraps org, workspace, project, and API token records
+- serves cloud status, org, workspace, project, token, installation, event, and audit routes
+- serves membership and role-scoped access routes for org/workspace/project operators
+- manages connector installation enable/disable state
+- records connector events, action logs, and audit events
+- claims Codencer runtime connectors into org/workspace/project scope
+- lists tenant-scoped claimed runtime connectors and shared runtime instances
+- proxies tenant-scoped runtime HTTP operations through an internal relay bridge when configured
+- exposes a tenant-scoped cloud MCP surface for runtime control when the relay bridge is configured
+
+## What It Does Not Do
+
+- it does not execute coding work
+- it does not replace the local daemon, relay, or connector bridge
+- it does not provide cloud billing, multi-tenant SaaS UI, or enterprise IAM
+- it does not add raw shell or arbitrary filesystem access
+- it does not replace the self-host relay contract for operators who are not using cloud tenancy
+- it does not automatically claim or auto-assign runtime connectors into tenant scope
+
+## Binaries
+
+- `codencer-cloudd`: cloud control-plane server
+- `codencer-cloudctl`: admin CLI for cloud bootstrap and control-plane operations
+- `codencer-cloudworkerd`: background worker for provider polling and maintenance
+
+## Runtime Composition
+
+The cloud daemon serves the cloud API under `/api/cloud/v1/*`.
+
+It can also start an internal relay runtime bridge when started with `--relay-config` or a config file that sets `relay_config_path`. In that mode:
+
+- cloud owns the public control-plane surface
+- cloud claims runtime connectors into org/workspace/project scope
+- cloud keeps a tenant-scoped runtime connector and runtime instance registry
+- cloud proxies tenant-scoped runtime HTTP operations through the in-process relay server
+- cloud exposes the tenant-scoped MCP surface at `/api/cloud/v1/mcp`
+- raw relay planner routes and relay MCP are implementation details in composed mode, not the cloud-facing contract
+
+## Access Model
+
+Cloud now includes a minimal first-class control-plane access layer:
+
+- `membership` records belong to an org and can optionally be bound to a workspace and project
+- role values are:
+ - `org_owner`
+ - `org_admin`
+ - `workspace_admin`
+ - `project_operator`
+ - `project_viewer`
+- API tokens can be linked to a membership and are scope-clamped by that membership role
+- connector installations and claimed runtime connectors now persist `owner_membership_id`
+- audit events now attribute membership-linked tokens as membership actors instead of anonymous service tokens
+
+This is still intentionally smaller than enterprise IAM. There is no SSO, no external identity provider, and no user-facing UI in this pass.
+
+## Public Cloud Routes
+
+- `GET /healthz`
+- `GET /api/cloud/v1/status`
+- `GET|POST /api/cloud/v1/orgs`
+- `GET|POST /api/cloud/v1/workspaces`
+- `GET|POST /api/cloud/v1/projects`
+- `GET|POST /api/cloud/v1/memberships`
+- `GET /api/cloud/v1/memberships/{id}`
+- `POST /api/cloud/v1/memberships/{id}/enable`
+- `POST /api/cloud/v1/memberships/{id}/disable`
+- `GET|POST /api/cloud/v1/tokens`
+- `POST /api/cloud/v1/tokens/{id}/revoke`
+- `GET|POST /api/cloud/v1/installations`
+- `GET /api/cloud/v1/installations/{id}`
+- `POST /api/cloud/v1/installations/{id}/validate`
+- `POST /api/cloud/v1/installations/{id}/enable`
+- `POST /api/cloud/v1/installations/{id}/disable`
+- `POST /api/cloud/v1/installations/{id}/actions`
+- `POST /api/cloud/v1/installations/{id}/webhook`
+- `GET|POST /api/cloud/v1/runtime/connectors`
+- `GET /api/cloud/v1/runtime/connectors/{id}`
+- `POST /api/cloud/v1/runtime/connectors/{id}/enable`
+- `POST /api/cloud/v1/runtime/connectors/{id}/disable`
+- `POST /api/cloud/v1/runtime/connectors/{id}/sync`
+- `GET /api/cloud/v1/runtime/connectors/{id}/instances`
+- `GET /api/cloud/v1/runtime/instances`
+- `GET /api/cloud/v1/runtime/instances/{id}`
+- `GET|POST /api/cloud/v1/runtime/instances/{id}/runs`
+- `GET /api/cloud/v1/runtime/instances/{id}/runs/{run_id}`
+- `POST /api/cloud/v1/runtime/instances/{id}/runs/{run_id}/steps`
+- `GET /api/cloud/v1/runtime/instances/{id}/runs/{run_id}/gates`
+- `POST /api/cloud/v1/runtime/instances/{id}/runs/{run_id}/abort`
+- `GET /api/cloud/v1/runtime/instances/{id}/steps/{step_id}`
+- `GET /api/cloud/v1/runtime/instances/{id}/steps/{step_id}/result`
+- `GET /api/cloud/v1/runtime/instances/{id}/steps/{step_id}/validations`
+- `GET /api/cloud/v1/runtime/instances/{id}/steps/{step_id}/logs`
+- `GET /api/cloud/v1/runtime/instances/{id}/steps/{step_id}/artifacts`
+- `GET /api/cloud/v1/runtime/instances/{id}/artifacts/{artifact_id}/content`
+- `POST /api/cloud/v1/runtime/instances/{id}/gates/{gate_id}/approve`
+- `POST /api/cloud/v1/runtime/instances/{id}/gates/{gate_id}/reject`
+- `GET|POST|DELETE /api/cloud/v1/mcp`
+- `POST /api/cloud/v1/mcp/call`
+- `GET /api/cloud/v1/events`
+- `GET /api/cloud/v1/audit`
+
+Planner/admin calls are bearer-token authenticated and scoped by org/workspace/project. Runtime operations stay explicitly instance-scoped on the cloud HTTP surface.
+
+When cloud is started in composed runtime mode it also accepts local Codencer connector ingress at:
+
+- `POST /api/v2/connectors/enroll`
+- `POST /api/v2/connectors/challenge`
+- `GET /ws/connectors`
+
+Those routes exist so the connector can dial the cloud host directly in composed mode. They are not the planner/admin API surface.
+
+## Cloud-Scoped MCP Surface
+
+The cloud-scoped canonical remote tool surface now exists at `/api/cloud/v1/mcp`.
+
+- It uses cloud bearer tokens, not relay planner tokens.
+- Transport auth only requires a valid cloud token; individual tool calls still enforce their own runtime scopes.
+- It enforces org/workspace/project visibility before any runtime tool can see an instance.
+- It intentionally exposes only the narrow `codencer.*` runtime tool set.
+- It is only useful when `codencer-cloudd` is started with a relay bridge.
+
+Boundary rule:
+
+- use `/api/cloud/v1/mcp` when the control plane is cloud tenancy
+- use relay `/mcp` when operating the self-host relay directly without cloud tenancy
+
+Both surfaces ultimately route through the same local runtime bridge doctrine, but only the cloud surface is tenant-scoped.
+
+For the frozen planner/client compatibility matrix, generic HTTP/MCP examples, and cloud-vs-relay packaging boundary, see [mcp/integrations.md](mcp/integrations.md) and [mcp/cloud_tools.md](mcp/cloud_tools.md).
+
+## Command Surface
+
+`codencer-cloudctl` mirrors the cloud API with a narrow CLI:
+
+- `bootstrap`
+- `status`
+- `orgs` / `orgs create`
+- `workspaces` / `workspaces create`
+- `projects` / `projects create`
+- `memberships` / `memberships list|create|get|enable|disable`
+- `tokens` / `tokens create|revoke`
+- `install` / `install create|get|validate|enable|disable|action`
+- `runtime-connectors` / `runtime-connectors claim|get|enable|disable|sync|instances`
+- `runtime-instances` / `runtime-instances list|get`
+- `events`
+- `audit`
+
+Use `bootstrap` to seed a new org/workspace/project/membership token set into the same SQLite store used by the cloud daemon. Because `bootstrap` writes the store directly, run it before starting the daemon or while the database is idle.
+
+The runtime CLI covers cloud-scoped claim/list/get flows for claimed runtime connectors and instances. Provider-action and runtime-execution flows still remain easier to script directly against the HTTP API.
+
+## Current Truth
+
+- Cloud runtime control is tenant-scoped over HTTP and cloud-scoped MCP in this pass.
+- Repo tests now cover token revocation denial, event/audit scope filtering, runtime HTTP scope enforcement, and cloud MCP session/scope parity.
+- Connector event history is append-only in the cloud store; repeated source-event IDs are preserved instead of overwriting older rows.
+- Provider action logs now persist request/response payloads plus start/end timestamps, and provider audit rows include richer action outcome details.
+- Raw relay routes are still available from `codencer-relayd` for self-host relay use, but they are not the cloud control-plane contract.
+- Cloud runtime control requires `codencer-cloudd` to be started with `relay_config_path` or `--relay-config`.
+- Cloud runtime connector ownership is explicit. A relay connector must still be claimed into org/workspace/project scope before the cloud API or cloud MCP can use it.
+- Connector enrollment-token issuance remains relay-config backed in this pass. Cloud hosts connector ingress in composed mode, but it does not yet add a cloud-native enrollment-token lifecycle.
+- Jira is polling-first in the current beta track.
+- Jira webhook ingest remains deferred, and routed Jira webhook calls now return `501 webhook_deferred` instead of ingesting payloads.
+- `codencer-cloudworkerd` is the place where Jira polling runs.
+- `cloud_smoke.sh` now exercises the binary-native bootstrap, status, list, create, get, enable, disable, webhook ingest, events, and audit flows.
+- The provider-shaped binary smoke path is Slack-oriented today; GitHub, GitLab, Jira, and Linear remain narrower operator/package surfaces backed mainly by provider fixture tests plus the generic cloud install/action routes.
+- In composed mode, `cloud_smoke.sh` can also prove claimed runtime visibility plus a real cloud runtime HTTP run/start + submit-task path. That composed proof can use either an existing shared connector id or a temporary connector enrolled from `CLOUD_RUNTIME_DAEMON_URL`.
+- In composed mode, `cloud_smoke.sh` can optionally prove cloud MCP initialize/list/call and official Go SDK interoperability.
+- `deploy/cloud/smoke.sh` exercises the Docker-based self-host stack baseline with bootstrap, status, installation create, and audit verification.
+- `make cloud-stack-smoke` is the Docker baseline proof only; the broader runtime/MCP/SDK proof lives in the binary-native `make cloud-smoke` path with composed-mode inputs.
+
+For operator steps and startup ordering, see [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md). For provider capability details, see [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md).
diff --git a/docs/CLOUD_CONNECTORS.md b/docs/CLOUD_CONNECTORS.md
new file mode 100644
index 0000000..74e6526
--- /dev/null
+++ b/docs/CLOUD_CONNECTORS.md
@@ -0,0 +1,300 @@
+# Codencer Cloud Connector Matrix
+
+This document freezes the current provider connector contract to repo-tested truth.
+
+Status labels in the matrix below mean:
+
+- `proven`: directly exercised by current repo tests or smoke.
+- `partial`: implemented and usable within a narrow scope, but proof or operator packaging is still thin.
+- `expected-only`: code/docs suggest it should work, but the repo does not directly prove it today.
+- `deferred`: intentionally outside the current provider beta promise.
+
+## Generic Install And Validate Contract
+
+All five providers use the same cloud installation surface:
+
+- `POST /api/cloud/v1/installations`
+- `GET /api/cloud/v1/installations/{id}`
+- `POST /api/cloud/v1/installations/{id}/validate`
+- `POST /api/cloud/v1/installations/{id}/enable`
+- `POST /api/cloud/v1/installations/{id}/disable`
+- `POST /api/cloud/v1/installations/{id}/actions`
+- `POST /api/cloud/v1/installations/{id}/webhook`
+- `GET /api/cloud/v1/events`
+- `GET /api/cloud/v1/audit`
+
+Create requests use this shape:
+
+```json
+{
+ "org_id": "org_...",
+ "workspace_id": "ws_...",
+ "project_id": "proj_...",
+ "connector_key": "slack",
+ "external_installation_id": "",
+ "external_account": "",
+ "name": "Slack smoke",
+ "config": {
+ "api_base_url": "https://slack.example",
+ "username": "jira@example.com",
+ "project_key": "PROJ"
+ },
+ "secrets": {
+ "token": "...",
+ "webhook_secret": "..."
+ }
+}
+```
+
+Generic runtime truth:
+
+- `config` is stored as installation config JSON.
+- `secrets` are stored separately and encrypted.
+- Current provider code only consumes generic `config.api_base_url`, `config.username`, and the raw `config` map, plus `secrets.token` and `secrets.webhook_secret`.
+- Create does not live-check provider credentials; live validation happens later through `POST /validate`.
+- `POST /validate` takes no body and returns:
+
+```json
+{
+ "validation": {},
+ "status": {},
+ "error": ""
+}
+```
+
+- install records start as `status=created`, `enabled=true`, `health=unknown` through the HTTP API
+- `GET /installations/{id}` exposes the persisted installation state
+- `POST /validate` returns a richer provider-derived status result and updates `last_validated_at`, `health`, and `last_error`
+
+## History, Action Logs, And Audit Truth
+
+- connector event history is append-only in the cloud store
+- repeated `source_event_id` values are preserved instead of overwriting earlier rows
+- event listing is newest-first
+- connector action logs now persist request JSON, response JSON, error text, started time, and completed time
+- cloud audit rows for connector actions now include provider, action, status, external ID, URL, status code, and error summary when present
+- webhook verification failures, webhook deferment, and normalization failures now create explicit audit rows instead of only leaving implicit HTTP responses
+
+Current limitation:
+
+- there is still no public API route for listing action-log rows directly; action-log depth is mainly a store/audit truth improvement in this phase
+
+## Provider Capability Matrix
+
+| Provider | Install/bootstrap | Validation | Ingest | Actions | Status/health | Audit | Local testability | Publishability-preparedness |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| GitHub | `partial` | `proven` | `partial` | `proven` | `proven` | `partial` | `partial` | `partial` |
+| GitLab | `partial` | `proven` | `partial` | `proven` | `proven` | `partial` | `partial` | `partial` |
+| Jira | `partial` | `proven` | `partial` | `partial` | `proven` | `partial` | `partial` | `partial` |
+| Linear | `partial` | `proven` | `partial` | `partial` | `partial` | `partial` | `partial` | `partial` |
+| Slack | `proven` | `proven` | `partial` | `partial` | `proven` | `partial` | `proven` | `partial` |
+
+Practical interpretation:
+
+- Slack is the strongest operator-facing connector path in the repo today.
+- Jira is real and supported within a polling-first scope.
+- GitHub, GitLab, and Linear are real connectors with direct mocked-provider proof, but their operator packaging and local end-to-end proof remain thinner than Slack.
+- none of these providers should be described as marketplace-ready, OAuth-install ready, or vendor-depth complete in this phase
+
+## Provider Details
+
+### GitHub
+
+Current scope:
+
+- validate token against `GET /user`
+- verify webhook signatures and normalize issue, pull request, and push events
+- execute `create_issue_comment`
+- execute `create_issue`
+
+Required config and secrets:
+
+- secret `token`
+- optional `config.api_base_url`
+- secret `webhook_secret` only when webhook ingest is desired
+
+Local testing path:
+
+- `go test ./internal/cloud/connectors -count=1`
+- generic routed proof lives in `go test ./internal/cloud -count=1`
+- no GitHub-specific binary smoke exists in the repo today
+
+Publishability-prepared means:
+
+- operators can see the exact config and endpoint requirements
+- a token must authorize `GET /user`, `POST /repos/{owner}/{repo}/issues`, and `POST /repos/{owner}/{repo}/issues/{n}/comments`
+- webhook callers need a configured webhook secret and a compatible delivery into `/api/cloud/v1/installations/{id}/webhook`
+
+Still missing before broader publication/distribution:
+
+- provider-native app-install or OAuth flow
+- richer issue/PR actions
+- live vendor-account proof in repo automation
+
+### GitLab
+
+Current scope:
+
+- validate token against `GET /user`
+- verify webhook token and normalize issue, merge request, and push events
+- execute `create_issue_note`
+- execute `create_issue`
+
+Required config and secrets:
+
+- secret `token`
+- optional `config.api_base_url`
+- secret `webhook_secret` only when webhook ingest is desired
+
+Local testing path:
+
+- `go test ./internal/cloud/connectors -count=1`
+- generic routed proof lives in `go test ./internal/cloud -count=1`
+- no GitLab-specific binary smoke exists in the repo today
+
+Publishability-prepared means:
+
+- operators can see the exact config and endpoint requirements
+- a token must authorize `GET /user`, issue create, and issue-note endpoints against the configured GitLab API base
+- webhook callers need a configured webhook secret and a compatible delivery into `/api/cloud/v1/installations/{id}/webhook`
+
+Still missing before broader publication/distribution:
+
+- provider-native app-install or OAuth flow
+- merge-request write surface beyond the current narrow event normalization
+- live vendor-account proof in repo automation
+
+### Jira
+
+Current scope:
+
+- validate credentials against `GET /rest/api/3/myself`
+- polling-first ingest through `codencer-cloudworkerd`
+- execute `add_issue_comment`
+- execute `transition_issue`
+- derive installation status as polling-first with `webhook_ingest=disabled`
+
+Required config and secrets:
+
+- `config.api_base_url`
+- `config.username`
+- one of `config.jql` or `config.project_key`
+- secret `token`
+
+Important truth:
+
+- Jira webhook ingest is deferred in this phase
+- routed Jira webhook calls now return `501` with `webhook_deferred`
+- Jira webhook requests do not persist events and do not emit false-positive success audit rows
+- the supported ingest path is `codencer-cloudworkerd`, not `/webhook`
+
+Local testing path:
+
+- `go test ./internal/cloud/connectors -count=1`
+- `go test ./internal/cloud -run 'TestJiraWebhookRouteReturnsDeferredWithoutPersistingEvents|TestWorkerRunOncePollsJiraAndPersistsSnapshot' -count=1`
+- `./bin/codencer-cloudworkerd --config .codencer/cloud/config.json --once`
+
+Publishability-prepared means:
+
+- operators can see the exact config and endpoint requirements
+- credentials must authorize `GET /rest/api/3/myself`, `GET /rest/api/3/search`, `POST /rest/api/3/issue/{key}/comment`, and `POST /rest/api/3/issue/{key}/transitions`
+- polling must be configured explicitly through `config.jql` or `config.project_key`
+
+Still missing before broader publication/distribution:
+
+- webhook ingest
+- transition discovery and richer polling cursor semantics
+- live vendor-account proof in repo automation
+
+### Linear
+
+Current scope:
+
+- validate through the viewer query
+- verify webhook signatures and normalize issue webhooks
+- execute `create_issue`
+- execute `add_comment`
+
+Required config and secrets:
+
+- `config.api_base_url`
+- secret `token`
+- secret `webhook_secret` only when webhook ingest is desired
+
+Local testing path:
+
+- `go test ./internal/cloud/connectors -count=1`
+- generic routed proof lives in `go test ./internal/cloud -count=1`
+- no Linear-specific binary smoke exists in the repo today
+
+Publishability-prepared means:
+
+- operators can see the exact config and endpoint requirements
+- the configured token must authorize the viewer query plus the issue/comment mutations used by the connector
+- webhook callers need a configured webhook secret and a compatible delivery into `/api/cloud/v1/installations/{id}/webhook`
+
+Still missing before broader publication/distribution:
+
+- richer team/project/state discovery
+- broader issue workflow actions
+- live vendor-account proof in repo automation
+
+### Slack
+
+Current scope:
+
+- validate through `auth.test`
+- verify Slack signatures and normalize event callbacks, slash commands, and interactive payloads
+- execute `post_message`
+- execute `update_message`
+
+Required config and secrets:
+
+- `config.api_base_url`
+- secret `token`
+- secret `webhook_secret` when webhook ingest is desired
+
+Local testing path:
+
+- `go test ./internal/cloud/connectors -count=1`
+- `go test ./internal/cloud -run 'TestServerAdminAndConnectorFlows|TestWebhookHistoryPreservesRepeatedSourceEventIDs|TestConnectorActionLogsCaptureRequestCompletionAndAuditDetails' -count=1`
+- `make build-cloud && make cloud-smoke`
+
+Publishability-prepared means:
+
+- operators can see the exact config and endpoint requirements
+- the configured token must authorize `auth.test`, `chat.postMessage`, and `chat.update`
+- inbound webhook/event handling requires a signing secret and a compatible delivery into `/api/cloud/v1/installations/{id}/webhook`
+
+Still missing before broader publication/distribution:
+
+- broader Slack action surface such as reactions or view submissions
+- provider-native install flow or workspace app packaging
+- live workspace proof in repo automation
+
+## Local Test Commands
+
+Provider maintainers can use these repo-native checks:
+
+```bash
+go test ./internal/cloud/connectors -count=1
+go test ./internal/cloud -count=1
+make build-cloud
+make cloud-smoke
+```
+
+Practical split:
+
+- `go test ./internal/cloud/connectors -count=1` is the provider fixture/unit suite
+- `go test ./internal/cloud -count=1` adds routed cloud install/webhook/action/worker coverage
+- `make cloud-smoke` proves the generic cloud install/validate/webhook/events/audit path, with Slack as the current provider-shaped binary smoke
+- `make cloud-stack-smoke` remains the Docker deployment proof path and still requires a Docker-capable host
+
+## What This Phase Does Not Claim
+
+- no provider is marketplace-approved, app-store-ready, or vendor-partner complete
+- no provider has a repo-proven OAuth/app-install/bootstrap flow
+- no provider has full live vendor-account proof in the repo
+- GitHub, GitLab, Linear, and Jira do not yet have provider-specific binary smoke equivalent to Slack's current cloud smoke coverage
+
+For the self-host operator workflow, see [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md). For the top-level cloud overview, see [CLOUD.md](CLOUD.md).
diff --git a/docs/CLOUD_SELF_HOST.md b/docs/CLOUD_SELF_HOST.md
new file mode 100644
index 0000000..2ca7413
--- /dev/null
+++ b/docs/CLOUD_SELF_HOST.md
@@ -0,0 +1,439 @@
+# Codencer Self-Host Cloud Control Plane Guide
+
+This guide covers the practical self-host bootstrap path for Codencer Cloud.
+
+Use this page when cloud tenancy is the public control plane.
+
+- Use [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) when you want the raw relay/runtime self-host path instead of cloud tenancy.
+- Use [CLOUD.md](CLOUD.md) for the cloud route/scope reference.
+- Use [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md) for per-provider install/test depth and limitations.
+- Use [mcp/integrations.md](mcp/integrations.md) for the planner/client compatibility matrix.
+
+## Recommended Topology
+
+- `codencer-cloudd` on a server, VPS, or local host
+- `codencer-cloudctl` on the operator machine
+- `codencer-cloudworkerd` alongside the cloud daemon or as a scheduled worker
+- optional internal relay bridge under `codencer-cloudd` if you want cloud to own tenant-scoped Codencer runtime control
+
+The cloud control plane still does not execute coding work. In this pass it can also claim runtime connectors and shared instances into org/workspace/project scope, but the daemon and connector still execute and report locally.
+
+## Docker Baseline
+
+The repo now includes a practical Docker baseline under `deploy/cloud/`:
+
+- `deploy/cloud/Dockerfile`
+- `deploy/cloud/docker-compose.yml`
+- `deploy/cloud/.env.example`
+- `deploy/cloud/config/cloud.json`
+- `deploy/cloud/config/relay.json`
+- `deploy/cloud/smoke.sh`
+
+This stack is beta-track, SQLite-backed, and meant to be a serious self-host baseline rather than a production-ready managed deployment recipe.
+
+## Compose Reality
+
+The Docker baseline is intentionally narrow:
+
+- it starts `codencer-cloudd` plus `codencer-cloudworkerd`
+- it embeds the relay bridge inside the cloud process
+- it publishes only the cloud HTTP port on `8190`
+- it does **not** create a usable runtime instance by itself
+- cloud-scoped runtime proof still requires an external `orchestratord` plus `codencer-connectord`
+
+Proof boundary:
+
+- `make cloud-stack-smoke` proves the Docker baseline only
+- `make cloud-smoke` proves the binary-native cloud control-plane path
+- `make cloud-smoke` with composed-mode runtime inputs proves claimed runtime HTTP, cloud MCP, and official Go SDK access
+
+## Build
+
+Build the cloud binaries with:
+
+```bash
+make build-cloud
+```
+
+This produces:
+
+- `bin/codencer-cloudctl`
+- `bin/codencer-cloudd`
+- `bin/codencer-cloudworkerd`
+
+## Docker Compose Quickstart
+
+1. Copy the env file and set the required secrets:
+
+```bash
+cp deploy/cloud/.env.example deploy/cloud/.env
+```
+
+Set at least:
+
+- `CODENCER_CLOUD_MASTER_KEY`
+- `RELAY_PLANNER_TOKEN`
+- `RELAY_ENROLLMENT_SECRET`
+
+2. Bootstrap the SQLite store before starting the cloud service:
+
+```bash
+docker compose --env-file deploy/cloud/.env -f deploy/cloud/docker-compose.yml run --rm \
+ --entrypoint codencer-cloudctl cloud \
+ bootstrap \
+ --config /etc/codencer/cloud/config.json \
+ --org-slug acme \
+ --workspace-slug platform \
+ --project-slug core \
+ --token-name operator \
+ --member-name "Bootstrap Owner" \
+ --member-email owner@example.com \
+ --json
+```
+
+3. Start the cloud daemon and worker:
+
+```bash
+docker compose --env-file deploy/cloud/.env -f deploy/cloud/docker-compose.yml up -d cloud worker
+```
+
+4. Check health:
+
+```bash
+curl -fsS http://127.0.0.1:8190/healthz
+```
+
+5. Optional deployment smoke:
+
+```bash
+make cloud-stack-smoke
+```
+
+Persistent state in the compose baseline lives in named volumes:
+
+- `cloud-data` for the cloud SQLite database
+- `relay-data` for the composed relay SQLite database
+
+The committed JSON config files are templates. Secrets still come from the compose env file through runtime environment overrides.
+
+## Composed Runtime Proof With An External Daemon
+
+When you want tenant-scoped runtime control against the cloud host, use this order:
+
+1. bootstrap and start the cloud stack
+2. start a local `orchestratord` next to the repo you want to serve
+3. mint a relay enrollment token from the cloud image:
+
+```bash
+docker compose --env-file deploy/cloud/.env -f deploy/cloud/docker-compose.yml run --rm \
+ --entrypoint codencer-relayd cloud \
+ enrollment-token create \
+ --config /etc/codencer/relay/config.json \
+ --label local-dev \
+ --json
+```
+
+4. enroll and run `codencer-connectord` against `http://127.0.0.1:8190`
+5. claim the runtime connector into org/workspace/project scope with `codencer-cloudctl runtime-connectors claim`
+6. use cloud HTTP under `/api/cloud/v1/runtime/...` or cloud MCP under `/api/cloud/v1/mcp`
+
+If you want the scripted composed proof from a local checkout instead of the Docker baseline, use the binary-native smoke path:
+
+```bash
+make build-cloud build-mcp-sdk-smoke
+CLOUD_RELAY_CONFIG=.codencer/relay/config.json \
+CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:8085 \
+CLOUD_SMOKE_MCP=1 \
+CLOUD_SMOKE_SDK=1 \
+make cloud-smoke
+```
+
+## Cloud Config
+
+Create a cloud config file such as `.codencer/cloud/config.json`:
+
+```json
+{
+ "host": "127.0.0.1",
+ "port": 8190,
+ "db_path": ".codencer/cloud/cloud.db",
+ "master_key": "replace-with-a-long-random-secret",
+ "relay_config_path": ".codencer/relay/config.json"
+}
+```
+
+Notes:
+
+- `master_key` is required if you want encrypted installation secrets.
+- `relay_config_path` is optional and only needed if you want `codencer-cloudd` to own cloud-scoped runtime control through an internal relay bridge.
+- If you use the environment variables `CODENCER_CLOUD_DB_PATH`, `CODENCER_CLOUD_HOST`, `CODENCER_CLOUD_PORT`, `CODENCER_CLOUD_MASTER_KEY`, or `CODENCER_CLOUD_RELAY_CONFIG`, they override the file values.
+
+## Bootstrap Order
+
+Because `codencer-cloudctl bootstrap` writes directly to the SQLite store, run it before starting the daemon or while the database is idle.
+
+```bash
+./bin/codencer-cloudctl bootstrap \
+ --config .codencer/cloud/config.json \
+ --org-slug acme \
+ --workspace-slug platform \
+ --project-slug core \
+ --token-name operator \
+ --json
+```
+
+The bootstrap response includes:
+
+- `org`
+- `workspace`
+- `project`
+- `membership`
+- a raw bearer token string
+- the persisted token record
+
+## Start The Cloud Daemon
+
+Standalone cloud:
+
+```bash
+./bin/codencer-cloudd --config .codencer/cloud/config.json
+```
+
+Cloud plus relay composition:
+
+```bash
+./bin/codencer-cloudd --config .codencer/cloud/config.json --relay-config .codencer/relay/config.json
+```
+
+In composed mode, use the cloud API for tenant-scoped runtime control. Do not treat raw relay routes as the cloud contract.
+
+Cloud-scoped MCP is also available in composed mode:
+
+- canonical cloud MCP endpoint: `/api/cloud/v1/mcp`
+- compatibility alias: `/api/cloud/v1/mcp/call`
+
+Use relay `/mcp` only when you are operating the self-host relay directly without cloud tenancy.
+
+For the frozen planner/client compatibility matrix, generic client examples, and cloud-vs-relay boundary, see [mcp/integrations.md](mcp/integrations.md) and [mcp/cloud_tools.md](mcp/cloud_tools.md).
+
+## Operator Commands
+
+Use the bearer token from bootstrap with the cloud control-plane CLI:
+
+```bash
+./bin/codencer-cloudctl status --cloud-url http://127.0.0.1:8190 --token <token> --json
+curl -fsS -H "Authorization: Bearer <token>" http://127.0.0.1:8190/api/cloud/v1/orgs
+curl -fsS -H "Authorization: Bearer <token>" "http://127.0.0.1:8190/api/cloud/v1/workspaces?org_id=<org-id>"
+curl -fsS -H "Authorization: Bearer <token>" "http://127.0.0.1:8190/api/cloud/v1/projects?workspace_id=<workspace-id>"
+curl -fsS -H "Authorization: Bearer <token>" "http://127.0.0.1:8190/api/cloud/v1/memberships?org_id=<org-id>"
+curl -fsS -H "Authorization: Bearer <token>" "http://127.0.0.1:8190/api/cloud/v1/tokens?org_id=<org-id>"
+curl -fsS -H "Authorization: Bearer <token>" "http://127.0.0.1:8190/api/cloud/v1/installations?org_id=<org-id>"
+./bin/codencer-cloudctl events --cloud-url http://127.0.0.1:8190 --token <token> --json
+./bin/codencer-cloudctl audit --cloud-url http://127.0.0.1:8190 --token <token> --json
+```
+
+Create a connector installation:
+
+```bash
+./bin/codencer-cloudctl install create \
+ --cloud-url http://127.0.0.1:8190 \
+ --token <token> \
+ --org-id <org-id> \
+ --workspace-id <workspace-id> \
+ --project-id <project-id> \
+ --connector slack \
+ --name "Slack smoke" \
+ --config api_base_url=http://127.0.0.1:9 \
+ --secret token=smoke-token \
+ --secret webhook_secret=smoke-secret
+```
+
+Then toggle the installation explicitly:
+
+```bash
+./bin/codencer-cloudctl install disable --cloud-url http://127.0.0.1:8190 --token <token> --installation-id <installation-id>
+./bin/codencer-cloudctl install enable --cloud-url http://127.0.0.1:8190 --token <token> --installation-id <installation-id>
+```
+
+Installation records now expose:
+
+- `owner_membership_id`
+- `health`
+- `last_validated_at`
+- `last_webhook_at`
+- `last_action_at`
+- `last_sync_at`
+- `last_error`
+
+## Claim Codencer Runtime Into Cloud Scope
+
+When `codencer-cloudd` has a relay bridge configured and the relay already knows about a local Codencer connector, claim that runtime connector into tenant scope:
+
+```bash
+./bin/codencer-cloudctl runtime-connectors claim \
+ --cloud-url http://127.0.0.1:8190 \
+ --token <token> \
+ --org-id <org-id> \
+ --workspace-id <workspace-id> \
+ --project-id <project-id> \
+ --connector-id <connector-id> \
+ --json
+```
+
+Then inspect the claimed runtime connector and its shared instances:
+
+```bash
+./bin/codencer-cloudctl runtime-connectors list --cloud-url http://127.0.0.1:8190 --token <token> --org-id <org-id> --json
+./bin/codencer-cloudctl runtime-connectors instances --cloud-url http://127.0.0.1:8190 --token <token> --runtime-connector-id <runtime-connector-id> --json
+./bin/codencer-cloudctl runtime-instances list --cloud-url http://127.0.0.1:8190 --token <token> --org-id <org-id> --json
+```
+
+You can also use the cloud HTTP surface directly for runtime work:
+
+```bash
+curl -fsS \
+ -H "Authorization: Bearer <token>" \
+ -H "Content-Type: application/json" \
+ -d '{"id":"cloud-runtime-smoke","project_id":"cloud-smoke-project"}' \
+ http://127.0.0.1:8190/api/cloud/v1/runtime/instances/<instance-id>/runs
+```
+
+Then submit a task through the same cloud-scoped prefix:
+
+```bash
+curl -fsS \
+ -H "Authorization: Bearer <token>" \
+ -H "Content-Type: application/json" \
+ -d '{"version":"v1","goal":"Verify the cloud runtime HTTP path","adapter_profile":"codex"}' \
+ http://127.0.0.1:8190/api/cloud/v1/runtime/instances/<instance-id>/runs/cloud-runtime-smoke/steps
+```
+
+Runtime steps, gates, logs, validations, and artifact content follow the same instance-scoped prefix under `/api/cloud/v1/runtime/instances/{instance_id}/...`.
+
+The same tenant-scoped runtime access is also available through cloud MCP once the runtime bridge is active:
+
+```bash
+make build-mcp-sdk-smoke
+./bin/mcp-sdk-smoke --endpoint http://127.0.0.1:8190/api/cloud/v1/mcp --token <token> --instance-id <instance-id>
+```
+
+## Connector Ingress In Composed Mode
+
+When `codencer-cloudd` is started with the relay bridge, the cloud host itself accepts local Codencer connector ingress:
+
+- `POST /api/v2/connectors/enroll`
+- `POST /api/v2/connectors/challenge`
+- `GET /ws/connectors`
+
+That means a local Codencer connector can point its relay URL at the cloud host in composed mode.
+
+Current limitation:
+
+- enrollment-token creation is still relay-config backed and is not yet a cloud-native API lifecycle
+
+For docker-compose based operators, the same image includes the relay admin CLI, so you can mint an enrollment token with:
+
+```bash
+docker compose --env-file deploy/cloud/.env -f deploy/cloud/docker-compose.yml run --rm \
+ --entrypoint codencer-relayd cloud \
+ enrollment-token create \
+ --config /etc/codencer/relay/config.json \
+ --label local-dev \
+ --json
+```
+
+## Worker
+
+`codencer-cloudworkerd` is the background worker for connector maintenance. In the current beta track:
+
+- GitHub, GitLab, Linear, and Slack remain webhook-first
+- Jira is polling-first
+- Jira webhook ingest is intentionally deferred
+- routed Jira webhook calls return `501 webhook_deferred` and do not persist events
+
+Safe worker run:
+
+```bash
+./bin/codencer-cloudworkerd --config .codencer/cloud/config.json --once
+```
+
+For a live Jira installation, provide:
+
+- `config.username`
+- `config.api_base_url`
+- either `config.jql` or `config.project_key`
+- installation secret `token`
+
+## Cloud Smoke
+
+The repo includes `scripts/cloud_smoke.sh` and a `make cloud-smoke` target. The smoke script exercises:
+
+- bootstrap
+- status
+- org/workspace/project listing via the HTTP API
+- installation creation/list/get
+- installation enable/disable
+- Slack-style webhook verification for the smoke installation
+- event listing for the smoke installation
+- audit inspection
+- a safe no-op `cloudworkerd --once` pass
+- optional composed-mode runtime claim/list assertions when `CLOUD_RELAY_CONFIG` is supplied together with either `CLOUD_RUNTIME_CONNECTOR_ID` or `CLOUD_RUNTIME_DAEMON_URL`
+- optional composed-mode cloud runtime HTTP proof under the same composed-mode runtime inputs
+- optional composed-mode cloud MCP initialize/list/call proof when `CLOUD_SMOKE_MCP=1`
+- optional composed-mode official Go SDK proof against `/api/cloud/v1/mcp` when `CLOUD_SMOKE_SDK=1`
+
+It does not claim external provider verification.
+
+Provider truth for this smoke:
+
+- Slack is the current provider-shaped binary smoke path
+- Jira polling is proven by focused tests and `codencer-cloudworkerd --once`, not by the baseline binary smoke
+- GitHub, GitLab, and Linear remain provider-fixture and routed-install/action proof paths, not provider-specific binary smoke paths
+
+Practical split:
+
+- `make cloud-smoke` proves the baseline cloud control-plane path.
+- `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_CONNECTOR_ID=... make cloud-smoke` adds composed-mode claimed-runtime and cloud runtime HTTP proof.
+- `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:8080 make cloud-smoke` can bootstrap a temporary connector automatically for the composed proof when you do not already have a shared connector id.
+- `make build-mcp-sdk-smoke` plus `CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1` adds cloud MCP and official Go SDK proof in the same composed-mode smoke run.
+
+Example composed-mode proof:
+
+```bash
+make build-cloud build-mcp-sdk-smoke
+CLOUD_RELAY_CONFIG=.codencer/relay/config.json \
+CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:8080 \
+CLOUD_SMOKE_MCP=1 \
+CLOUD_SMOKE_SDK=1 \
+make cloud-smoke
+```
+
+For the Docker-based deployment baseline, use:
+
+```bash
+make cloud-stack-smoke
+```
+
+That compose smoke verifies:
+
+- image build
+- bootstrap through the mounted config and SQLite volume
+- cloud health on the published port
+- installation create
+- audit visibility
+
+It requires a running Docker daemon. In environments where Docker CLI is installed but the daemon/socket is unavailable, use `docker compose ... config` plus the binary-native `make cloud-smoke` path instead.
+
+## Troubleshooting
+
+- If `bootstrap` or `status` fail, confirm the cloud server is using the same `db_path` as your config.
+- If secret storage fails, confirm `master_key` is set.
+- If a connector install remains `disabled`, check the enable route and the audit trail.
+- If a Jira webhook call returns `webhook_deferred`, that is expected in this phase; use `codencer-cloudworkerd` for Jira ingest.
+- If runtime connector claim fails, confirm the relay bridge is configured and either provide a valid shared `CLOUD_RUNTIME_CONNECTOR_ID` or set `CLOUD_RUNTIME_DAEMON_URL` so the smoke can enroll a temporary connector first.
+- If a runtime instance does not appear, confirm it is still shared by the local Codencer connector.
+- If cloud MCP calls fail, confirm the cloud daemon was started with `relay_config_path` or `--relay-config`, that the token is valid for the target tenant scope, and that the token includes the tool-specific scopes you are calling. `list_instances` and `get_instance` still require `runtime_instances:read`.
+- If Jira polling fails, confirm `config.jql` or `config.project_key` is present and that the provider credentials are valid.
+- If connector event history shows repeated source IDs, that is now expected append-only behavior rather than a silent overwrite.
+
+For connector capability details, see [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md). For the high-level cloud overview, see [CLOUD.md](CLOUD.md).
diff --git a/docs/CONNECTOR.md b/docs/CONNECTOR.md
new file mode 100644
index 0000000..c996c55
--- /dev/null
+++ b/docs/CONNECTOR.md
@@ -0,0 +1,225 @@
+# Connector
+
+Codencer's connector is the outbound-only bridge between a relay and one or more local Codencer daemons. It is not a planner, not an executor, and not a second orchestration brain.
+
+If you want the full self-host operator sequence first, start with [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) and return here for connector-specific semantics.
+
+## Role
+
+The connector is responsible for:
+- persistent connector identity
+- connector enrollment
+- outbound authenticated websocket session to the relay
+- explicit local instance sharing
+- narrow proxying to the local daemon
+
+The connector is not responsible for:
+- planning
+- direct local execution
+- raw shell exposure
+- generic tunneling
+
+## Local Config
+
+Default config path:
+- `.codencer/connector/config.json`
+
+Status path:
+- `.codencer/connector/status.json`
+
+The connector persists:
+- relay URL and websocket metadata
+- connector Ed25519 keypair
+- `connector_id`
+- `machine_id`
+- explicit shared-instance config
+- the last local session snapshot in `status.json`
+
+## Commands
+
+The connector CLI now exposes the full local operator surface:
+
+```bash
+./bin/codencer-connectord enroll ...
+./bin/codencer-connectord run ...
+./bin/codencer-connectord status [--json]
+./bin/codencer-connectord list [--json]
+./bin/codencer-connectord discover [--json] [--root /path/to/repos]
+./bin/codencer-connectord share --instance-id <instance-id>
+./bin/codencer-connectord share --daemon-url http://127.0.0.1:8085
+./bin/codencer-connectord unshare --instance-id <instance-id>
+./bin/codencer-connectord config [--json] [--show-secrets]
+```
+
+Command semantics:
+- `status` reads the local status snapshot. Plain text is richer and includes configured shared/unshared instances. `--json` still prints the raw status file for machine consumers.
+- `list` shows every configured connector instance, including `share: false` entries.
+- `discover` scans configured `discovery_roots` plus any repeated `--root` overrides and reports `instance_id`, `repo_root`, `manifest_path`, `daemon_url`, and state as `shared`, `known_unshared`, or `discovered_only`. It never changes the allowlist.
+- `share` resolves the selector to a healthy local daemon before it persists `share=true`. `--daemon-url` is the self-sufficient path. `--instance-id` works only when discovery or existing connector metadata can resolve that instance back to a local daemon.
+- `unshare` keeps the entry but flips `share=false`. It does not delete history from the config.
+- `config` prints the persisted config safely by default. `private_key` is redacted unless `--show-secrets` is explicitly passed. `--json` is available for machine-readable output.
+
+## Operator Status
+
+Use the local status file when you want to check connector state without contacting the relay:
+
+```bash
+./bin/codencer-connectord status --config .codencer/connector/config.json --json
+```
+
+The status file records:
+- `connector_id`
+- `machine_id`
+- `relay_url`
+- `session_state`
+- `last_connect_at`
+- `last_disconnect_at`
+- `last_heartbeat_at`
+- `last_error`
+- `shared_instances`
+
+Session states are intentionally small and honest:
+- `disconnected`
+- `connecting`
+- `connected`
+- `error`
+
+The plain-text `status` view additionally shows:
+- currently shared instance IDs from the latest live session snapshot
+- configured shared and unshared counts
+- each configured instance selector line from the local allowlist
+
+## Enrollment
+
+Preferred flow:
+
+```bash
+./bin/codencer-connectord enroll \
+ --relay-url http://127.0.0.1:8090 \
+ --daemon-url http://127.0.0.1:8085 \
+ --enrollment-token <enrollment-token>
+```
+
+Enrollment does two things:
+- exchanges the one-time relay enrollment token for connector identity
+- seeds one shared instance from the daemon URL used during enrollment
+
+That enrollment seed is only a starting point. Use `share`, `unshare`, `list`, and `discover` for day-to-day instance management after the connector is enrolled.
+
+## Shared Instances
+
+The connector does not expose every discovered repo by default.
+
+Sharing rules:
+- discovery roots can discover manifests
+- discovery alone does not share them
+- the connector config is the allowlist
+- only entries with `share: true` are advertised to the relay
+- `share: false` entries remain in the config so operators can see what has been intentionally withheld
+
+Each shared instance entry can identify the local daemon by one or more of:
+- `instance_id`
+- `daemon_url`
+- `manifest_path`
+
+Practical note:
+- `share --instance-id` is only valid when that id is already discoverable from configured discovery roots or from existing connector metadata.
+- `share --daemon-url` is the canonical self-host operator path when you want the connector to prove the target daemon is live before advertising it.
+
+Examples:
+
+```bash
+./bin/codencer-connectord share \
+ --config .codencer/connector/config.json \
+ --daemon-url http://127.0.0.1:8086
+
+./bin/codencer-connectord unshare \
+ --config .codencer/connector/config.json \
+ --instance-id inst_repo_b
+
+./bin/codencer-connectord list \
+ --config .codencer/connector/config.json
+
+./bin/codencer-connectord discover \
+ --config .codencer/connector/config.json \
+ --root ~/src
+```
+
+`list` and `discover` are intentionally different:
+- `list` is the configured-state view from the local allowlist.
+- `discover` is the live visibility view from configured discovery roots plus optional overrides.
+- `discover` never auto-shares newly found instances.
+
+## Session Model
+
+At runtime the connector:
+1. fetches a relay challenge
+2. signs the challenge with its local private key
+3. opens an outbound websocket session
+4. advertises only shared instances
+5. reloads config before heartbeats and sends a fresh advertise when the effective shared set changes
+6. re-advertises after reconnect
+
+No inbound listener is required for normal use.
+
+Reconnect behavior is deliberately simple:
+- failures back off exponentially from a short base delay up to a capped delay
+- a successful connection resets the backoff window
+- status snapshots keep the last error and latest heartbeat timestamps
+
+## Allowed Proxy Surface
+
+The connector only proxies the narrow local Codencer API surface:
+- instance read
+- run create/list/read
+- run patch operations such as abort
+- run gate listing
+- step submit
+- step read
+- step result
+- step validations
+- step artifact listing
+- step logs
+- step wait
+- step retry
+- gate read
+- gate approve/reject
+- artifact read
+- artifact content read
+
+The relay evidence path now works end to end for:
+- `GET /api/v1/steps/{id}/validations`
+- `GET /api/v1/steps/{id}/artifacts`
+- `GET /api/v1/steps/{id}/logs`
+
+It does not expose:
+- raw shell
+- arbitrary file reads
+- generic network tunneling
+
+Abort forwarding stays honest:
+- the connector can forward an abort request
+- it cannot guarantee a hard process kill on the local adapter side
+- a remote abort is only considered successful when the daemon confirms the active step reached `cancelled`
+
+## Placement Guidance
+
+The default recommendation is:
+- run the connector on the same side as the daemon
+- keep the repo, worktrees, and artifacts on that same side
+- let the relay be the remote surface
+
+In mixed WSL/Windows setups:
+- daemon and connector usually belong in WSL/Linux
+- Antigravity broker and IDE may live on Windows
+
+See [WSL / Windows / Antigravity Topology](WSL_WINDOWS_ANTIGRAVITY.md) for the practical topology.
+
+## Reset / Revocation
+
+To reset a connector locally:
+1. stop the connector process
+2. remove `.codencer/connector/config.json`
+3. enroll again with a fresh enrollment token
+
+Relay-side disable and revocation are controlled by the relay control plane. The connector will report disconnect and auth failures honestly in `status.json` if the relay stops accepting the connector.
diff --git a/docs/KNOWN_LIMITATIONS.md b/docs/KNOWN_LIMITATIONS.md
new file mode 100644
index 0000000..401d5b9
--- /dev/null
+++ b/docs/KNOWN_LIMITATIONS.md
@@ -0,0 +1,22 @@
+# Known Limitations
+
+This page consolidates already-stated beta boundaries from the current repo truth. Start with [BETA_TESTING.md](BETA_TESTING.md) for the frozen public test tracks, and use [mcp/integrations.md](mcp/integrations.md) for the relay-vs-cloud planner/client contract.
+
+| Beta Surface | Known Limitation | Workaround or Plan | Severity |
+| --- | --- | --- | --- |
+| Local daemon and local adapters | Canonical local proof is simulation-first; live adapter proof stays narrow, and the checked-in `codex` proof is still simulation-heavy rather than live-binary proven. | Use the canonical local proof path from [BETA_TESTING.md](BETA_TESTING.md): `./scripts/smoke_test_v1.sh` then `make smoke`. Treat `/api/v1/compatibility` and `orchestratorctl instance --json` as runtime truth for local readiness. | `friction` |
+| Local daemon MCP surface | Daemon-local `/mcp/call` is compatibility-only and is not the public remote planner MCP contract. | Keep remote planners on relay `/mcp` or cloud `/api/cloud/v1/mcp` as described in [mcp/integrations.md](mcp/integrations.md). | `blocker-for-use` |
+| Relay and daemon abort flows | Abort remains best-effort. Codencer only reports a successful abort when the active step actually reaches `cancelled`; it does not claim universal hard-kill semantics. | Operate runs through the recorded state: inspect step state, result, validations, logs, gates, and artifacts after an abort request instead of assuming a forced stop. | `friction` |
+| Relay artifact transport | Large binary artifact transfer is intentionally bounded; the connector is not a bulk file tunnel. | Use the normal artifact list and artifact content APIs for bounded evidence retrieval, and keep large-binary transfer expectations out of the relay path. | `friction` |
+| Self-host planner authentication | Self-host planner auth is static bearer-token based and is not enterprise IAM. | Use the self-host relay/cloud paths as documented today, with the understanding that the current beta scope is narrow self-host operator use rather than enterprise identity integration. | `friction` |
+| Generic MCP clients | Generic MCP clients beyond the manual JSON-RPC callers and official Go SDK helper remain expected-only; protocol behavior is proven, but specific client products are not universally repo-proven. | Prefer the repo-proven paths in [mcp/integrations.md](mcp/integrations.md): relay/cloud HTTP, relay/cloud MCP, and the official Go SDK smoke helper. | `friction` |
+| ChatGPT-style and Claude-style remote MCP paths | Product-specific ChatGPT-style and Claude-style integrations are compatibility-only in this beta contract. | Wire those clients only to relay `/mcp` or cloud `/api/cloud/v1/mcp`, keep the claim narrow, and use [mcp/integrations.md](mcp/integrations.md) as the Codencer-side contract. | `friction` |
+| Cloud runtime control | Cloud runtime HTTP and cloud MCP are only useful in composed mode, and cloud runtime connector ownership is explicit. A relay connector must still be claimed into org/workspace/project scope before cloud can use it. | Start `codencer-cloudd` with `--relay-config` or `relay_config_path`, then claim the runtime connector into tenant scope before using cloud runtime HTTP or MCP. | `blocker-for-use` |
+| Cloud connector enrollment | Connector enrollment-token issuance remains relay-config backed; cloud hosts connector ingress in composed mode, but it does not yet add a cloud-native enrollment-token lifecycle. | Issue enrollment tokens through the relay-backed self-host flow today. | `friction` |
+| Docker self-host cloud baseline | `make cloud-stack-smoke` is the Docker baseline proof only. The Docker compose stack does not create a usable runtime instance by itself, and the broader runtime/MCP/SDK proof lives elsewhere. | Use binary-native `make cloud-smoke` with composed-mode inputs when you need claimed runtime visibility, cloud runtime HTTP, cloud MCP, or official Go SDK proof. | `friction` |
+| Provider connectors overall | No provider is marketplace-approved, app-store-ready, OAuth-install ready, or vendor-depth complete in this phase. Live vendor-account proof is not part of the current beta promise. | Keep claims at the documented install/validate/action/webhook/audit depth from [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md), and use [BETA_TESTING.md](BETA_TESTING.md) for the current provider track boundary. | `friction` |
+| GitHub, GitLab, Jira, and Linear provider packaging | Slack is the strongest provider-shaped operator path today; GitHub, GitLab, Jira, and Linear remain narrower operator/package surfaces, and they do not yet have provider-specific binary smoke equivalent to Slack's current coverage. | Use Slack when you need the strongest current provider-shaped smoke path. For the other providers, rely on the generic cloud install/action routes and the provider-focused tests documented in [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md). | `friction` |
+| Jira connector ingest | Jira is polling-first in the current beta track. Webhook ingest is explicitly deferred, and routed Jira webhook calls return `501 webhook_deferred` instead of ingesting payloads. | Use `codencer-cloudworkerd` for Jira polling and configure `config.jql` or `config.project_key`. Do not treat `/webhook` as the supported Jira ingest path in this phase. | `blocker-for-use` |
+| Cloud connector action history | There is still no public API route for listing action-log rows directly; action-log depth is mainly a store and audit truth improvement in this phase. | Use the existing cloud audit and event surfaces for operator visibility until a direct action-log listing route exists. | `friction` |
+| WSL / Windows / agent-broker topology | The WSL/Windows layout is operator guidance, not an automated smoke-proof matrix, and the recommended topology keeps execution and artifacts on the Linux side. | Keep the repo checkout, daemon, connector, worktrees, and artifacts in WSL/Linux; keep `agent-broker` on Windows only when needed; inspect results through APIs and CLI rather than raw cross-side paths. | `friction` |
+| Ordered task execution | Ordered task execution remains wrapper-based and is not a native workflow engine inside Codencer core. | Use the checked-in wrapper examples and `submit --wait --json` loops for sequential execution. | `friction` |
diff --git a/docs/RELAY.md b/docs/RELAY.md
new file mode 100644
index 0000000..a1f6d7e
--- /dev/null
+++ b/docs/RELAY.md
@@ -0,0 +1,217 @@
+# Relay
+
+The Codencer relay is a narrow self-hostable control plane. It is not a planner, not an executor, and not a remote shell.
+
+If you want the end-to-end operator flow first, start with [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) and return here for route and auth details.
+
+## Role
+
+The relay does three things:
+- authenticates planners and connectors
+- routes planner calls to explicitly shared local instances
+- records audit events for remote control actions
+
+The relay does not:
+- plan work
+- execute local code
+- expose a generic tunnel
+- expose raw shell or arbitrary filesystem tools
+
+## Public Surfaces
+
+- planner HTTP API: `/api/v2/...`
+- relay MCP: `/mcp`
+- relay MCP compatibility path: `/mcp/call`
+- connector websocket: `/ws/connectors`
+
+The relay is the public remote control surface.
+
+The local daemon is not intended to be exposed directly.
+
+Cloud composition note: `codencer-cloudd` can optionally front the relay handler with `--relay-config` so the cloud control plane and relay API live in one process. That composition does not change the relay role: relay remains the local self-host control plane, while cloud provider installations and connector polling live under the separate cloud domain documented in [CLOUD.md](CLOUD.md).
+
+## Startup
+
+Run the canonical relay binary:
+
+- `./bin/codencer-relayd`
+- `./bin/codencer-relayd serve --config .codencer/relay/config.json`
+
+The relay config must include at least:
+- `db_path`
+- `planner_token` or `planner_tokens`
+
+Useful config keys for practical self-host use:
+- `proxy_timeout_seconds`
+- `allowed_origins`
+- `heartbeat_interval_seconds`
+- `session_ttl_seconds`
+- `challenge_ttl_seconds`
+
+Optional bootstrap compatibility:
+- `enrollment_secret`
+
+## Planner Auth
+
+Planner callers authenticate with bearer tokens.
+
+Supported config shapes:
+- `planner_token`: one full-scope token for small self-host setups
+- `planner_tokens[]`: named tokens with `token`, `scopes`, and optional `instance_ids`
+
+Current auth model is intentionally small:
+- static token config
+- explicit scopes
+- optional instance scoping
+- suitable for narrow self-host beta use
+
+It is not enterprise IAM.
+
+Operator helper:
+
+```bash
+./bin/codencer-relayd planner-token create \
+ --config .codencer/relay/config.json \
+ --write-config \
+ --name operator \
+ --scope '*'
+```
+
+## Connector Auth
+
+Connector auth uses:
+- one-time enrollment token exchange
+- connector Ed25519 keypair
+- signed challenge/response
+- outbound websocket session
+- heartbeat-driven presence
+
+Legacy bootstrap compatibility:
+- `enrollment_secret` can still be used directly if configured, but it should be treated as bootstrap-only fallback
+- new deployments should prefer one-time enrollment tokens
+
+## Persisted State
+
+The relay persists in SQLite:
+- connector records
+- one-time enrollment tokens
+- connector challenge state
+- advertised instance descriptors for the connector's current shared set
+- resource routing hints
+- audit events
+
+Advertise truth model:
+- each connector `advertise` payload is treated as the authoritative current shared-instance set for that connector
+- newly advertised instances are upserted
+- previously connector-owned instances that are absent from the new advertise payload are pruned
+- pruning also deletes cached resource-route hints for that instance so unshared instances stop appearing in `/api/v2/instances` and stop being routable
+
+## Planner API
+
+Planner-facing HTTP routes live under `/api/v2`.
+
+Current routes include:
+- `GET /api/v2/status`
+- `GET /api/v2/connectors`
+- `GET /api/v2/connectors/{connector_id}`
+- `POST /api/v2/connectors/{connector_id}/disable`
+- `POST /api/v2/connectors/{connector_id}/enable`
+- `GET /api/v2/audit?limit=N`
+- `GET /api/v2/instances`
+- `GET /api/v2/instances/{instance_id}`
+- `GET|POST /api/v2/instances/{instance_id}/runs`
+- `GET /api/v2/instances/{instance_id}/runs/{run_id}`
+- `GET /api/v2/instances/{instance_id}/runs/{run_id}/gates`
+- `POST /api/v2/instances/{instance_id}/runs/{run_id}/steps`
+- `POST /api/v2/instances/{instance_id}/runs/{run_id}/abort`
+- `GET /api/v2/steps/{step_id}`
+- `POST /api/v2/steps/{step_id}/wait`
+- `POST /api/v2/steps/{step_id}/retry`
+- `GET /api/v2/steps/{step_id}/result`
+- `GET /api/v2/steps/{step_id}/logs`
+- `GET /api/v2/steps/{step_id}/artifacts`
+- `GET /api/v2/steps/{step_id}/validations`
+- `GET /api/v2/artifacts/{artifact_id}/content`
+- `POST /api/v2/gates/{gate_id}/approve`
+- `POST /api/v2/gates/{gate_id}/reject`
+
+These routes stay narrow and instance-oriented.
+
+Operational notes:
+- `/api/v2/status` returns relay version, start time, connector and instance counts, auth mode, and whether bootstrap `enrollment_secret` mode is enabled
+- `/api/v2/connectors` returns connector identity, online/offline state, last seen, disabled state, and shared instance ids
+- connector enable/disable mutations are explicit planner-admin actions and are audited
+- `/api/v2/audit` returns recent persisted audit events newest first, default limit `100`, max `1000`
+- offline connectors remain visible through `/api/v2/connectors`, but stale offline sessions are never used for routing
+- `/api/v2/steps/{step_id}/wait` uses planner-provided `timeout_ms` when present, capped by `proxy_timeout_seconds`
+
+## MCP Surface
+
+The relay is also the public MCP surface.
+
+Supported MCP transport behavior:
+- `POST /mcp`
+- `GET /mcp`
+- `DELETE /mcp`
+- `/mcp/call` remains as a compatibility alias for POST callers
+
+Supported MCP methods:
+- `initialize`
+- `notifications/initialized`
+- `tools/list`
+- `tools/call`
+
+Supported tools are the `codencer.*` relay tools documented in [mcp/relay_tools.md](mcp/relay_tools.md).
+
+For the frozen planner/client compatibility matrix, generic HTTP/MCP examples, and client-specific packaging notes, see [mcp/integrations.md](mcp/integrations.md).
+
+Protocol notes:
+- the relay negotiates and returns `MCP-Protocol-Version`
+- the relay can return `MCP-Session-Id` on `initialize`
+- the relay enforces `allowed_origins` for browser-style MCP callers when configured
+- the canonical streamable session path is `/mcp`; use `POST /mcp/call` only as a compatibility POST alias, not as the primary long-lived session endpoint
+- the current relay remains request/response-first; it does not rely on unsolicited long-lived server notifications for planner functionality
+
+The local daemon's `/mcp/call` surface is separate and should be treated as local compatibility/admin tooling, not as the remote public MCP endpoint.
+
+## Security Boundaries
+
+The remote path is intentionally narrow:
+- planner talks to relay
+- relay talks only to authenticated connectors
+- connector proxies only an allowlisted daemon API
+- daemon executes locally through adapters
+
+The relay does not widen privileges beyond planner token scopes or connector sharing decisions.
+
+## Known Limitations
+
+Current honest limitations:
+- planner auth is static-token based
+- relay resolves unknown `step`, `artifact`, and `gate` ids by probing only authorized online shared instances, then persists route hints; lookups still fail closed when no online match exists or multiple instances match
+- artifact transfer is bounded and is not intended for bulk binary transport
+- abort semantics remain best-effort unless the local adapter confirms stop; planner callers only get a successful abort when the daemon actually reaches `cancelled`
+- MCP compatibility is intentionally tool-focused; the public planner contract is the explicit `codencer.*` tool set rather than a broader autonomous control surface
+- relay MCP is intentionally not admin-surface-complete; status, audit, enrollment-token creation, and connector enable/disable remain HTTP admin routes
+
+## Audit Trail
+
+The relay appends audit events for:
+- planner enrollment-token creation
+- connector enrollment
+- connector session establishment
+- planner control calls
+
+Each audit event records actor, action, target, outcome, and timestamp.
+
+## Admin Helpers
+
+```bash
+./bin/codencer-relayd status --config .codencer/relay/config.json
+./bin/codencer-relayd connectors --config .codencer/relay/config.json
+./bin/codencer-relayd instances --config .codencer/relay/config.json
+./bin/codencer-relayd audit --config .codencer/relay/config.json --limit 50
+./bin/codencer-relayd enrollment-token create --config .codencer/relay/config.json --label local-dev --json
+./bin/codencer-relayd connector disable --config .codencer/relay/config.json <connector-id>
+./bin/codencer-relayd connector enable --config .codencer/relay/config.json <connector-id>
+```
diff --git a/docs/RELEASE_NOTES_v0.2.0-beta.md b/docs/RELEASE_NOTES_v0.2.0-beta.md
new file mode 100644
index 0000000..692221f
--- /dev/null
+++ b/docs/RELEASE_NOTES_v0.2.0-beta.md
@@ -0,0 +1,65 @@
+# Codencer v0.2.0-beta Release Notes
+
+Use [BETA_TESTING.md](BETA_TESTING.md) for the frozen beta tracks and [mcp/integrations.md](mcp/integrations.md) for the planner/client contract.
+
+## What shipped
+
+`v0.2.0-beta` ships five public beta tracks:
+
+- local-only daemon plus CLI
+- self-host relay plus runtime connector
+- self-host cloud control plane
+- planner/client relay and cloud integrations
+- provider connector beta surfaces
+
+## What's new since v0.1.0-beta
+
+- The repo now exposes a self-host relay path with planner HTTP, planner MCP, enrollment tokens, explicit instance sharing, and relay audit/admin surfaces.
+- The cloud control plane is now part of the beta truth with bootstrap, tenancy, provider installs, claimed runtime connectors, cloud HTTP, and cloud MCP.
+- Repo-level proof entrypoints now exist for supported builds and supported verification, including the Docker-backed cloud baseline on Docker-capable hosts.
+- The public docs now route testers by platform and planner/client instead of forcing them to reverse-engineer the support matrix.
+- Known limitations are consolidated into one operator-grade reference instead of being scattered across README, cloud docs, and planner docs.
+
+## How to try it
+
+Start with a clean clone, then use the supported build and verifier entrypoints before choosing a platform walkthrough and a planner walkthrough.
+
+```bash
+make build-supported
+make verify-beta
+make verify-beta-docker
+```
+
+Run `make verify-beta-docker` only on a Docker-capable host when you also want the Docker cloud baseline.
+
+## What works today
+
+- Local daemon, CLI, instance identity, simulation mode, artifacts, validations, and retry/gate flows are the canonical beta core.
+- Self-host relay HTTP, relay MCP, connector enrollment/session/share control, and relay audit/admin surfaces are supported beta targets.
+- Self-host cloud HTTP and cloud MCP are supported beta targets when cloud runs in composed runtime mode.
+- The official Go SDK path to relay MCP and cloud MCP is proven.
+- Provider connectors are included in the beta promise at the frozen documented depth, with Slack as the strongest current operator path and Jira explicitly polling-first.
+
+## What's compatibility-only or deferred
+
+- ChatGPT-style and Claude-style planner flows remain `compatibility-only`; use the new walkthroughs as operator guidance, not as direct product proof.
+- Gemini CLI remains `expected-only` in this pass because the docs are aligned to official Gemini CLI references but were not locally validated on this host.
+- Generic MCP clients beyond the repo-proven manual JSON-RPC callers and the official Go SDK helper remain `expected-only`.
+- Daemon-local `/mcp/call` remains a compatibility/admin bridge, not the public remote planner contract.
+- `agent-broker`, the VS Code extension, `openclaw-acpx`, `ide-chat`, and vendor-depth provider flows remain outside the primary beta promise.
+
+See [KNOWN_LIMITATIONS.md](KNOWN_LIMITATIONS.md) for the consolidated list.
+
+## Known limitations
+
+Use [KNOWN_LIMITATIONS.md](KNOWN_LIMITATIONS.md) for the current boundary list, workarounds, and severity labels.
+
+## Feedback
+
+File issues in the GitHub repository's normal Issues flow. There is no dedicated beta issue template in this repo right now, so use the default blank issue and include:
+
+- platform (`macOS`, `Windows`, `WSL`, or `remote VPS`)
+- planner/client (`ChatGPT`, `Claude Desktop`, `claude.ai`, `Gemini CLI`, or direct HTTP/MCP)
+- the track you followed from [BETA_TESTING.md](BETA_TESTING.md)
+- the exact commands you ran
+- the first failing output snippet
diff --git a/docs/SELF_HOST_REFERENCE.md b/docs/SELF_HOST_REFERENCE.md
new file mode 100644
index 0000000..da44c1b
--- /dev/null
+++ b/docs/SELF_HOST_REFERENCE.md
@@ -0,0 +1,336 @@
+# Self-Host Relay / Runtime Reference
+
+Codencer v2 supports a self-hostable remote planner path without moving execution off the local machine.
+
+If you want cloud tenancy, cloud-scoped runtime control, or provider installations instead of the raw relay/runtime path, start with [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md).
+
+## Current Topology
+
+```text
+Planner / Chat
+ -> Relay planner API or relay MCP
+ -> Relay daemon
+ -> Connector outbound websocket
+ -> Local Codencer daemon
+ -> Local adapters
+```
+
+Execution still stays local. The relay is transport, auth, and audit. The connector is an outbound bridge. The daemon remains the local system of record.
+
+## Public Surfaces
+
+- Local daemon API: `/api/v1`
+- Local daemon compatibility/admin MCP surface: `/mcp/call`
+- Relay planner API: `/api/v2`
+- Relay MCP: `/mcp`
+- Relay MCP compatibility path: `/mcp/call`
+- Relay connector websocket: `/ws/connectors`
+
+The local daemon is not the public remote MCP server.
+
+## Operator Flow
+
+### 0. Create a relay config and planner token
+
+The practical cold-start flow is:
+
+```bash
+mkdir -p .codencer/relay
+./bin/codencer-relayd planner-token create \
+ --config .codencer/relay/config.json \
+ --write-config \
+ --name operator \
+ --scope '*'
+```
+
+That command creates or updates a local relay config file with a high-entropy static planner bearer token.
+
+Minimal relay config example:
+
+```json
+{
+ "host": "127.0.0.1",
+ "port": 8090,
+ "db_path": ".codencer/relay/relay.db",
+ "planner_tokens": [
+ {
+ "name": "operator",
+ "token": "",
+ "scopes": ["*"]
+ }
+ ],
+ "proxy_timeout_seconds": 300,
+ "allowed_origins": ["http://127.0.0.1:8090"]
+}
+```
+
+### 1. Start the local daemon
+
+Run the daemon near the repo you want to serve:
+
+```bash
+./bin/orchestratord --repo-root /path/to/repo
+```
+
+Or use the existing convenience flow:
+
+```bash
+make start
+```
+
+### 2. Inspect local instance identity
+
+Verify the daemon's stable identity and local manifest-backed metadata:
+
+```bash
+./bin/orchestratorctl instance --json
+```
+
+Or inspect the daemon directly:
+
+```bash
+curl http://127.0.0.1:8085/api/v1/instance
+```
+
+The daemon writes a repo-local manifest under `.codencer/instance.json`.
+
+### 3. Start the relay
+
+Run the relay with the config you created:
+
+```bash
+./bin/codencer-relayd --config .codencer/relay/config.json
+```
+
+The relay is the public remote control plane. Do not expose the daemon directly.
+
+Operator status/admin endpoints live on the relay too:
+- `GET /api/v2/status`
+- `GET /api/v2/connectors`
+- `GET /api/v2/audit?limit=N`
+
+Local helper commands are available too:
+
+```bash
+./bin/codencer-relayd status --config .codencer/relay/config.json
+./bin/codencer-relayd connectors --config .codencer/relay/config.json
+./bin/codencer-relayd instances --config .codencer/relay/config.json
+```
+
+### 4. Create a one-time enrollment token
+
+```bash
+./bin/codencer-relayd enrollment-token create \
+ --config .codencer/relay/config.json \
+ --label local-dev \
+ --expires-in-seconds 600 \
+ --json
+```
+
+### 5. Enroll the connector
+
+```bash
+./bin/codencer-connectord enroll \
+ --relay-url <relay-url> \
+ --daemon-url <daemon-url> \
+ --enrollment-token <enrollment-token>
+```
+
+The connector persists:
+- `relay_url`
+- `connector_id`
+- `machine_id`
+- `private_key`
+- `instances[]` allowlist entries
+- `status.json` session snapshot
+
+Legacy bootstrap compatibility:
+- `enrollment_secret` is still accepted if configured on the relay as a bootstrap-only fallback
+- new self-host setups should prefer one-time enrollment tokens
+
+### 6. Verify instance sharing
+
+Enrollment seeds one shared instance from the daemon URL you enrolled against.
+
+Important rules:
+- discovery roots do not auto-share repos
+- connector config is the allowlist
+- only `share: true` instances are advertised
+
+Inspect and manage the allowlist explicitly before running the connector:
+
+```bash
+./bin/codencer-connectord discover --config .codencer/connector/config.json
+./bin/codencer-connectord list
+./bin/codencer-connectord share --daemon-url http://127.0.0.1:8085
+./bin/codencer-connectord share --instance-id <instance-id>
+./bin/codencer-connectord unshare --instance-id <instance-id>
+./bin/codencer-connectord config
+```
+
+`unshare` marks an instance as `share=false` and keeps the record in local config, so operators can see both known-shared and known-unshared repos.
+
+`share --instance-id` is only valid when discovery or existing connector metadata can resolve that id back to a healthy local daemon. `share --daemon-url` is the self-sufficient operator path.
+
+You can also inspect the relay-side view of shared instances with:
+
+```bash
+./bin/codencer-relayd connectors --config .codencer/relay/config.json
+./bin/codencer-relayd audit --config .codencer/relay/config.json --limit 20
+```
+
+### 7. Run the connector
+
+```bash
+./bin/codencer-connectord run
+```
+
+The connector opens an outbound authenticated websocket session to the relay and advertises only the explicitly shared local instances.
+
+Check connector state locally at any time:
+
+```bash
+./bin/codencer-connectord status --json
+```
+
+### 8. Connect the planner
+
+Use either:
+- relay planner API under `/api/v2`
+- relay MCP at `/mcp`
+
+The relay is the remote planner surface. The daemon-local `/mcp/call` endpoint is only a local compatibility/admin bridge.
+
+For the frozen planner/client compatibility matrix, generic HTTP/MCP examples, and client-specific packaging notes, see [mcp/integrations.md](mcp/integrations.md) and [mcp/relay_tools.md](mcp/relay_tools.md).
+
+Current MCP transport posture:
+- canonical endpoint: `/mcp`
+- compatibility alias: `/mcp/call`
+- POST JSON-RPC is supported for straightforward planner integrations
+- Streamable HTTP compatibility is implemented on `/mcp` with `GET`, `POST`, and `DELETE`, `MCP-Protocol-Version`, and `MCP-Session-Id`
+- the current relay is still request/response-first and does not emit long-lived unsolicited server notifications
+
+### 9. Start work and inspect evidence
+
+Typical remote sequence:
+1. list instances
+2. start run
+3. submit task
+4. wait for step or poll step/result
+5. inspect result
+6. inspect validations
+7. inspect logs
+8. inspect artifacts
+
+Remote artifact access is ID-based:
+- artifact content is fetched by `artifact_id`
+- there is no arbitrary path browsing tool
+- large binary transport is intentionally bounded
+
+### 10. Operate the run honestly
+
+Supported remote actions include:
+- approve gate
+- reject gate
+- abort run
+- retry step
+- disable or enable a connector from the relay admin surface
+
+Current limitations remain explicit:
+- abort is best-effort unless the adapter actually confirms stop, and the caller only gets a successful abort when the active step reaches `cancelled`
+- large binary artifact transfer is intentionally bounded
+
+Current routing behavior:
+- relay step/gate/artifact lookups first use stored route hints
+- if a hint is missing, the relay probes only authorized online shared instances
+- successful probes are persisted as route hints for later lookups
+- ambiguous matches still fail closed
+
+## Allowed Remote Surface
+
+The connector only proxies a narrow allowlist:
+- run create/list/read
+- run abort
+- run gate listing
+- step submit/read/result/validations/artifacts/logs
+- step retry
+- step wait
+- gate approve/reject
+- instance read
+- artifact content read
+
+The relay and connector do not expose:
+- raw shell
+- arbitrary filesystem browsing
+- generic network tunneling
+
+## Practical Smoke Path
+
+Once the daemon and relay are already running, use the repo smoke helper for the happy path:
+
+```bash
+PLANNER_TOKEN=<planner-token> make self-host-smoke
+```
+
+The smoke flow:
+1. reads the local daemon instance identity
+2. creates a one-time relay enrollment token through `codencer-relayd enrollment-token create`
+3. enrolls and runs a temporary connector
+4. waits for instance advertisement
+5. starts a run through the relay
+6. submits a real `TaskSpec`-compatible task
+7. waits for the step
+8. fetches result, validations, logs, gates, and artifacts
+
+Optional smoke scenario coverage:
+
+Default proof from `make self-host-smoke`:
+- connector enrollment and websocket session establishment
+- relay instance visibility for the enrolled daemon
+- run create, task submit, wait, result, validations, logs, gates, and artifact fetch over relay HTTP
+- relay audit visibility when `audit` is enabled
+
+Optional proof paths:
+
+```bash
+PLANNER_TOKEN=<planner-token> SMOKE_SCENARIOS=status,audit,share-control,mcp,mcp-sdk make self-host-smoke
+PLANNER_TOKEN=<planner-token> make self-host-smoke-all
+```
+
+- `share-control` now proves `unshare` removes relay visibility and blocks routing, then `share --instance-id` restores visibility before the main relay flow runs again.
+- `mcp` proves manual relay MCP initialize, SSE stream bootstrap, compatibility POST alias use, tool calls, and session delete.
+- `mcp-sdk` proves official Go SDK interoperability against relay `/mcp`.
+- `multi-instance` proves one connector can advertise two local daemons and that explicit instance targeting reaches only the selected daemon.
+
+Still outside smoke proof:
+- cold bootstrap of the daemon and relay themselves
+- real non-simulation adapter execution
+- WSL/Windows/Antigravity topology behavior
+- hard guarantees for gate, retry, or abort semantics beyond the statuses captured by the script
+
+If you want the standalone SDK proof path, build and run the helper directly:
+
+```bash
+make build-mcp-sdk-smoke
+./bin/mcp-sdk-smoke --endpoint http://127.0.0.1:8090/mcp --token <planner-token> --instance-id <instance-id>
+```
+
+If you need the Windows-side agent-broker binary too, build it separately with:
+
+```bash
+make build-broker
+```
+
+## WSL / Windows / Antigravity
+
+The practical default is:
+- daemon, connector, repos, worktrees, and artifacts in WSL/Linux
+- agent-broker and IDE on Windows when needed
+- relay wherever the operator wants to host the remote control plane
+
+This is the recommended operator topology, not an automated smoke proof. See [WSL / Windows / Antigravity Topology](WSL_WINDOWS_ANTIGRAVITY.md) for the trust boundaries and placement guidance.
+
+## Default Relay vs Self-Host
+
+- Self-host mode is implemented in this repo and uses your own relay config, sqlite state, and tokens.
+- A future default or managed relay can speak the same connector session model, but self-host does not depend on that future service.
diff --git a/docs/SETUP.md b/docs/SETUP.md
index 9860cdc..ad69d83 100644
--- a/docs/SETUP.md
+++ b/docs/SETUP.md
@@ -1,12 +1,12 @@
# Environmental Reference & Setup
-This guide provides the technical baseline for running the Codencer Orchestration Bridge.
+This guide is the setup hub for the Codencer beta tracks. Use it for prerequisites, a platform chooser, the native Linux inline path, and links back to the frozen beta-track docs.
## 1. Prerequisites
### Software Requirements
- **Git**: Required for worktree isolation.
-- **Go 1.21+**: Required to build the daemon and CLI.
+- **Go 1.25.0+**: Required to build the daemon, CLI, connector, relay, and MCP SDK proof helper.
- **C Compiler (gcc/cc)**: Required for the CGO-based SQLite driver.
- **curl**: Required for health checking and polling.
- **jq or Python 3**: Recommended for bash/zsh automation wrappers that parse Codencer JSON output.
@@ -18,114 +18,147 @@ This guide provides the technical baseline for running the Codencer Orchestratio
---
-## 2. Getting Started (Canonical Path)
+## 2. Common Prerequisites And Build
### 2.1 Clone & Build
```bash
-git clone https://github.com/verbaux/codencer
+git clone https://github.com/lookmanrays/codencer
cd codencer
# 1. Initialize environment and check requirements
make setup
-# 2. Build orchestratord and orchestratorctl binaries
-make build
+# 2. Build the supported beta-track binaries
+make build-supported
+
+# 3. Build the Windows-side agent-broker separately only if you need it
+make build-broker
```
+If you only need the local and relay tracks, `make build` remains sufficient.
+
### 2.2 Verify Environment
-The `doctor` tool verifies if your environment is ready for tactical execution.
+The `doctor` tool verifies whether your environment is ready for local execution and operator flows.
+
```bash
./bin/orchestratorctl doctor
```
---
-## 3. Daemon Management
+## 3. Choose Your Platform
+
+Use the platform guide that matches where the repo, daemon, and connector will actually live.
+
+| Platform | Status in this wave | Start here | Notes |
+| --- | --- | --- | --- |
+| Native Linux | inline on this page | [Native Linux Inline Guide](#4-native-linux-inline-guide) | Primary supported platform for the local daemon and local artifacts. |
+| macOS | Wave 2 placeholder | [SETUP_MACOS.md](SETUP_MACOS.md) | Placeholder only in this pass; use the beta-track and planner/client docs for current truth. |
+| Windows Native | Wave 2 placeholder | [SETUP_WINDOWS.md](SETUP_WINDOWS.md) | The daemon is not a native Windows-supported path today. |
+| WSL | Wave 2 placeholder | [SETUP_WSL.md](SETUP_WSL.md) | Recommended cross-side layout keeps repo, daemon, connector, and artifacts in WSL/Linux. |
+| Remote VPS | Wave 2 placeholder | [SETUP_REMOTE_VPS.md](SETUP_REMOTE_VPS.md) | Use when hosting relay or cloud remotely while keeping the runtime boundaries explicit. |
+
+If you want the planner/client surface chooser right away, use [mcp/integrations.md](mcp/integrations.md). If you want the repo-wide supported test matrix, use [BETA_TESTING.md](BETA_TESTING.md).
+
+---
+
+## 4. Native Linux Inline Guide
+
+Native Linux remains the primary inline setup path in this doc.
+
+### 4.1 Start The Daemon
+
+The `orchestratord` daemon is the persistent system of record. It must be running to receive tasks.
-The `orchestratord` is the persistent system of record. It must be running to receive tasks.
+Simulation mode validates the local daemon, CLI, and evidence path without requiring live executor binaries:
-### 3.1 Simulation Mode (Orchestrator Validation)
-Use this mode to test your local setup, CLI, and MCP layers without consuming LLM credits or requiring agent binaries.
```bash
make start-sim
```
-### 3.2 Real Mode (Tactical Execution)
-Use this mode for real-world tasks. It requires agents like `codex-agent` or `claude` to be installed.
+Real mode is for actual local execution:
+
```bash
# Edit .env to set ALL_ADAPTERS_SIMULATION_MODE=0
make start
```
-Claude is executed in headless print mode as `claude -p --output-format json`. Codencer builds the task prompt, writes it to `prompt.txt`, delivers it on `stdin`, and runs the process from the attempt workspace root.
+Claude is executed in headless print mode as `claude -p --output-format json`. Codencer writes the built prompt to `prompt.txt`, sends it on `stdin`, and runs the process from the attempt workspace root.
----
+The `/api/v1/compatibility` endpoint is a runtime diagnostic only. It is useful for checking binary availability, simulation mode, and local bindings, but it is not a support-certification surface by itself.
+
+### 4.2 Current Local Adapter Proof Levels
+
+| Surface | Current repo proof | Local beta truth |
+| --- | --- | --- |
+| Local daemon + CLI + simulation lifecycle | direct smoke + repo tests | canonical |
+| `codex` adapter | simulation smoke + conformance only | primary intended local beta adapter, but still simulation-heavy in checked-in proof |
+| `claude` adapter | wrapper, prompt, normalize, and fake-binary tests | supported-beta target with narrow wrapper claims only |
+| `qwen` adapter | conformance/simulation only | secondary |
+| `antigravity` / `antigravity-broker` | mocked integration and environment-specific proof | secondary |
+| `openclaw-acpx` | unit and simulation-only proof | experimental / deferred |
+| `ide-chat` | code/manual handoff only | experimental / deferred |
+| daemon-local `/mcp/call` | compatibility/admin bridge only | compatibility only; not the public planner MCP contract |
+
+### 4.3 Local Smoke And Parity Proof
+
+The canonical local proof paths are:
+
+```bash
+./scripts/smoke_test_v1.sh
+./scripts/smoke_test_v1.sh
+make smoke
+```
+
+`scripts/smoke_test_v1.sh` verifies the legacy six-input same-run path. If no daemon is already reachable, it auto-starts a temporary simulation daemon in the same shell so the local `submit --wait` barrier stays trustworthy for back-to-back submissions.
+
+> [!IMPORTANT]
+> The daemon-local `/mcp/call` endpoint is only a local compatibility/admin surface. The canonical remote MCP surface for planners lives on the relay at `/mcp`.
+
+> [!IMPORTANT]
+> For the practical self-host relay path, the canonical public binaries are `codencer-connectord` and `codencer-relayd`. The Windows-side `agent-broker` binary is built separately with `make build-broker` because `cmd/broker` is a nested module.
+
+### 4.4 Daemon Management And Targeting
-## 4. Daemon Management & Targeting
Codencer follows a **One-Repo-One-Instance** model. Each repo clone manages its own database and worktrees.
-### 4.1 Explicit Repo Targeting
-To anchor a daemon to a specific repository regardless of your current directory, use the `--repo-root` flag.
+Anchor the daemon to a specific repository regardless of your current directory:
```bash
-# Anchor the daemon to a specific repo root
./bin/orchestratord --repo-root /path/to/my-project
```
-### 4.2 Port Management
-The daemon listens on port `8085` by default. To run multiple instances on the same machine, use the `PORT` environment variable:
+To run multiple instances on the same machine, use a different port:
```bash
-# Start an instance on a custom port
PORT=8086 ./bin/orchestratord --repo-root /path/to/project-b
```
-### 4.3 Startup Helper
-Use the provided script to start and build a daemon instance for a specific project:
+Or use the helper:
```bash
-# Usage: ./scripts/start_instance.sh [port] [extra_flags]
./scripts/start_instance.sh ~/projects/my-api 8085
```
-### 4.4 Environment Variables
-Codencer uses these variables to locate agent binaries and target the daemon:
+### 4.5 Environment Variables
+
+Codencer uses these variables to locate executor binaries and target the daemon:
- `CODEX_BINARY`: Path to the `codex-agent` binary.
- `CLAUDE_BINARY`: Path to the `claude` binary. Defaults to `claude`.
- `OPENCLAW_ACPX_BINARY`: Path to the `acpx` CLI (for OpenClaw support).
- `ORCHESTRATORD_URL`: URL of the daemon (default: `http://localhost:8085`).
-### 4.5 Claude Adapter Notes
-- Install the Claude CLI so the `claude` binary is available on your `$PATH`, or point `CLAUDE_BINARY` at the full path.
-- Codencer does not pass a workspace flag to Claude. The attempt workspace is supplied via process `cwd`.
-- Claude raw output is preserved in `stdout.log`; Codencer parses that JSON and synthesizes the normalized `result.json`.
-
----
-
-## 5. OpenClaw Setup (Experimental / Alpha)
+Claude notes:
+- install the Claude CLI so the `claude` binary is available on your `$PATH`, or point `CLAUDE_BINARY` at the full path
+- Codencer does not pass a workspace flag to Claude; the attempt workspace is supplied via process `cwd`
+- raw Claude output is preserved in `stdout.log`; Codencer parses that JSON and synthesizes the normalized `result.json`
-Codencer provides experimental support for the **Agent Client Protocol (ACP)** via the OpenClaw adapter. This integration is currently in **Alpha** and is intended for early-access testing of OpenClaw-compatible executors.
+### 4.6 Workspace Provisioning
-- **Adapter ID**: `openclaw-acpx`
-- **Binary**: `acpx` (Agent Client Protocol CLI).
-- **Local Runtime**: A running OpenClaw-compatible backend or agent stack must be discoverable by `acpx`.
-
-To configure a custom path for the ACPX binary:
-```bash
-# Add to your .env or export directly
-export OPENCLAW_ACPX_BINARY=/path/to/custom/acpx
-```
-
-> [!WARNING]
-> **OpenClaw support is Experimental (Alpha)**.
-> Codencer acts strictly as a **bridge**. It manages the `acpx` process lifecycle and workspace isolation, but it does **not** manage model routing, API keys, or backend selection for OpenClaw. Configure those directly via the OpenClaw/acpx configuration on your host machine.
-
----
+Codencer isolates every task attempt in a dedicated Git worktree. You can configure how those worktrees are prepared using `.codencer/workspace.json`.
-## 6. Workspace Provisioning
-Codencer isolates every task attempt in a dedicated Git worktree. You can configure how these worktrees are prepared using `.codencer/workspace.json`.
+Example:
-### Example `.codencer/workspace.json`
```json
{
"provisioning": {
@@ -138,58 +171,73 @@ Codencer isolates every task attempt in a dedicated Git worktree. You can config
}
```
-### Grove Compatibility
-Codencer optionally reads an environment-prep subset of Grove config (`grove.yaml` or `.groverc.json`) if a native config is missing.
+Codencer can also read a Grove-compatible subset from `grove.yaml` or `.groverc.json` if a native config is missing, but it does not depend on the Grove CLI.
-> [!IMPORTANT]
-> Codencer does **not** depend on the Grove CLI and is designed to coexist with existing Grove setups.
-
-For advanced provisioning examples, see **[EXAMPLES.md](EXAMPLES.md)**.
-
----
+### 4.7 Submission Inputs
-## 6. Automation-Friendly Submission Inputs
+`orchestratorctl submit` supports both rich task files and narrow direct input.
-`orchestratorctl submit` supports both rich canonical task definitions and narrow direct convenience input.
+Use a full YAML or JSON `TaskSpec` when you need path controls, constraints, or custom validation layout.
+Use direct convenience input when a shell wrapper, planner, or local script needs a deterministic single-task submission:
-Use a full YAML or JSON `TaskSpec` when you need rich structure such as constraints, path controls, or custom validation layouts.
-
-Use direct convenience input when a shell wrapper, planner, or local script needs a deterministic way to submit one task without authoring YAML first:
+- `--task-json <file>`
+- `--prompt-file <file>`
+- `--goal <text>`
- `--stdin`
-Exactly one primary source is required.
+Exactly one primary source is required. Direct convenience input stays intentionally narrow and preserves both `original-input.*` and `normalized-task.json` in the attempt artifacts.
+
+For concrete submit examples, see [EXAMPLES.md](EXAMPLES.md). For planner- and wrapper-oriented examples, see [CLI_AUTOMATION.md](CLI_AUTOMATION.md).
-Direct convenience input stays intentionally narrow. It deterministically normalizes into the canonical `TaskSpec` used by the daemon and preserves both:
-- `original-input.*`
-- `normalized-task.json`
+### 4.8 Cross-Side And Self-Host Notes
-For concrete submit examples, see **[EXAMPLES.md](EXAMPLES.md)**. For planner- and wrapper-oriented examples, see **[CLI_AUTOMATION.md](CLI_AUTOMATION.md)**.
+If you need the Windows-side bridge, build and run `agent-broker` separately and keep it distinct from the relay. For the practical WSL-first topology, keep the daemon and connector in WSL/Linux next to the repo and worktrees, keep the agent-broker on Windows when Antigravity is in play, and expose the relay instead of the daemon. See [WSL_WINDOWS_ANTIGRAVITY.md](WSL_WINDOWS_ANTIGRAVITY.md).
-The official v1 ordered-task model is wrapper-based. Use the scripts in `examples/automation/` when you need to execute an explicit ordered list one item at a time.
+After the daemon and relay are running, the current happy-path self-host proof is:
+
+```bash
+PLANNER_TOKEN=<planner-token> make self-host-smoke
+PLANNER_TOKEN=<planner-token> make self-host-smoke-mcp
+PLANNER_TOKEN=<planner-token> make self-host-smoke-all
+```
+
+Use [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) for the full relay/runtime order of operations. Use [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md) and [CLOUD.md](CLOUD.md) when the cloud control plane is part of the deployment. Use [mcp/integrations.md](mcp/integrations.md) for the relay-vs-cloud planner/client chooser.
+
+### 4.9 OpenClaw Setup (Experimental / Alpha)
+
+Codencer provides experimental ACP bridge support through the `openclaw-acpx` adapter.
+
+- **Adapter ID**: `openclaw-acpx`
+- **Binary**: `acpx`
+- **Status**: experimental / deferred for the current beta promise
+
+To configure a custom ACPX binary path:
+
+```bash
+export OPENCLAW_ACPX_BINARY=/path/to/custom/acpx
+```
+
+Codencer manages the `acpx` process lifecycle and workspace isolation, but it does not manage model routing, API keys, or backend selection for OpenClaw.
---
-## 7. Antigravity Broker (Cross-Side Execution)
+## 5. Choose Your Track
-Use the Antigravity Broker for **cross-side execution** (e.g., Codencer in WSL controlling Antigravity in Windows).
+Use [BETA_TESTING.md](BETA_TESTING.md) as the repo-level tester guide. Pick the track that matches the surface you want to prove.
-### 7.1 Broker Execution Model
-The broker uses a **dual-path model**:
-- **Repo Root (Identity)**: The stable path used to bind this repository to an active IDE instance.
-- **Workspace Root (Execution)**: The isolated worktree path where the task is actually executed.
+| Track | Start here | Build | Proof command | Current boundary |
+| --- | --- | --- | --- | --- |
+| Local-only daemon + CLI | [SETUP.md](SETUP.md) | `make build` | `./scripts/smoke_test_v1.sh` then `make smoke` | Canonical local proof is simulation-first; live adapter proof stays narrow. |
+| Self-host relay + runtime connector | [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) | `make build` | `PLANNER_TOKEN=<planner-token> make self-host-smoke-mcp` | Canonical remote self-host path; relay `/mcp` is the public MCP surface. |
+| Self-host cloud control plane | [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md) | `make build-cloud` | `make cloud-smoke` | Binary-native proof covers bootstrap, tenancy, provider installs, and optional composed runtime/MCP/SDK proof. |
+| Planner / client integrations | [mcp/integrations.md](mcp/integrations.md) | `make build build-cloud build-mcp-sdk-smoke` | self-host or cloud smoke with MCP/SDK enabled | ChatGPT-style and Claude-style paths remain compatibility-only, not direct product proof. |
+| Provider connectors | [CLOUD_CONNECTORS.md](CLOUD_CONNECTORS.md) | `make build-cloud` | `make cloud-smoke` plus provider-focused tests | Slack is strongest today; Jira is polling-first; the rest stay narrow operator/package surfaces. |
-### 7.2 Setup & Binding
-1. **Start the Broker**: Run `agent-broker.exe` on the host machine.
-2. **Bind**: Link your local repository to a running IDE instance:
- ```bash
- ./bin/orchestratorctl antigravity bind
- ```
-3. **Execute**: Submit tasks using the `antigravity-broker` adapter:
- ```bash
- ./bin/orchestratorctl submit --goal "Check UI" --adapter antigravity-broker --wait
- ```
+For the repo-level supported verification pass:
+
+```bash
+make build-supported
+make verify-beta
+```
-For detailed examples, see **[EXAMPLES.md](EXAMPLES.md)**.
+Run `make verify-beta-docker` only on a Docker-capable host when you also want the Docker self-host cloud baseline.
diff --git a/docs/SETUP_MACOS.md b/docs/SETUP_MACOS.md
new file mode 100644
index 0000000..5844b1b
--- /dev/null
+++ b/docs/SETUP_MACOS.md
@@ -0,0 +1,318 @@
+# macOS Setup
+
+This page is the macOS zero-to-smoke walkthrough for the `v0.2.0-beta` local and self-host relay paths. Keep claims narrow: the canonical repo-wide beta contract still lives in [BETA_TESTING.md](BETA_TESTING.md), and the planner/client contract still lives in [mcp/integrations.md](mcp/integrations.md).
+
+Use this page when all of the following are true:
+
+- you are on macOS
+- you are running the repo, daemon, and connector locally on that Mac
+- you want the supported non-Docker verification path first
+
+> [!IMPORTANT]
+> Use a real `git clone`. Do not use a ZIP download. Codencer depends on git worktrees for isolated attempts.
+
+## 1. macOS Prerequisites
+
+Codencer's supported beta verification path requires:
+
+- Git
+- Go `1.25.0+`
+- `cc` for the CGO SQLite build
+- `curl`
+- `jq` or Python 3 for JSON-parsing shell helpers
+
+On macOS, the shortest reliable setup is:
+
+```bash
+xcode-select --install
+brew install go jq
+```
+
+Then confirm the toolchain:
+
+```bash
+git --version
+go version
+cc --version
+curl --version
+jq --version
+go env CGO_ENABLED CC
+```
+
+Expected shape:
+
+```text
+go version go1.25.x darwin/arm64
+Apple clang version ...
+1
+clang
+```
+
+Notes:
+
+- Xcode Command Line Tools provide `cc` on macOS.
+- Homebrew Go `1.25+` is the required floor for this beta track.
+- Do not force `CGO_ENABLED=0`; the repo build uses the CGO SQLite driver.
+- If `go env CGO_ENABLED` reports `0`, unset that override before building.
+
+## 2. Clean Clone To Supported Verification
+
+This is the repo-truth clean-checkout path for macOS:
+
+```bash
+git clone https://github.com/lookmanrays/codencer
+cd codencer
+make setup
+make build-supported
+make verify-beta
+```
+
+### 2.1 `make setup`
+
+Expected output from a fresh checkout:
+
+```text
+==> Initializing local environment (.codencer/)...
+==> Creating .env from .env.example...
+```
+
+What it does:
+
+- creates `.env` from `.env.example` if needed
+- creates `bin/`
+- creates `.codencer/artifacts`
+- creates `.codencer/workspace`
+
+### 2.2 `make build-supported`
+
+Expected output shape:
+
+```text
+==> Building orchestratord...
+==> Building orchestratorctl...
+==> Building codencer-connectord...
+==> Building codencer-relayd...
+==> Building codencer-cloudctl...
+==> Building codencer-cloudd...
+==> Building codencer-cloudworkerd...
+==> Building mcp-sdk-smoke (official MCP SDK proof helper)...
+```
+
+### 2.3 `make verify-beta`
+
+`make verify-beta` is the supported non-Docker repo pass from [BETA_TESTING.md](BETA_TESTING.md). Expected output shape:
+
+```text
+==> Running main-module tests...
+==> Running local smoke...
+==> smoke test complete: SUCCESS
+==> Creating temporary relay planner token config...
+==> Starting temporary simulation daemon on http://127.0.0.1:18085...
+==> Starting temporary relay on http://127.0.0.1:18090...
+==> Running self-host relay/runtime smoke with MCP + SDK...
+==> Running cloud binary smoke...
+==> Validating docker compose config...
+==> Supported beta-track verification complete.
+```
+
+If you only want the Docker-backed cloud baseline on a Docker-capable host, that is a separate command:
+
+```bash
+make verify-beta-docker
+```
+
+## 3. Repeatable Local Smoke On macOS
+
+After the clean-checkout pass, this is the repeatable local smoke loop:
+
+```bash
+make start-sim
+./bin/orchestratorctl doctor
+./scripts/smoke_test_v1.sh
+make smoke
+```
+
+Expected output shape:
+
+```text
+==> Starting orchestratord in SIMULATION MODE (background)...
+Simulated daemon successfully started on http://127.0.0.1:8085 ...
+```
+
+```text
+[OK] .codencer directory found
+[OK] Git detected: ...
+[OK] Go detected: ...
+[OK] C Compiler (for SQLite CGO) detected: ...
+[OK] Daemon reachable at http://127.0.0.1:8085
+```
+
+```text
+==> starting smoke test: smoke-test-...
+==> Auditing last step: ...
+==> smoke test complete: SUCCESS
+```
+
+Use `/api/v1/compatibility`, `orchestratorctl doctor`, and the smoke scripts as runtime truth for a Mac environment. Do not use the compatibility endpoint as a support certificate by itself.
+
+## 4. Local Adapter Verification Matrix For macOS
+
+Keep the support claims aligned with [internal/BETA_SUPPORT_CLASSIFICATION.md](internal/BETA_SUPPORT_CLASSIFICATION.md), [BETA_TESTING.md](BETA_TESTING.md), and [KNOWN_LIMITATIONS.md](KNOWN_LIMITATIONS.md).
+
+| Surface | macOS path | Current repo truth | What to verify locally on Mac |
+| --- | --- | --- | --- |
+| `codex` adapter | local daemon path on the Mac | `supported-beta target`, but checked-in proof is still simulation-heavy | Canonical Mac proof is still the local smoke path. For a live Codex binary test, set `CODEX_BINARY` if needed, run real mode, and confirm availability through `/api/v1/compatibility`. |
+| `claude` adapter | local daemon path on the Mac | `supported-beta target`, wrapper path proven, no live authenticated repo proof | Install the `claude` CLI, ensure `CLAUDE_BINARY` or `$PATH` resolves it, then verify with `/api/v1/compatibility` and a real-mode local run. Claude is invoked as `claude -p --output-format json` with `stdin` prompt input and attempt workspace `cwd`. |
+| `antigravity` / `agent-broker` | not applicable on native macOS | `secondary`, Windows/WSL-oriented topology only | Do not treat this as a native Mac path. The documented broker topology is for Windows-side Antigravity with Linux/WSL-side execution boundaries, not a primary macOS local adapter path. |
+| `openclaw-acpx` | local daemon path on the Mac if `acpx` is installed | `experimental` / deferred | Only use this if you are intentionally testing the experimental ACP bridge. Set `OPENCLAW_ACPX_BINARY` if needed and keep claims at experimental status. |
+
+## 5. Single-Machine Relay Loopback On macOS
+
+This section keeps the daemon, relay, and connector on one Mac and exposes only the relay as the planner surface.
+
+### 5.1 Start the local daemon
+
+Use simulation mode for the repeatable smoke path:
+
+```bash
+make start-sim
+./bin/orchestratorctl instance --json
+```
+
+### 5.2 Create a relay config and planner token
+
+```bash
+mkdir -p .codencer/relay
+./bin/codencer-relayd planner-token create \
+ --config .codencer/relay/config.json \
+ --write-config \
+ --name operator \
+ --scope '*'
+```
+
+### 5.3 Start the relay
+
+```bash
+./bin/codencer-relayd --config .codencer/relay/config.json
+```
+
+In another terminal:
+
+```bash
+./bin/codencer-relayd status --config .codencer/relay/config.json --json
+```
+
+### 5.4 Enroll the local connector
+
+Create a one-time enrollment token:
+
+```bash
+./bin/codencer-relayd enrollment-token create \
+ --config .codencer/relay/config.json \
+ --label mac-local \
+ --expires-in-seconds 600 \
+ --json
+```
+
+Then enroll the connector against the same Mac's daemon:
+
+```bash
+./bin/codencer-connectord enroll \
+ --relay-url http://127.0.0.1:8090 \
+ --daemon-url http://127.0.0.1:8085 \
+ --enrollment-token <enrollment-token>
+```
+
+### 5.5 Run and verify the connector
+
+```bash
+./bin/codencer-connectord run
+```
+
+In another terminal:
+
+```bash
+./bin/codencer-connectord status --json
+./bin/codencer-connectord list
+./bin/codencer-relayd instances --config .codencer/relay/config.json --json
+./bin/codencer-relayd connectors --config .codencer/relay/config.json --json
+```
+
+Expected outcome:
+
+- the connector is online
+- the repo-local daemon instance appears on the relay
+- the relay is the only planner-facing surface you expose
+
+### 5.6 Run the relay smoke
+
+Once the daemon and relay are already running:
+
+```bash
+PLANNER_TOKEN=<planner-token> make self-host-smoke-mcp
+```
+
+Expected output shape:
+
+```text
+--- Codencer Self-Host Smoke ---
+Daemon: http://127.0.0.1:8085
+Relay: http://127.0.0.1:8090
+Scenarios: status,audit,mcp,mcp-sdk
+Local instance: ...
+--- Self-Host Smoke Summary ---
+Run: ...
+Step: ...
+State: completed
+Terminal: true
+Summary: ...
+```
+
+This is the canonical single-machine loopback proof for the self-host relay/runtime path on macOS. For the broader operator flow, see [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md).
+
+## 6. Wiring ChatGPT And Claude Desktop On macOS
+
+Keep the planner/client claim narrow:
+
+- use the relay MCP surface at `/mcp`
+- do not point ChatGPT, Claude Desktop, or another planner runtime at the local daemon
+- treat ChatGPT-style and Claude-style product paths as `compatibility-only`, not direct repo proof
+
+Codencer-side target on the same Mac:
+
+```text
+http://127.0.0.1:8090/mcp
+Authorization: Bearer <planner-token>
+```
+
+For ChatGPT-style clients on macOS:
+
+- use the local relay URL, not daemon `/mcp/call`
+- use the relay planner bearer token
+- keep the claim at remote MCP `compatibility-only`
+- follow [mcp/integrations/chatgpt.md](mcp/integrations/chatgpt.md) for the operator walkthrough
+
+For Claude Desktop or `claude.ai` on macOS:
+
+- use the relay URL above
+- keep the planner-side remote connector path separate from the local `claude` executor adapter
+- follow [mcp/integrations/claude.md](mcp/integrations/claude.md) for the current remote connector walkthrough
+- use [mcp/examples/claude-desktop-relay.mcp.json](mcp/examples/claude-desktop-relay.mcp.json) as a value-reference example, not as a direct product import
+
+For current product-specific MCP behavior outside Codencer itself, follow the current vendor docs plus the Codencer-side contract in [mcp/integrations.md](mcp/integrations.md).
+
+## 7. macOS-Specific Practical Limits
+
+These are practical operator-side notes for Mac environments. Keep them as local-environment caveats, not beta-promotion claims.
+
+- Keychain permissions: vendor CLIs such as Claude or Codex may prompt for macOS Keychain access the first time they need credentials. If a real-mode run fails immediately, re-check the CLI directly in the same terminal session.
+- Firewall prompts: the first local bind for relay or daemon may trigger a macOS firewall prompt depending on host policy. Allow local loopback access if you intend to use `127.0.0.1:8085` and `127.0.0.1:8090`.
+- Codesign noise: locally built Go binaries may produce unsigned-binary or first-run trust noise on tightly managed Macs. Treat this as host policy friction, not as a Codencer planner/runtime feature.
+- Adapter proof remains narrow: on macOS, as on other platforms, the canonical proof is still simulation-first for local smoke; live adapter validation depends on your local binary, auth, and shell environment.
+
+## 8. Where To Go Next
+
+- Use [BETA_TESTING.md](BETA_TESTING.md) for the frozen public beta track chooser.
+- Use [mcp/integrations.md](mcp/integrations.md) for the relay-vs-cloud planner/client contract.
+- Use [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) for the full relay/runtime operator flow.
+- Use [KNOWN_LIMITATIONS.md](KNOWN_LIMITATIONS.md) when you need the current boundary list in one place.
diff --git a/docs/SETUP_REMOTE_VPS.md b/docs/SETUP_REMOTE_VPS.md
new file mode 100644
index 0000000..fc4da12
--- /dev/null
+++ b/docs/SETUP_REMOTE_VPS.md
@@ -0,0 +1,568 @@
+# Remote VPS Setup
+
+This page replaces the Wave 1 placeholder with the practical `v0.2.0-beta` Remote VPS dev-server walkthrough.
+
+Use this page when all of the following are true:
+
+- the VPS is your development machine
+- the repo checkout, worktrees, daemon, and connector live on the VPS
+- your laptop hosts the planner-side control plane as a self-host relay or self-host cloud
+- your laptop is the planner client location, but it does not execute the coding work
+
+Keep the support claim narrow:
+
+- this is the existing self-host relay/runtime path adapted to a VPS-hosted execution machine
+- relay HTTP, relay MCP, connector enrollment, and shared-instance routing are the proven Codencer surfaces
+- ChatGPT-style and Claude Desktop-style planner wiring remain `compatibility-only` as described in [BETA_TESTING.md](BETA_TESTING.md), [KNOWN_LIMITATIONS.md](KNOWN_LIMITATIONS.md), and [mcp/integrations.md](mcp/integrations.md)
+
+> [!IMPORTANT]
+> Use a real `git clone` on the VPS. Do not use a ZIP download. Codencer depends on git worktrees for isolated attempts.
+
+> [!IMPORTANT]
+> Do not expose the VPS daemon directly. The public planner surface is the relay `/mcp` or cloud `/api/cloud/v1/mcp`, not the daemon-local `/mcp/call`.
+
+## 1. Topology
+
+Primary topology for this page:
+
+```text
+Laptop
+ Planner client (ChatGPT-style / Claude Desktop-style / curl / SDK)
+ -> relay MCP / planner HTTP on the laptop, or cloud MCP / runtime HTTP on the laptop
+ -> operator-controlled HTTPS front door if the VPS or planner product needs a reachable URL
+
+VPS
+ codencer-connectord
+ -> outbound authenticated websocket to the laptop relay or cloud relay bridge
+ orchestratord
+ -> local adapters
+ -> repo checkout, worktrees, artifacts, validations
+```
+
+What stays true in this topology:
+
+- the VPS is the execution side
+- the laptop is the planning side
+- the laptop does not run the daemon, worktrees, or adapters for this repo
+- the connector is an outbound bridge from the VPS to the laptop control plane
+- the laptop does not need inbound reachability to the VPS except normal SSH admin access
+
+## 2. Why This Layout
+
+This split is useful for two boring, good reasons:
+
+- isolation: the repo, daemon, worktrees, executor binaries, and artifacts stay on one disposable Linux machine instead of leaking into the laptop environment
+- reproducibility: the VPS gives you a stable Ubuntu or Debian runtime for builds, validations, and adapter binaries, while the planner surface on the laptop stays thin and operator-oriented
+
+This layout also keeps the service boundary honest:
+
+- the orchestrator remains the control plane for execution state
+- the relay remains the remote planner surface
+- the connector remains an outbound transport bridge
+- the laptop planner is not turned into an executor
+
+## 3. Prerequisites
+
+### 3.1 VPS
+
+Recommended VPS baseline:
+
+- Ubuntu `22.04+` or Debian `12+`
+- `sshd`
+- `ufw` or an equivalent host firewall
+- Git
+- Go `1.25.0+`
+- `cc` or `gcc`
+- `curl`
+- `jq` or Python 3
+- a `t3.small`-equivalent or better machine for a comfortable dev-server baseline
+
+One straightforward Ubuntu or Debian package setup is:
+
+```bash
+sudo apt update
+sudo apt install -y git golang-go build-essential curl jq openssh-server ufw
+```
+
+Then verify the toolchain:
+
+```bash
+git --version
+go version
+cc --version
+curl --version
+jq --version
+go env CGO_ENABLED CC
+```
+
+Expected shape:
+
+```text
+go version go1.25.x linux/amd64
+...
+1
+gcc
+```
+
+Notes:
+
+- do not force `CGO_ENABLED=0`; the repo build uses the CGO SQLite driver
+- keep the daemon and connector on the same Linux side as the repo checkout
+- only SSH should be inbound to the VPS for this walkthrough
+
+### 3.2 Laptop
+
+The laptop needs the planner-side control plane:
+
+- self-host relay on the laptop, or
+- self-host cloud on the laptop if you want the cloud control plane instead of raw relay
+
+If you are running the raw relay on the laptop from a repo checkout, the laptop also needs:
+
+- Go `1.25.0+`
+- `curl`
+- `jq` or Python 3
+
+If the VPS or planner product needs a reachable URL, put an operator-controlled HTTPS front door in front of the laptop relay or cloud. The checked-in relay binary is plain HTTP by itself; HTTPS termination is your operator responsibility when you want outbound `https://...` and `wss://...` from the VPS.
+
+## 4. Walkthrough
+
+This section uses the laptop-hosted relay as the main path because that is the most direct match for the requested topology. If you want laptop-hosted cloud tenancy instead, keep the VPS daemon and connector placement the same and use [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md) for the cloud bootstrap.
+
+### 4.1 Clone And Build On The VPS
+
+SSH to the VPS and build the runtime binaries there:
+
+```bash
+ssh <user>@<vps-host>
+
+git clone https://github.com/lookmanrays/codencer
+cd codencer
+
+make setup
+make build
+```
+
+Expected build output shape:
+
+```text
+==> Building orchestratord...
+==> Building orchestratorctl...
+==> Building codencer-connectord...
+==> Building codencer-relayd...
+```
+
+`make build` is enough for the daemon, CLI, connector, and raw relay path.
+
+### 4.2 Start `orchestratord` On The VPS
+
+For a repeatable first proof, start the daemon in simulation mode on the VPS:
+
+```bash
+ALL_ADAPTERS_SIMULATION_MODE=1 ./bin/orchestratord --repo-root "$PWD"
+```
+
+If you want actual VPS-side execution after the smoke pass, restart it later without `ALL_ADAPTERS_SIMULATION_MODE=1` and make sure the required executor binary is installed on the VPS.
+
+In another VPS shell, confirm instance identity:
+
+```bash
+curl -fsS http://127.0.0.1:8085/api/v1/instance | jq '{id, repo_root}'
+curl -fsS http://127.0.0.1:8085/api/v1/compatibility | jq '{tier, adapters, environment}'
+```
+
+Expected shape:
+
+```json
+{
+ "id": "<instance-id>",
+ "repo_root": "/home/<user>/codencer"
+}
+```
+
+At this point the VPS is the real execution side, even if the first smoke is simulation-only.
+
+### 4.3 Start The Relay On The Laptop And Mint A Planner Token
+
+On the laptop, use a checkout or copied binaries for the relay side:
+
+```bash
+git clone https://github.com/lookmanrays/codencer ~/codencer-relay
+cd ~/codencer-relay
+
+make build
+
+mkdir -p .codencer/relay
+./bin/codencer-relayd planner-token create \
+ --config .codencer/relay/config.json \
+ --write-config \
+ --name operator \
+ --scope '*' \
+ --json | tee .codencer/relay/planner-token.json
+
+PLANNER_TOKEN="$(jq -r '.token' .codencer/relay/planner-token.json)"
+
+nohup ./bin/codencer-relayd \
+ --config .codencer/relay/config.json \
+ > .codencer/relay/relay.log 2>&1 &
+echo $! > .codencer/relay/relay.pid
+
+./bin/codencer-relayd status \
+ --config .codencer/relay/config.json \
+ --json | jq
+```
+
+Expected planner-token output snippet:
+
+```json
+{
+ "name": "operator",
+ "token": "<planner-token>",
+ "write_config": true,
+ "restart_required": true
+}
+```
+
+Expected relay status snippet:
+
+```json
+{
+ "planner_auth_mode": "static_bearer_tokens"
+}
+```
+
+Operator note:
+
+- the relay config defaults to `127.0.0.1:8090`
+- if the VPS must reach it over outbound HTTPS only, publish an HTTPS URL for the laptop relay through your own reverse proxy or tunnel
+- keep the raw relay private; expose only the HTTPS front door
+- if you expect browser-style MCP callers, add the planner origin to `allowed_origins`
+
+For the rest of this guide, assume the VPS will use:
+
+```bash
+export RELAY_URL="https://relay.example.com"
+```
+
+The laptop can keep using the local config and local admin CLI even if the VPS connector uses the published HTTPS URL.
+
+### 4.4 Create A One-Time Enrollment Token On The Laptop
+
+Mint a short-lived enrollment token from the laptop relay:
+
+```bash
+./bin/codencer-relayd enrollment-token create \
+ --config .codencer/relay/config.json \
+ --label vps-dev \
+ --expires-in-seconds 600 \
+ --json | tee .codencer/relay/enrollment-token.json
+
+ENROLLMENT_TOKEN="$(jq -r '.secret' .codencer/relay/enrollment-token.json)"
+```
+
+Expected output shape:
+
+```json
+{
+ "token_id": "<token-id>",
+ "secret": "<enrollment-secret>",
+ "label": "vps-dev",
+ "expires_at": "..."
+}
+```
+
+This token is bootstrap-only and single-use. Prefer it over the legacy static `enrollment_secret` fallback.
+
+### 4.5 Enroll And Run `codencer-connectord` On The VPS
+
+Back on the VPS, enroll the connector against the laptop relay and the local VPS daemon:
+
+```bash
+cd ~/codencer
+
+CONNECTOR_CONFIG=".codencer/connector/config.json"
+
+./bin/codencer-connectord enroll \
+ --relay-url "$RELAY_URL" \
+ --daemon-url http://127.0.0.1:8085 \
+ --enrollment-token "$ENROLLMENT_TOKEN" \
+ --config "$CONNECTOR_CONFIG" \
+ --label vps-dev
+
+./bin/codencer-connectord list --config "$CONNECTOR_CONFIG" --json | jq
+
+nohup ./bin/codencer-connectord run \
+ --config "$CONNECTOR_CONFIG" \
+ > .codencer/connector/connector.log 2>&1 &
+echo $! > .codencer/connector/connector.pid
+
+./bin/codencer-connectord status \
+ --config "$CONNECTOR_CONFIG" \
+ --json | jq
+```
+
+Expected enroll output:
+
+```text
+Connector enrolled: machine=<machine-id>
+```
+
+Expected `list` snippet:
+
+```json
+[
+ {
+ "daemon_url": "http://127.0.0.1:8085",
+ "share": true
+ }
+]
+```
+
+Expected status snippet after the connector settles:
+
+```json
+{
+ "relay_url": "https://relay.example.com",
+ "session_state": "connected"
+}
+```
+
+What happens here:
+
+- the connector enrolls once with the single-use token
+- it opens an outbound authenticated websocket to the relay under `/ws/connectors`
+- the laptop still does not need a direct inbound path to the VPS other than SSH for you as the operator
+
+### 4.6 Verify From The Laptop That The VPS Instance Is Shared
+
+First verify the relay HTTP view from the laptop:
+
+```bash
+curl -fsS \
+ -H "Authorization: Bearer $PLANNER_TOKEN" \
+ "$RELAY_URL/api/v2/instances" | jq
+```
+
+Expected output snippet:
+
+```json
+[
+ {
+ "instance_id": "<instance-id>",
+ "online": true
+ }
+]
+```
+
+Then verify the canonical planner MCP tool from the laptop:
+
+```bash
+curl -fsS -D /tmp/codencer-vps-mcp-headers.txt \
+ -H "Authorization: Bearer $PLANNER_TOKEN" \
+ -H "Content-Type: application/json" \
+ -H "MCP-Protocol-Version: 2025-11-25" \
+ -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-11-25"}}' \
+ "$RELAY_URL/mcp" > /tmp/codencer-vps-mcp-init.json
+
+SESSION_ID="$(awk -F': ' 'tolower($1)=="mcp-session-id" {gsub("\r", "", $2); print $2}' /tmp/codencer-vps-mcp-headers.txt)"
+
+curl -fsS \
+ -H "Authorization: Bearer $PLANNER_TOKEN" \
+ -H "Content-Type: application/json" \
+ -H "MCP-Session-Id: ${SESSION_ID}" \
+ -H "MCP-Protocol-Version: 2025-11-25" \
+ -d '{"jsonrpc":"2.0","id":2,"name":"codencer.list_instances","arguments":{}}' \
+ "$RELAY_URL/mcp/call" | jq
+```
+
+Expected output snippet:
+
+```json
+[
+ {
+ "instance_id": "",
+ "connector_id": "",
+ "online": true
+ }
+]
+```
+
+At this point `codencer.list_instances` from the laptop is seeing the VPS-shared instance, not a laptop-local daemon.
+
+### 4.7 Run A Task From The Laptop Planner Through The Relay To The VPS
+
+The Codencer-side contract is:
+
+- planner targets relay `/mcp` or cloud `/api/cloud/v1/mcp`
+- planner calls `codencer.list_instances`, `codencer.start_run`, `codencer.submit_task`, `codencer.wait_step`, and `codencer.get_step_result`
+- execution still happens on the VPS daemon through the connector
+
+Keep the product claim narrow:
+
+- ChatGPT-style and Claude Desktop-style paths are `compatibility-only`
+- follow [mcp/integrations.md](mcp/integrations.md) for the planner/client contract and the checked-in example files
+- do not point those planner products at the VPS daemon directly
+
+For a first planner-side smoke from the laptop, use this sequence:
+
+1. Point the planner at the laptop relay MCP URL.
+2. Ask it to call `codencer.list_instances`.
+3. Ask it to call `codencer.start_run`.
+4. Ask it to call `codencer.submit_task`.
+5. Ask it to call `codencer.wait_step` and `codencer.get_step_result`.
+
+Example Codencer MCP payloads:
+
+```json
+{
+ "instance_id": "<instance-id>",
+ "payload": {
+ "id": "vps-planner-smoke-001",
+ "project_id": "vps-planner-smoke"
+ }
+}
+```
+
+```json
+{
+ "instance_id": "<instance-id>",
+ "run_id": "vps-planner-smoke-001",
+ "task": {
+ "version": "v1",
+ "goal": "Compatibility smoke only. Return the VPS repo root in the final summary. Do not edit files.",
+ "is_simulation": true
+ }
+}
+```
+
+Expected `submit_task` shape:
+
+```json
+{
+ "id": "step-<step-id>",
+ "state": "queued"
+}
+```
+
+Expected `wait_step` shape:
+
+```json
+{
+ "state": "completed",
+ "terminal": true
+}
+```
+
+When you are ready for real VPS execution instead of a compatibility smoke:
+
+- restart `orchestratord` on the VPS without simulation mode
+- install the target executor binary on the VPS
+- set `adapter_profile` in the submitted task
+
+The laptop still remains only the planner side.
+
+## 5. Repeatable Verification Smoke
+
+Once the VPS daemon is running and the laptop relay is reachable, use the checked-in self-host smoke helper from the VPS. This is the most repeatable repo-truth verification loop for this topology.
+
+On the VPS:
+
+```bash
+cd ~/codencer
+
+PLANNER_TOKEN="<planner-token>" \
+RELAY_URL="$RELAY_URL" \
+DAEMON_URL="http://127.0.0.1:8085" \
+SMOKE_SCENARIOS="status,audit,mcp,mcp-sdk" \
+./scripts/self_host_smoke.sh
+```
+
+Expected output starts like:
+
+```text
+--- Codencer Self-Host Smoke ---
+Daemon: http://127.0.0.1:8085
+Relay: https://relay.example.com
+Scenarios: status,audit,mcp,mcp-sdk
+Local instance: <instance-id>
+```
+
+Expected summary tail looks like:
+
+```text
+--- Self-Host Smoke Summary ---
+Run: smoke-run-...
+Step: ...
+State: completed
+Terminal: true
+Summary: ...
+MCP SDK: /tmp/...
+```
+
+What this verifies in the VPS-plus-laptop topology:
+
+- the VPS daemon identity is readable
+- the laptop relay can mint enrollment tokens and accept the connector session
+- the VPS connector can advertise the shared VPS instance
+- relay HTTP and relay MCP both route to the VPS daemon
+- the official Go SDK smoke helper can talk to relay `/mcp`
+
+This is the repeatable smoke to rerun after VPS rebuilds, relay restarts, or firewall changes.
+
+## 6. Security Notes
+
+For this topology, keep the security posture intentionally small:
+
+- planner auth is a relay bearer token
+- connector bootstrap uses a single-use enrollment token
+- the VPS should expose no inbound Codencer ports; keep inbound to SSH only
+- the connector should reach the laptop relay or cloud over outbound HTTPS and `wss` only
+- do not expose the daemon on `8085` to the internet
+- if you publish the relay, publish only the relay or cloud front door, not the VPS executor side
+
+Practical firewall shape:
+
+- VPS inbound: `22/tcp` only
+- VPS outbound: HTTPS to the laptop relay front door or laptop cloud front door
+- laptop inbound: whatever your HTTPS front door requires
+
+## 7. Known Limitations For This Topology
+
+The same repo-wide beta limits still apply here:
+
+- self-host planner auth is static bearer-token auth, not enterprise IAM
+- abort is best-effort; Codencer only reports success when the active step actually reaches `cancelled`
+- artifact transport is bounded and is not a bulk file tunnel
+- connector artifact content transport is capped at `8 MiB`; larger payloads fail as too large for connector transport
+- ChatGPT-style and Claude Desktop-style planner wiring remains `compatibility-only`, not direct repo-proven product setup
+
+Use [KNOWN_LIMITATIONS.md](KNOWN_LIMITATIONS.md) for the consolidated boundary list.
+
+## 8. Alternate Topologies
+
+### 8.1 Relay Also On The VPS
+
+If you do not need the laptop to host the raw relay, you can move `codencer-relayd` onto the VPS and keep the same daemon-plus-connector local placement there.
+
+That variant is simpler operationally, but it changes the trust split:
+
+- the planner surface and execution surface now sit on the same machine
+- you lose the laptop-local operator separation that this page is optimizing for
+- you still should not expose the daemon directly
+
+### 8.2 Laptop Cloud Instead Of Laptop Relay
+
+If you want the laptop to host the cloud control plane instead of the raw relay:
+
+- keep `orchestratord` and `codencer-connectord` on the VPS
+- use the cloud-composed runtime path from [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md)
+- claim the VPS runtime connector into cloud scope before using cloud runtime HTTP or cloud MCP
+
+For the Docker baseline when you want a composed cloud reference instead of the raw relay path, use [CLOUD_SELF_HOST.md](CLOUD_SELF_HOST.md).
+
+## 9. Where To Go Next
+
+- use [BETA_TESTING.md](BETA_TESTING.md) for the frozen public beta track chooser
+- use [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) for the raw relay/runtime operator flow
+- use [mcp/integrations.md](mcp/integrations.md) for the relay-vs-cloud planner/client contract
+- use [KNOWN_LIMITATIONS.md](KNOWN_LIMITATIONS.md) when you need the current boundary list in one place
diff --git a/docs/SETUP_WINDOWS.md b/docs/SETUP_WINDOWS.md
new file mode 100644
index 0000000..4e0c392
--- /dev/null
+++ b/docs/SETUP_WINDOWS.md
@@ -0,0 +1,494 @@
+# Windows Setup (WSL Daemon + Windows IDE / Broker)
+
+This page replaces the Wave 1 placeholder with the current repo-truth Windows walkthrough for `v0.2.0-beta`.
+
+> [!IMPORTANT]
+> Repo truth for this release:
+> - `orchestratord` is **not** a natively supported Windows daemon path.
+> - The recommended Windows topology is:
+> - **WSL/Linux**: repo checkout, daemon, worktrees, artifacts, connector
+> - **Windows**: IDE, Antigravity, optional `agent-broker`
+> - If you want a pure Windows-only runtime, use **WSL2** for the daemon side instead of trying to run the daemon natively on Windows.
+> - `agent-broker` remains a **secondary / experimental** bridge in the repo support matrix, not part of the primary beta promise.
+
+Use [SETUP.md](SETUP.md) for the common build and platform hub, [BETA_TESTING.md](BETA_TESTING.md) for the frozen beta tracks, and [mcp/integrations.md](mcp/integrations.md) for the exact relay/cloud planner-client contract.
+
+## 1. Recommended Topology
+
+This is the practical mixed Windows layout described elsewhere in the repo:
+
+```text
+Windows IDE / planner client
+ -> Windows localhost relay URL (native or forwarded)
+ -> Codencer relay
+ -> Codencer connector (WSL)
+ -> Codencer daemon (WSL)
+ -> local adapters / worktrees / artifacts (WSL)
+
+Windows Antigravity
+ -> agent-broker (Windows localhost)
+ -> daemon Antigravity binding APIs (WSL)
+```
+
+Keep these boundaries clear:
+
+- the **daemon** is the local system of record
+- the **connector** is the outbound bridge from daemon to relay
+- the **relay** is the remote planner surface
+- the **agent-broker** is a local IDE bridge, not the relay
+
+Do **not** expose the daemon directly to ChatGPT, Claude Desktop, or any other remote planner client. The daemon-local `/mcp/call` path is compatibility-only and not the public planner contract.
+
+## 2. What Runs Where
+
+### WSL/Linux side
+
+- repo checkout
+- `orchestratord`
+- `codencer-connectord`
+- worktrees
+- artifacts
+- optional relay if you keep the relay next to the daemon
+
+### Windows side
+
+- IDE with **Antigravity** active
+- `agent-broker`
+- optional relay if you intentionally want the relay to terminate on Windows localhost
+- ChatGPT / Claude Desktop style clients pointed at the **relay**, not the daemon
+
+## 3. Build The Required Binaries
+
+### 3.1 WSL: build the daemon-side binaries
+
+From the repo root in WSL:
+
+```bash
+make build
+```
+
+Expected build lines include:
+
+```text
+==> Building orchestratord...
+==> Building orchestratorctl...
+==> Building codencer-connectord...
+==> Building codencer-relayd...
+```
+
+### 3.2 Windows: build `agent-broker`
+
+The broker is built separately because `cmd/broker` is a nested module.
+
+From the repo root in a **Windows shell**:
+
+```powershell
+make build-broker
+```
+
+Expected build line:
+
+```text
+==> Building agent-broker (nested module)...
+```
+
+The resulting Windows binary is:
+
+```text
+.\bin\agent-broker.exe
+```
+
+If you intentionally want the relay on Windows too, build the normal relay binary with the standard `make build` flow from [SETUP.md](SETUP.md). The mixed-layout recommendation in this page still keeps the daemon and connector in WSL.
+
+## 4. WSL Walkthrough: Daemon + Relay + Connector
+
+This is the recommended side for repo state, worktrees, artifacts, and execution.
+
+### 4.1 Start the daemon in WSL
+
+```bash
+./bin/orchestratord --repo-root /home/<user>/Projects/codencer
+```
+
+In a second WSL shell, verify runtime truth:
+
+```bash
+curl -fsS http://127.0.0.1:8085/api/v1/compatibility
+./bin/orchestratorctl instance --json
+```
+
+Expected output snippets:
+
+```json
+{"tier":...}
+```
+
+```json
+{
+ "id": "<instance-id>",
+ "repo_root": "/home/<user>/Projects/codencer",
+ "base_url": "http://127.0.0.1:8085"
+}
+```
+
+### 4.2 Start a relay in WSL (recommended default)
+
+```bash
+mkdir -p .codencer/relay
+./bin/codencer-relayd planner-token create \
+ --config .codencer/relay/config.json \
+ --write-config \
+ --name operator \
+ --scope '*'
+
+./bin/codencer-relayd --config .codencer/relay/config.json
+```
+
+From another WSL shell:
+
+```bash
+./bin/codencer-relayd status --config .codencer/relay/config.json
+```
+
+Expected output snippet:
+
+```json
+{
+ "planner_auth_mode": "static_bearer_tokens",
+ "connector_count": 0,
+ "instance_count": 0
+}
+```
+
+### 4.3 Enroll and run the connector in WSL
+
+Create a one-time enrollment token:
+
+```bash
+./bin/codencer-relayd enrollment-token create \
+ --config .codencer/relay/config.json \
+ --label wsl-dev \
+ --expires-in-seconds 600 \
+ --json
+```
+
+Enroll the connector against the WSL daemon:
+
+```bash
+./bin/codencer-connectord enroll \
+ --relay-url http://127.0.0.1:8090 \
+ --daemon-url http://127.0.0.1:8085 \
+ --enrollment-token "<enrollment-token>"
+
+./bin/codencer-connectord share --daemon-url http://127.0.0.1:8085
+./bin/codencer-connectord run
+```
+
+Verify connector state:
+
+```bash
+./bin/codencer-connectord status
+./bin/codencer-connectord list
+```
+
+Expected output snippets:
+
+```text
+connector=<connector-id> machine=<machine-id> relay=http://127.0.0.1:8090 state=connected
+configured_instances=1 shared_config=1 unshared_config=0
+```
+
+```text
+state=shared instance_id=<instance-id> daemon_url=http://127.0.0.1:8085 manifest_path=<...>
+```
+
+## 5. Windows Walkthrough: Antigravity + `agent-broker`
+
+This page assumes **Antigravity** is the Windows IDE path.
+
+### 5.1 Antigravity discovery path on Windows
+
+The broker discovers Antigravity instances from:
+
+```text
+%USERPROFILE%\.gemini\antigravity\daemon\ls_*.json
+```
+
+The broker persists repo bindings at:
+
+```text
+%USERPROFILE%\.gemini\antigravity\broker_binding.json
+```
+
+If `orchestratorctl antigravity list` shows no instances, first confirm that Antigravity is active in the Windows IDE and that the `ls_*.json` files exist under the directory above.
+
+### 5.2 Start the broker on Windows
+
+In a Windows shell:
+
+```powershell
+.\bin\agent-broker.exe
+```
+
+Expected startup line:
+
+```text
+Antigravity Broker v0.1.0-alpha starting on 127.0.0.1:8088
+```
+
+Verify the local broker endpoints:
+
+```powershell
+curl.exe -fsS http://127.0.0.1:8088/health
+curl.exe -fsS http://127.0.0.1:8088/version
+```
+
+Expected output:
+
+```json
+{"status":"ok"}
+```
+
+```json
+{"version":"0.1.0-alpha"}
+```
+
+### 5.3 Bind WSL Codencer to the Windows broker
+
+Back in WSL:
+
+```bash
+export CODENCER_ANTIGRAVITY_BROKER_URL=http://127.0.0.1:8088
+./bin/orchestratorctl antigravity list
+./bin/orchestratorctl antigravity bind
+./bin/orchestratorctl antigravity status
+```
+
+Expected output snippets:
+
+```text
+PID PORT REACHABLE WORKSPACE
+------------------------------------------------------------
+```
+
+```text
+Successfully bound repo to Antigravity PID <pid>
+```
+
+```text
+Status: BOUND (Active)
+PID: <pid>
+Port: <port>
+Workspace: <workspace-path>
+```
+
+When you want the cross-side adapter path explicitly, use the `antigravity-broker` adapter:
+
+```bash
+./bin/orchestratorctl submit windows-broker-smoke \
+ --goal "Verify Windows Antigravity broker path" \
+ --adapter antigravity-broker \
+ --wait --json
+```
+
+That adapter choice is required for the broker-backed execution path. A repo-scoped Antigravity bind alone does not switch adapters automatically.
+
+## 6. Wire ChatGPT / Claude Desktop On Windows To The Relay
+
+These product-specific paths remain **compatibility-only** in the current beta contract. Keep the claim narrow:
+
+- Codencer proves the relay/cloud MCP surfaces
+- Codencer does **not** claim direct repo-executed ChatGPT or Claude Desktop product setup
+- remote clients should target the **relay** `/mcp` endpoint, never the local daemon
+
+### 6.1 Canonical Codencer-side values
+
+For a relay-backed MCP client on Windows, the Codencer-side values are:
+
+- URL: `http://127.0.0.1:8090/mcp`
+- auth header: `Authorization: Bearer <planner-token>`
+- canonical MCP path: `/mcp`
+- compatibility alias for simple POST callers: `/mcp/call`
+
+If the client asks for a protocol version, use the current repo examples from [mcp/integrations.md](mcp/integrations.md), which show `MCP-Protocol-Version: 2025-11-25`.
+
+### 6.2 If the relay itself runs on Windows
+
+Point the Windows client directly at:
+
+```text
+http://127.0.0.1:8090/mcp
+```
+
+This is the simplest Windows client wiring because the client and relay are on the same loopback interface.
+
+### 6.3 If the relay runs in WSL
+
+Preferred outcome: the Windows client still reaches the relay as:
+
+```text
+http://127.0.0.1:8090/mcp
+```
+
+If Windows localhost already reaches the WSL listener, no extra step is needed.
+
+If it does **not**, add an OS-level localhost port forward from Windows to the WSL relay port and keep the client URL unchanged. Example:
+
+```powershell
+wsl hostname -I
+netsh interface portproxy add v4tov4 listenaddress=127.0.0.1 listenport=8090 connectaddress=<wsl-ip> connectport=8090
+```
+
+After that, a Windows-local client can still use:
+
+```text
+http://127.0.0.1:8090/mcp
+```
+
+This keeps ChatGPT / Claude Desktop style Windows configuration stable even when the actual relay process lives in WSL.
+
+### 6.4 Windows-side relay verification before opening the client
+
+Before you configure a Windows MCP client, verify the relay from Windows:
+
+```powershell
+curl.exe -fsS ^
+ -H "Authorization: Bearer <planner-token>" ^
+ http://127.0.0.1:8090/api/v2/status
+```
+
+Expected output snippet:
+
+```json
+{
+ "planner_auth_mode": "static_bearer_tokens",
+ "connector_count": 1,
+ "instance_count": 1
+}
+```
+
+Then verify MCP initialize on the same Windows URL:
+
+```powershell
+curl.exe -fsS -D mcp-headers.txt ^
+ -H "Authorization: Bearer <planner-token>" ^
+ -H "Content-Type: application/json" ^
+ -H "MCP-Protocol-Version: 2025-11-25" ^
+ -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"initialize\",\"params\":{\"protocolVersion\":\"2025-11-25\"}}" ^
+ http://127.0.0.1:8090/mcp
+```
+
+Expected verification points:
+
+- HTTP `200`
+- response body contains `"jsonrpc":"2.0"`
+- response headers include `MCP-Session-Id`
+
+If this Windows `curl.exe` check fails, the product client will fail too. Fix the relay URL, token, or localhost forwarding first.
+
+## 7. Known Limitations For This Topology
+
+Keep these limits explicit:
+
+- the Windows daemon path is **unsupported**; use WSL for daemon-side runtime
+- the mixed WSL/Windows layout is **operator guidance**, not an automated smoke-proof matrix
+- `agent-broker` is a **secondary / experimental** bridge in the repo support classification
+- `agent-broker` task sessions are **in-memory only**; restarting the broker can orphan active tasks
+- product-specific ChatGPT-style and Claude-style MCP wiring is **compatibility-only**
+- Windows Firewall / Defender may prompt the first time `agent-broker.exe`, `codencer-relayd.exe`, or localhost port forwarding is used; approve only the local/private binding you intend to use
+
+Also remember:
+
+- keep the relay separate from the broker
+- keep the connector on the same side as the daemon
+- keep artifacts and worktrees on the WSL side
+- inspect results via Codencer APIs and CLI rather than raw cross-side paths
+
+## 8. Repeatable Verification Smoke
+
+Use this as the repeatable mixed-layout smoke for this page.
+
+### 8.1 WSL daemon truth
+
+```bash
+curl -fsS http://127.0.0.1:8085/api/v1/compatibility
+./bin/orchestratorctl instance --json
+```
+
+Pass condition:
+
+- `/api/v1/compatibility` includes `"tier"`
+- `instance --json` shows the expected WSL `repo_root`
+
+### 8.2 Windows broker truth
+
+```powershell
+curl.exe -fsS http://127.0.0.1:8088/health
+curl.exe -fsS http://127.0.0.1:8088/version
+```
+
+Pass condition:
+
+- `{"status":"ok"}`
+- `{"version":"0.1.0-alpha"}`
+
+### 8.3 WSL Antigravity bind truth
+
+```bash
+export CODENCER_ANTIGRAVITY_BROKER_URL=http://127.0.0.1:8088
+./bin/orchestratorctl antigravity list
+./bin/orchestratorctl antigravity bind
+./bin/orchestratorctl antigravity status
+```
+
+Pass condition:
+
+- at least one reachable Antigravity instance is listed
+- bind succeeds
+- status is `BOUND (Active)`
+
+### 8.4 Relay truth from Windows
+
+```powershell
+curl.exe -fsS ^
+ -H "Authorization: Bearer <planner-token>" ^
+ http://127.0.0.1:8090/api/v2/status
+```
+
+Pass condition:
+
+- JSON contains `"planner_auth_mode":"static_bearer_tokens"`
+- JSON shows at least one connected connector / instance after the WSL connector is running
+
+### 8.5 Relay MCP truth from Windows
+
+```powershell
+curl.exe -fsS -D mcp-headers.txt ^
+ -H "Authorization: Bearer <planner-token>" ^
+ -H "Content-Type: application/json" ^
+ -H "MCP-Protocol-Version: 2025-11-25" ^
+ -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"initialize\",\"params\":{\"protocolVersion\":\"2025-11-25\"}}" ^
+ http://127.0.0.1:8090/mcp
+```
+
+Pass condition:
+
+- HTTP `200`
+- body contains `"jsonrpc":"2.0"`
+- `mcp-headers.txt` contains `MCP-Session-Id`
+
+### 8.6 Optional broker-backed adapter smoke
+
+```bash
+./bin/orchestratorctl submit windows-broker-smoke \
+ --goal "Verify Windows Antigravity broker path" \
+ --adapter antigravity-broker \
+ --wait --json
+```
+
+Pass condition:
+
+- the run completes without broker connection failure
+- follow-up result inspection shows the broker execution context rather than a local non-broker adapter path
+
+For the frozen repo-level beta proof boundaries, return to [BETA_TESTING.md](BETA_TESTING.md). For the canonical planner/client surfaces and MCP examples, return to [mcp/integrations.md](mcp/integrations.md).
diff --git a/docs/SETUP_WSL.md b/docs/SETUP_WSL.md
new file mode 100644
index 0000000..1220c07
--- /dev/null
+++ b/docs/SETUP_WSL.md
@@ -0,0 +1,431 @@
+# WSL Setup
+
+This guide is the WSL2 zero-to-smoke path for Codencer `v0.2.0-beta`.
+
+Use it when all of the following are true:
+
+- you are on **WSL2**
+- your distro is **Ubuntu or Debian**
+- you want the **repo, daemon, connector, worktrees, and artifacts** to stay inside WSL/Linux
+- you want the relay reachable from WSL first, and optionally from Windows-side planner clients later
+
+For the frozen repo-wide beta tracks, see [BETA_TESTING.md](BETA_TESTING.md). For the remote planner/client contract, see [mcp/integrations.md](mcp/integrations.md). For the WSL plus Windows broker layout, see [WSL_WINDOWS_ANTIGRAVITY.md](WSL_WINDOWS_ANTIGRAVITY.md).
+
+## 1. WSL Support Boundary
+
+Current repo truth for this guide:
+
+- **WSL2 (Ubuntu/Debian)** is a supported setup path for the local daemon and repo checkout.
+- Keep the **daemon and connector on the same side as the repo**: inside WSL.
+- The **relay** is the remote planner surface.
+- The daemon-local `/mcp/call` endpoint is **compatibility-only** and is **not** the public remote MCP contract.
+- ChatGPT-style and Claude-style desktop wiring remains **compatibility-only** in this beta. Codencer proves the relay `/mcp` surface, not each product UI flow.
+
+Recommended topology:
+
+```text
+Windows desktop client (optional)
+ -> relay /mcp on localhost:8090
+ -> connector websocket from WSL
+ -> daemon on 127.0.0.1:8085 inside WSL
+ -> WSL-local executor adapter
+```
+
+## 2. WSL2 Prerequisites
+
+Install the Ubuntu/Debian packages that the beta docs require:
+
+```bash
+sudo apt update
+sudo apt install -y build-essential ca-certificates curl git jq
+```
+
+Install Go `1.25.0+`.
+
+The repo requires Go `1.25.0+`. The example below uses `go1.25.9`, which was listed on `go.dev/dl` on 2026-04-24. If a newer `1.25.x` patch exists when you read this, use that instead.
+
+```bash
+ARCH="$(dpkg --print-architecture)"
+case "$ARCH" in
+ amd64) GO_ARCH=amd64 ;;
+ arm64) GO_ARCH=arm64 ;;
+ *)
+ echo "Unsupported WSL architecture: $ARCH"
+ exit 1
+ ;;
+esac
+
+GO_VERSION=1.25.9
+curl -fsSLO "https://go.dev/dl/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz"
+sudo rm -rf /usr/local/go
+sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-${GO_ARCH}.tar.gz"
+echo 'export PATH=/usr/local/go/bin:$PATH' >> ~/.profile
+export PATH=/usr/local/go/bin:$PATH
+```
+
+Verify the toolchain:
+
+```bash
+go version
+gcc --version | head -n 1
+jq --version
+git --version
+```
+
+Expected output looks like:
+
+```text
+go version go1.25.9 linux/amd64
+gcc (Ubuntu ...) ...
+jq-1.6
+git version ...
+```
+
+## 3. Clean Clone And Supported Verification
+
+Start with a clean WSL clone and run the repo-level supported verifier exactly as the beta docs describe:
+
+```bash
+git clone https://github.com/lookmanrays/codencer
+cd codencer
+
+make build-supported
+make verify-beta
+```
+
+What to expect from `make build-supported`:
+
+```text
+==> Building orchestratord...
+==> Building orchestratorctl...
+==> Building codencer-connectord...
+==> Building codencer-relayd...
+==> Building codencer-cloudctl...
+==> Building codencer-cloudd...
+==> Building codencer-cloudworkerd...
+==> Building mcp-sdk-smoke (official MCP SDK proof helper)...
+```
+
+What to expect from `make verify-beta`:
+
+```text
+==> Running main-module tests...
+==> Running local smoke...
+==> Running self-host relay/runtime smoke with MCP + SDK...
+==> Running cloud binary smoke...
+==> Validating docker compose config...
+==> Supported beta-track verification complete.
+```
+
+This is the canonical fresh-checkout proof path from [BETA_TESTING.md](BETA_TESTING.md). Run `make verify-beta-docker` only if you also want the Docker-backed cloud baseline on a Docker-capable host.
+
+## 4. One-Host Loopback Flow Inside WSL
+
+This section keeps the **daemon, relay, and connector all inside WSL** on one host. It matches the self-host boundaries from [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md) and the WSL topology guidance from [WSL_WINDOWS_ANTIGRAVITY.md](WSL_WINDOWS_ANTIGRAVITY.md).
+
+### 4.1 Start A Simulation Daemon In WSL
+
+Simulation mode is the most repeatable local proof path.
+
+```bash
+mkdir -p .codencer/relay .codencer/connector
+
+REPO_ROOT="$(pwd)"
+DAEMON_URL="http://127.0.0.1:8085"
+RELAY_URL="http://127.0.0.1:8090"
+CONNECTOR_CONFIG=".codencer/connector/config.json"
+
+nohup env ALL_ADAPTERS_SIMULATION_MODE=1 ./bin/orchestratord \
+ --repo-root "$REPO_ROOT" \
+ > .codencer/daemon.log 2>&1 &
+echo $! > .codencer/daemon.pid
+
+for _ in $(seq 1 20); do
+ if curl -fsS "$DAEMON_URL/api/v1/instance" >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+done
+
+curl -fsS "$DAEMON_URL/api/v1/instance" | tee .codencer/instance.json | jq
+```
+
+Expected output snippet:
+
+```json
+{
+ "id": "<instance-id>",
+ "repo_root": "/home/<user>/.../codencer"
+}
+```
+
+### 4.2 Create A Relay Config And Planner Token
+
+The relay is the public remote planner surface. Do not expose the daemon directly.
+
+```bash
+./bin/codencer-relayd planner-token create \
+ --config .codencer/relay/config.json \
+ --write-config \
+ --name wsl-operator \
+ --scope '*' \
+ --json | tee .codencer/relay/planner-token.json
+
+PLANNER_TOKEN="$(jq -r '.token' .codencer/relay/planner-token.json)"
+```
+
+Expected output snippet:
+
+```json
+{
+ "name": "wsl-operator",
+ "token": "<generated-planner-token>"
+}
+```
+
+### 4.3 Start The Relay In WSL
+
+```bash
+nohup ./bin/codencer-relayd \
+ --config .codencer/relay/config.json \
+ > .codencer/relay/relay.log 2>&1 &
+echo $! > .codencer/relay/relay.pid
+
+for _ in $(seq 1 20); do
+ if ./bin/codencer-relayd status --config .codencer/relay/config.json --json >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+done
+
+./bin/codencer-relayd status \
+ --config .codencer/relay/config.json \
+ --json | jq
+```
+
+Expected output snippet:
+
+```json
+{
+ "planner_auth_mode": "static_bearer_tokens"
+}
+```
+
+### 4.4 Enroll And Run The Connector In WSL
+
+Create a short-lived enrollment token, enroll the connector against the WSL daemon, then run it.
+
+```bash
+./bin/codencer-relayd enrollment-token create \
+ --config .codencer/relay/config.json \
+ --label wsl-loopback \
+ --expires-in-seconds 600 \
+ --json | tee .codencer/relay/enrollment-token.json
+
+ENROLLMENT_TOKEN="$(jq -r '.secret' .codencer/relay/enrollment-token.json)"
+
+./bin/codencer-connectord enroll \
+ --relay-url "$RELAY_URL" \
+ --daemon-url "$DAEMON_URL" \
+ --enrollment-token "$ENROLLMENT_TOKEN" \
+ --config "$CONNECTOR_CONFIG" \
+ --label wsl-loopback
+
+./bin/codencer-connectord list --config "$CONNECTOR_CONFIG" --json | jq
+
+nohup ./bin/codencer-connectord run \
+ --config "$CONNECTOR_CONFIG" \
+ > .codencer/connector/connector.log 2>&1 &
+echo $! > .codencer/connector/connector.pid
+
+for _ in $(seq 1 20); do
+ if curl -fsS -H "Authorization: Bearer $PLANNER_TOKEN" "$RELAY_URL/api/v2/instances" >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+done
+
+./bin/codencer-connectord status --config "$CONNECTOR_CONFIG" --json | jq
+```
+
+Expected output snippet after `list`:
+
+```json
+[
+ {
+ "daemon_url": "http://127.0.0.1:8085",
+ "share": true
+ }
+]
+```
+
+### 4.5 Confirm The Relay Sees The WSL Instance
+
+```bash
+curl -fsS \
+ -H "Authorization: Bearer $PLANNER_TOKEN" \
+ "$RELAY_URL/api/v2/instances" | jq
+```
+
+Expected output snippet:
+
+```json
+[
+ {
+ "instance_id": "<instance-id>"
+ }
+]
+```
+
+At this point the one-host loopback chain is live:
+
+- daemon in WSL on `127.0.0.1:8085`
+- relay in WSL on `127.0.0.1:8090`
+- connector in WSL with an outbound websocket to the relay
+- planner clients targeting the relay, not the daemon
+
+## 5. Repeatable Verification Smoke
+
+For a repeatable relay-plus-MCP proof against the WSL loopback stack you just started, run:
+
+```bash
+PLANNER_TOKEN="$PLANNER_TOKEN" \
+RELAY_CONFIG=".codencer/relay/config.json" \
+RELAY_URL="$RELAY_URL" \
+DAEMON_URL="$DAEMON_URL" \
+SMOKE_SCENARIOS="status,audit,mcp,mcp-sdk" \
+./scripts/self_host_smoke.sh
+```
+
+Expected output starts like:
+
+```text
+--- Codencer Self-Host Smoke ---
+Daemon: http://127.0.0.1:8085
+Relay: http://127.0.0.1:8090
+Scenarios: status,audit,mcp,mcp-sdk
+Local instance:
+```
+
+Expected summary tail looks like:
+
+```text
+--- Self-Host Smoke Summary ---
+Run: smoke-run-...
+Step: ...
+State: completed
+Terminal: true
+Summary: ...
+MCP SDK: /tmp/...
+```
+
+This lines up with the self-host proof boundary in [BETA_TESTING.md](BETA_TESTING.md) and the planner/client contract in [mcp/integrations.md](mcp/integrations.md).
+
+## 6. Executor Adapters On WSL
+
+Current WSL-facing adapter posture:
+
+| Adapter | WSL posture | Beta label | Notes |
+| --- | --- | --- | --- |
+| `codex` | runs inside WSL if the binary is available there | `supported-beta target` | Primary intended local beta adapter, but checked-in proof is still simulation-heavy rather than live-binary proven. |
+| `claude` / Claude Code | runs inside WSL if the `claude` binary is available there | `supported-beta target` | Wrapper proof is real, but live authenticated proof remains narrow. This is separate from Claude Desktop remote MCP wiring. |
+| `qwen` | runs inside WSL if the binary is available there | `secondary` | Conformance and simulation coverage only; not part of the primary beta promise. |
+| `antigravity` | do not treat as a WSL-native adapter path | `secondary` | Keep Antigravity on Windows. WSL reaches it through `agent-broker`; see [SETUP_WINDOWS.md](SETUP_WINDOWS.md) and [WSL_WINDOWS_ANTIGRAVITY.md](WSL_WINDOWS_ANTIGRAVITY.md). |
+
+Antigravity remains a Windows-side concern in this topology:
+
+- keep the repo, worktrees, daemon, connector, and artifacts in WSL
+- keep `agent-broker` on Windows only when you need the IDE-side bridge
+- do not move the connector to Windows just because Antigravity exists
+
+## 7. Reaching The WSL Relay From Windows-Side ChatGPT Desktop Or Claude Desktop
+
+Codencer-side truth stays narrow:
+
+- point Windows-side planner clients at the **relay** on `/mcp`
+- do **not** point them at the WSL daemon
+- product-specific ChatGPT-style and Claude-style setup remains **compatibility-only**; use [mcp/integrations.md](mcp/integrations.md) as the Codencer contract
+
+### 7.1 First Try `localhost`
+
+Microsoft's current WSL networking docs say Windows can usually reach Linux-side services through `localhost`, and WSL's `localhostForwarding` setting is on by default for WSL2. If that holds on your machine, the Windows-side MCP endpoint is simply:
+
+```text
+http://localhost:8090/mcp
+```
+
+If your desktop client needs a config file on the Windows side, keep the URL as `http://localhost:8090/mcp`. You can read example files from the repo through `\\wsl$`, but `\\wsl$` is a filesystem path, not an HTTP host.
+
+### 7.2 If `localhost` Does Not Work, Use An Explicit Windows Port Proxy
+
+This is the explicit Windows-side port forward fallback for the WSL relay.
+
+WSL networking mode, VPNs, firewall policy, and mirrored-mode behavior can change reachability. If a Windows-side client cannot hit `http://localhost:8090/mcp`, create an explicit Windows port proxy to the current WSL IP.
+
+In **PowerShell as Administrator**:
+
+```powershell
+$WslIp = (wsl hostname -I).Trim().Split(' ')[0]
+netsh interface portproxy add v4tov4 `
+ listenaddress=127.0.0.1 `
+ listenport=8090 `
+ connectaddress=$WslIp `
+ connectport=8090
+```
+
+Then use:
+
+```text
+http://localhost:8090/mcp
+```
+
+Important caveats:
+
+- the WSL IP can change after `wsl --shutdown`, reboot, or distro restart
+- mirrored networking and NAT networking behave differently
+- Windows firewall or Hyper-V firewall policy can still block access
+- some VPN setups change or break expected reachability
+
+### 7.3 What `\\wsl$` Is Good For
+
+`\\wsl$` is useful for:
+
+- reading logs from Windows
+- copying a checked-in example config out of the repo
+- editing files from Windows tools while the repo still lives in WSL
+
+`\\wsl$` is **not** the transport for relay HTTP or MCP traffic. Use `localhost` or an explicit forwarded port for that.
+
+## 8. Known WSL Callouts
+
+Keep these WSL-specific cautions in mind while testing:
+
+- **Clock drift after sleep or resume can happen on WSL2.** If tokens suddenly look expired or TLS timestamps look wrong, compare `date` in WSL with Windows time. A WSL restart such as `wsl --shutdown` usually resets the clock; on affected machines you may also need an explicit time sync inside the distro.
+- **Networking mode differences are real.** WSL NAT, mirrored mode, VPN policy, and firewall policy can change whether Windows reaches WSL services through `localhost` without extra setup.
+- **Cross-side paths are not execution contracts.** Keep repo roots, worktrees, artifacts, daemon state, and connector state on the WSL side. Use APIs and CLI output for results and artifacts instead of assuming raw WSL paths are meaningful to Windows-side clients.
+- **Do not expose the daemon directly.** Expose the relay if a Windows-side planner client needs remote MCP access.
+
+These cautions are consistent with [KNOWN_LIMITATIONS.md](KNOWN_LIMITATIONS.md) and [WSL_WINDOWS_ANTIGRAVITY.md](WSL_WINDOWS_ANTIGRAVITY.md).
+
+## 9. Stop The Local WSL Processes
+
+When you are done with the manual loopback stack:
+
+```bash
+kill "$(cat .codencer/connector/connector.pid)"
+kill "$(cat .codencer/relay/relay.pid)"
+kill "$(cat .codencer/daemon.pid)"
+```
+
+If you used `make start-sim` instead of the explicit daemon command, you can also use:
+
+```bash
+make stop
+```
+
+## 10. Where To Go Next
+
+- For the frozen beta verification tracks, go back to [BETA_TESTING.md](BETA_TESTING.md).
+- For planner/client URL and MCP surface choices, use [mcp/integrations.md](mcp/integrations.md).
+- For the mixed WSL plus Windows Antigravity topology, use [WSL_WINDOWS_ANTIGRAVITY.md](WSL_WINDOWS_ANTIGRAVITY.md) and [SETUP_WINDOWS.md](SETUP_WINDOWS.md).
+- For the fuller self-host relay/runtime operator flow, use [SELF_HOST_REFERENCE.md](SELF_HOST_REFERENCE.md).
diff --git a/docs/TROUBLESHOOTING.md b/docs/TROUBLESHOOTING.md
index 3e11ab7..f22e1c9 100644
--- a/docs/TROUBLESHOOTING.md
+++ b/docs/TROUBLESHOOTING.md
@@ -92,6 +92,48 @@ Always start here to verify your environment:
- **Cause**: `CODENCER_CONTINUE_ON_FAILURE=1` is set in the environment or the explicit continue flag was used.
- **Fix**: Unset the environment variable or omit the continue flag.
+### 2.13 "planner bearer token required" from `codencer-relayd`
+**Symptoms**: `codencer-relayd status`, `instances`, `connectors`, or `enrollment-token create` fails locally.
+- **Cause**: No planner token was supplied and the selected config file does not contain `planner_token` or `planner_tokens`.
+- **Fix**:
+ - Generate one locally: `./bin/codencer-relayd planner-token create --config .codencer/relay/config.json --write-config --name operator --scope '*'`
+ - Or pass `--token <planner-token>` explicitly.
+
+### 2.14 Connector is online but the instance never appears on the relay
+**Symptoms**: `codencer-connectord status` shows activity, but `/api/v2/instances` stays empty.
+- **Cause**: The instance is known locally but not marked `share=true`, or the connector is pointed at the wrong daemon URL.
+- **Fix**:
+ - Inspect local config: `./bin/codencer-connectord list`
+ - Share the intended daemon explicitly: `./bin/codencer-connectord share --daemon-url http://127.0.0.1:8085`
+ - Re-check local status: `./bin/codencer-connectord status --json`
+ - Re-check relay view: `./bin/codencer-relayd instances --config .codencer/relay/config.json`
+
+### 2.15 Relay returns `connector_disabled`
+**Symptoms**: Step, gate, artifact, or result routes start failing with a `403` and `connector_disabled`.
+- **Cause**: The relay-side connector record was explicitly disabled.
+- **Fix**:
+ - Inspect connector state: `./bin/codencer-relayd connectors --config .codencer/relay/config.json`
+ - Re-enable it: `./bin/codencer-relayd connector enable --config .codencer/relay/config.json <connector-id>`
+
+### 2.16 MCP returns `origin_denied`, `session_not_found`, or `protocol_version_mismatch`
+**Symptoms**: Browser-style or session-based MCP callers fail on `/mcp`.
+- **Cause**:
+ - `origin_denied`: the request Origin is not allowed by relay config
+ - `session_not_found`: the caller reused an expired or deleted `MCP-Session-Id`
+ - `protocol_version_mismatch`: the caller changed `MCP-Protocol-Version` after initialization
+- **Fix**:
+ - Add the caller origin to `allowed_origins`, or omit the Origin header for non-browser clients
+ - Re-run `initialize` to get a fresh `MCP-Session-Id`
+ - Keep the same negotiated `MCP-Protocol-Version` for the session
+
+### 2.17 Relay evidence routes work for result but not logs/artifacts/validations
+**Symptoms**: `/api/v2/steps/{id}/result` works, but logs or evidence routes fail.
+- **Cause**: The step route may exist but the connector was enrolled against a different daemon, the instance is unshared, or the step lives on another shared instance that is currently offline.
+- **Fix**:
+ - Verify the instance is still advertised and online through `codencer-relayd instances`
+ - Check the connector config and `share=true` state with `codencer-connectord list`
+ - Re-run the connector and inspect `status.json`
+
---
## 3. Interpreting Step States
@@ -112,8 +154,8 @@ Every task result includes a `state`. Understanding the difference between **Inf
## 4. Antigravity & Broker Issues (Experimental)
### 4.1 "Broker bind error: connection refused"
-- **Cause**: The Windows-side `agent-broker.exe` is not running.
-- **Fix**: Start the broker on the host machine. Verify port 8088 is open.
+- **Cause**: The Windows-side broker is not running.
+- **Fix**: Run `make build-broker`, start the resulting broker binary on the host machine, and verify port 8088 is open.
### 4.2 "No instances discovered"
- **Cause**: Antigravity is not active in your IDE or the `.gemini` daemon directory is hidden/unreachable.
diff --git a/docs/VALIDATION_SCENARIO.md b/docs/VALIDATION_SCENARIO.md
index a19fcae..5de1f8e 100644
--- a/docs/VALIDATION_SCENARIO.md
+++ b/docs/VALIDATION_SCENARIO.md
@@ -3,11 +3,11 @@
This scenario is designed to validate the reliability of the Bridge's execution, evidence harvesting, and reporting flow using a real (non-simulated) Codex-first execution.
## Objective
-Update the internal application version string in `internal/app/version.go`.
+Update the internal application version string in `internal/app/version.go` to a temporary validation marker.
## 1. Scenario Details
-- **Task**: "Update the Version variable in `internal/app/version.go` from its current value to `v0.1.0-alpha`."
+- **Task**: "Update the Version variable in `internal/app/version.go` from its current value to `v0.2.0-alpha-validation`."
- **Adapter**: `codex` (Non-simulated).
- **Target File**: `internal/app/version.go`.
- **Why Safe?**:
@@ -29,7 +29,7 @@ run_id: "validation-run-01"
# [OPTIONAL] step_id: "bump-version-01"
# [OPTIONAL] phase_id: "phase-execution-validation-run-01"
title: "Internal Version Bump"
-goal: "Update internal/app/version.go to set Version = \"v0.1.0-alpha\""
+goal: "Update internal/app/version.go to set Version = \"v0.2.0-alpha-validation\""
adapter_profile: "codex"
constraints:
- "Do not change the package name"
@@ -55,7 +55,7 @@ constraints:
## 4. Verification Steps
1. **Automated Submission (Recommended)**:
- Ensure the daemon is running in a separate terminal (`make run`), then execute:
+ Ensure the daemon is running in a separate terminal (`make start` for real mode), then execute:
```bash
make validate
```
@@ -70,7 +70,7 @@ constraints:
A narrower direct-input equivalent is also possible when you only need goal/title/adapter-style fields:
```bash
./bin/orchestratorctl submit validation-run-01 \
- --goal "Update internal/app/version.go to set Version = \"v0.1.0-alpha\"" \
+ --goal "Update internal/app/version.go to set Version = \"v0.2.0-alpha-validation\"" \
--title "Internal Version Bump" \
--adapter codex \
--wait --json
@@ -84,7 +84,7 @@ constraints:
```json
{
"state": "completed",
- "summary": "Updated internal/app/version.go to v0.1.0-alpha",
+ "summary": "Updated internal/app/version.go to v0.2.0-alpha-validation",
"artifacts": {
"result.json": "/home/lookman/projects/codencer/artifacts/val-step-01-a1/result.json",
"stdout.log": "/home/lookman/projects/codencer/artifacts/val-step-01-a1/stdout.log"
diff --git a/docs/WSL_WINDOWS_ANTIGRAVITY.md b/docs/WSL_WINDOWS_ANTIGRAVITY.md
new file mode 100644
index 0000000..6b9bcd4
--- /dev/null
+++ b/docs/WSL_WINDOWS_ANTIGRAVITY.md
@@ -0,0 +1,137 @@
+# WSL, Windows, and Agent Broker Topology
+
+This document describes the recommended v2 operator topology for Codencer when repos and execution live in WSL/Linux while an IDE or agent-broker may live on Windows.
+
+It is guidance for operators, not an automated smoke-proof matrix.
+
+## Recommended Default Layout
+
+- **WSL/Linux**
+ - Codencer daemon
+ - Git clone
+ - worktrees
+ - artifacts
+ - connector
+ - local executor binaries and local adapter execution
+- **Windows**
+ - IDE
+ - agent-broker and IDE-side companion process, if used
+- **Anywhere reachable by the operator**
+ - relay
+
+The default recommendation is simple:
+- keep the daemon and connector on the same side as the repo
+- keep execution and artifacts local to that side
+- treat the relay as remote control plane only
+
+## Trust Boundaries
+
+The trust model is intentionally narrow:
+
+- **Planner**
+ - decides what to do
+ - calls relay HTTP API or relay MCP
+ - does not get raw shell or arbitrary file access from Codencer
+- **Relay**
+ - authenticates planners and connectors
+ - routes requests to the correct shared instance
+ - records audit events
+ - exposes the canonical remote MCP surface at `/mcp`
+ - does not execute code and does not plan
+- **Connector**
+ - opens outbound websocket session to relay
+ - advertises only explicitly shared instances
+ - proxies only an allowlisted local daemon API surface
+ - does not expose a general tunnel
+- **Daemon**
+ - owns run, step, gate, artifact, and state-machine truth
+ - executes work locally through adapters
+ - exposes `/mcp/call` only as a local compatibility/admin bridge
+ - must not be exposed directly to the internet
+- **agent-broker**
+ - is separate from relay
+ - is optional
+ - serves IDE-side discovery/binding concerns, not remote planner control
+
+## Practical WSL / Windows Model
+
+When using WSL and Windows together:
+
+1. Keep the repository checkout in WSL.
+2. Run `orchestratord` in WSL with that repo as `--repo-root`.
+3. Run `codencer-connectord` in WSL so it can talk to the daemon over local loopback without crossing trust domains.
+4. Run the relay wherever you want to terminate remote planner auth.
+5. If you use the agent-broker, keep it on the Windows/IDE side and bind from the daemon when needed.
+
+Concrete command sketch:
+
+```bash
+# WSL
+make build
+mkdir -p .codencer/relay
+./bin/codencer-relayd planner-token create --config .codencer/relay/config.json --write-config --name operator --scope '*'
+./bin/codencer-relayd --config .codencer/relay/config.json
+./bin/orchestratord --repo-root /home/<user>/Projects/my-repo
+./bin/codencer-relayd enrollment-token create --config .codencer/relay/config.json --label wsl-dev --json
+./bin/codencer-connectord enroll --relay-url http://127.0.0.1:8090 --daemon-url http://127.0.0.1:8085 --enrollment-token <enrollment-token>
+./bin/codencer-connectord run
+```
+
+```powershell
+# Windows
+make build-broker
+.\bin\agent-broker.exe
+```
+
+```bash
+# WSL, when the Windows broker is in use
+export CODENCER_ANTIGRAVITY_BROKER_URL=http://127.0.0.1:8088
+./bin/orchestratorctl antigravity list
+./bin/orchestratorctl antigravity bind
+```
+
+Build note:
+- `make build` builds the daemon, CLI, connector, and relay binaries
+- `make build-broker` builds the Windows-side agent-broker from the nested `cmd/broker` module
+
+This avoids the most common problems:
+- daemon exposed beyond loopback
+- artifacts split across Windows and WSL unexpectedly
+- connector proxying through a cross-side filesystem layout it does not control
+- relay being mistaken for an execution host
+
+## Loopback and Cross-Side Cautions
+
+- Shared loopback between WSL and Windows can work, but it is an operator convenience, not a new trust boundary.
+- Do not assume artifact paths are meaningful off the daemon side. Use result, validation, and artifact APIs rather than raw paths.
+- Do not run the connector on Windows while the daemon and repo live in WSL unless you intentionally want a cross-side local hop and understand the failure modes.
+- Do not expose the daemon directly just because the relay exists. The relay should be the public remote surface.
+
+## Antigravity Guidance
+
+Antigravity remains local-side execution metadata and binding infrastructure:
+- use it when a repo needs to target a live IDE-side agent context
+- do not treat it as the relay
+- do not treat it as a planner
+- do not assume it widens the safe remote surface
+
+The broker and relay are different things:
+- **agent-broker**: local/cross-side IDE bridge
+- **relay**: authenticated remote control plane for planner calls
+
+The most common stable arrangement for this repo is:
+- WSL: repo, worktrees, daemon, connector, artifacts
+- Windows: Antigravity IDE and agent-broker
+- relay: WSL, local server, or VPS
+- planner client: relay HTTP/MCP only
+
+## Operator Checklist
+
+- daemon and repo on the same side
+- connector on the same side as the daemon
+- relay exposed instead of the daemon
+- only explicitly shared instances advertised
+- connector discovery and sharing are explicit (`discover`, `list`, `share`, `unshare`)
+- relay audit is operator-visible through the relay CLI
+- agent-broker kept separate from relay concerns
+- results, validations, and artifacts inspected through APIs and CLI, not raw cross-side paths
diff --git a/docs/design/v2_relay_contracts.md b/docs/design/v2_relay_contracts.md
new file mode 100644
index 0000000..09414be
--- /dev/null
+++ b/docs/design/v2_relay_contracts.md
@@ -0,0 +1,799 @@
+# Codencer V2 Relay Contracts
+
+> [!IMPORTANT]
+> This file is historical design context for the v2 relay build-out.
+> It is not the canonical source of current runtime behavior.
+>
+> For current self-host/runtime truth, use:
+> - [README.md](../../README.md)
+> - [docs/SELF_HOST_REFERENCE.md](../SELF_HOST_REFERENCE.md)
+> - [docs/RELAY.md](../RELAY.md)
+> - [docs/CONNECTOR.md](../CONNECTOR.md)
+> - [docs/mcp/relay_tools.md](../mcp/relay_tools.md)
+
+Status: verified against the current repository on 2026-04-12.
+
+This file separates:
+- `Verified current repo reality`: code already present in this repo.
+- `Locked v2 contract`: exact interface to implement next where the current repo is incomplete or inconsistent.
+
+## 1. Reusable Current Surfaces
+
+### 1.1 CLI
+
+Verified current repo reality:
+
+| Surface | Verified current behavior | Reuse |
+| --- | --- | --- |
+| `cmd/orchestratord` | Local daemon. Starts HTTP API. Supports `--config` and `--repo-root`. | Reuse as the local control plane. |
+| `cmd/orchestratorctl` | Local operator CLI for `run`, `step`, `submit`, `gate`, `antigravity`, `doctor`, `instance`, `version`. | Reuse as local admin/debug surface. Do not make it the relay protocol. |
+| `cmd/codencer-connectord` | `enroll` and `run`. Persists connector config locally. Opens outbound websocket to relay. | Canonical connector entrypoint. |
+| `cmd/codencer-relayd` | Self-hostable relay daemon with sqlite store. | Canonical relay entrypoint. |
+| `cmd/broker` | Optional Antigravity host-side broker for discovery/binding. | Reuse only for Antigravity topology; not part of relay protocol. |
+
+### 1.2 Local Daemon REST API
+
+Verified current repo reality:
+
+| Method | Path | Notes |
+| --- | --- | --- |
+| `GET` | `/api/v1/runs` | Filters supported via query. |
+| `POST` | `/api/v1/runs` | Starts a run. Request body is run metadata. |
+| `GET` | `/api/v1/runs/{id}` | Returns run. |
+| `PATCH` | `/api/v1/runs/{id}` | Supports `{"action":"abort"}`. |
+| `GET` | `/api/v1/runs/{id}/steps` | Lists steps for a run. |
+| `POST` | `/api/v1/runs/{id}/steps` | Accepts `domain.TaskSpec`. |
+| `GET` | `/api/v1/runs/{id}/gates` | Lists gates for a run. |
+| `GET` | `/api/v1/steps/{id}` | Returns step. |
+| `GET` | `/api/v1/steps/{id}/result` | Returns normalized result envelope. |
+| `GET` | `/api/v1/steps/{id}/validations` | Returns validations. |
+| `GET` | `/api/v1/steps/{id}/artifacts` | Returns artifacts for latest attempt. |
+| `GET` | `/api/v1/steps/{id}/logs` | Returns log artifact content via artifact lookup. |
+| `GET` | `/api/v1/artifacts/{id}/content` | Returns artifact bytes/content. |
+| `POST` | `/api/v1/gates/{id}` | Supports `{"action":"approve"}` and `{"action":"reject"}`. |
+| `GET` | `/api/v1/compatibility` | Runtime-derived adapter/environment truth. |
+| `GET` | `/api/v1/routing` | Current routing config surface. |
+| `GET` | `/api/v1/benchmarks` | Benchmark surface. |
+| `GET` | `/api/v1/instance` | Stable daemon identity and local addressing info. |
+| `GET` | `/api/v1/antigravity/instances` | Antigravity discovery. |
+| `GET` | `/api/v1/antigravity/status` | Bound Antigravity instance status. |
+| `POST`,`DELETE` | `/api/v1/antigravity/bind` | Bind/unbind Antigravity instance. |
+| `POST` | `/mcp/call` | Local MCP-like JSON-RPC tool shim. |
+
+### 1.3 Instance Identity
+
+Verified current repo reality:
+
+- `domain.InstanceInfo` includes stable `id`.
+- The daemon persists `daemon_instance_id` in sqlite settings.
+- `/api/v1/instance` returns:
+ - `id`
+ - `version`
+ - `repo_root`
+ - `state_dir`
+ - `workspace_root`
+ - `host`
+ - `port`
+ - `base_url`
+ - `execution_mode`
+ - `pid`
+ - `started_at`
+
+Reuse decision:
+
+- Treat daemon `id` as the canonical local instance identity for connector and relay routing.
+- Do not invent a second local instance identity source in connector config.
+
+### 1.4 Adapter Registry and Capability Surface
+
+Verified current repo reality:
+
+- Adapters are registered in bootstrap, not discovered from docs.
+- Current registered adapter IDs:
+ - `codex`
+ - `claude`
+ - `qwen`
+ - `ide-chat`
+ - `openclaw-acpx`
+ - `antigravity`
+ - `antigravity-broker`
+- `/api/v1/compatibility` returns `domain.CompatibilityInfo`:
+ - `tier`
+ - `adapters[]`
+ - `environment`
+- Each adapter entry includes:
+ - `id`
+ - `available`
+ - `status`
+ - `mode`
+ - `capabilities[]`
+
+Reuse decision:
+
+- Relay should consume and expose this runtime-derived compatibility surface.
+- Do not hardcode support matrices in relay or MCP.
+
+### 1.5 Artifact and Result Retrieval
+
+Verified current repo reality:
+
+- Result retrieval is service-backed:
+ - `RunService.GetResultByStep`
+ - `ensureResultEnvelope` fills `version`, `run_id`, `step_id`, `state`, `summary`.
+- Artifact retrieval is service-backed:
+ - `RunService.GetArtifact`
+ - `RunService.GetArtifactContent`
+ - `RunService.GetLogsByStep`
+- `GET /api/v1/steps/{id}/logs` no longer dereferences a raw path in the handler.
+- `GET /api/v1/artifacts/{id}/content` exists and is the canonical content read path.
+
+Reuse decision:
+
+- Connector and relay should only use service-backed retrieval surfaces.
+- Do not add direct path reads in connector, relay, or MCP.
+
+### 1.6 Broker Integration
+
+Verified current repo reality:
+
+- `AntigravityService` supports:
+ - direct discovery mode
+ - broker mode when `brokerURL` is configured
+- Broker mode uses:
+ - `GET /instances`
+ - `POST /binding`
+ - `DELETE /binding`
+ - `GET /binding`
+- Bind/unbind is repo-root keyed.
+- Broker persists binding outside Codencer state.
+
+Reuse decision:
+
+- Keep broker integration as an executor-side topology detail.
+- Do not route planner traffic through the Antigravity broker.
+
+### 1.7 Current MCP Shim Status
+
+Verified current repo reality:
+
+- Local daemon exposes `/mcp/call` through `internal/mcp` as a legacy compatibility/admin bridge.
+- Relay exposes `/mcp` and `/mcp/call` through `internal/relay/mcp_server.go`.
+- The relay MCP surface supports `initialize`, `tools/list`, and `tools/call`.
+- Local daemon tool names:
+ - `orchestrator.start_run`
+ - `orchestrator.list_runs`
+ - `orchestrator.start_step`
+ - `orchestrator.retry_step`
+ - `orchestrator.get_status`
+ - `orchestrator.get_step_result`
+ - `orchestrator.get_validations`
+ - `orchestrator.list_artifacts`
+ - `orchestrator.approve_gate`
+ - `orchestrator.reject_gate`
+ - `orchestrator.get_benchmarks`
+ - `orchestrator.get_routing_config`
+- Relay tool names:
+ - `codencer.list_instances`
+ - `codencer.get_instance`
+ - `codencer.start_run`
+ - `codencer.get_run`
+ - `codencer.submit_task`
+ - `codencer.get_step`
+ - `codencer.wait_step`
+ - `codencer.get_step_result`
+ - `codencer.list_step_artifacts`
+ - `codencer.get_artifact_content`
+ - `codencer.get_step_validations`
+ - `codencer.approve_gate`
+ - `codencer.reject_gate`
+ - `codencer.abort_run`
+ - `codencer.retry_step`
+
+Verified remaining limitation:
+
+- The daemon-local MCP surface remains legacy/local-only and should not be used as the public remote integration target.
+
+## 2. Missing Pieces For Secure Remote Planner Callability
+
+Verified gaps in the current repo:
+
+| Area | Verified current repo reality | Blocker |
+| --- | --- | --- |
+| Planner auth | Relay uses static bearer tokens with scopes and optional instance restrictions. | Honest alpha-grade self-host auth; no rotation or enterprise IAM. |
+| Connector auth | Enrollment uses one-time tokens or legacy bootstrap secret, plus signed challenge/response for websocket sessions. | Revocation/disable flows are still operator-light. |
+| Presence/discovery | Relay persists advertised instances and tracks heartbeat-driven session presence. | Offline routing still depends on current relay state and TTL expiry. |
+| Instance descriptor | Relay stores `instance_id`, `connector_id`, `repo_root`, `base_url`, raw compatibility JSON, `last_seen_at`. | Planner-facing normalization is still lightweight. |
+| Cancellation | Local daemon supports honest abort and relay exposes abort passthrough. | Abort is still best-effort unless the adapter actually stops; success is only returned on a real `cancelled` outcome. |
+| Wait semantics | Relay HTTP and MCP both expose bounded `wait_step`. | No streaming/log-tail transport; wait remains poll-based. |
+| Artifact content | Local daemon exposes `/api/v1/artifacts/{id}/content`. Relay proxies it. | Large binary transport remains intentionally bounded. |
+| Artifact metadata by ID | Service can load artifact by ID. | No `GET /api/v1/artifacts/{id}` local endpoint; relay cannot build metadata-rich artifact responses from ID alone without cached context. |
+| Gate lifecycle | Local gate approval/rejection reconciles step and run state. | No local `GET /api/v1/gates/{id}` read surface; relay gate responses cannot return gate object without extra work. |
+| Resource routing | Relay persists observed route hints and probes authorized online shared instances when a `step`, `artifact`, or `gate` route is missing. | Direct lookups still fail closed when no online match exists or multiple instances match. |
+| Capability introspection | Daemon compatibility is runtime-derived and truthful. | Relay lists raw stored rows; planner-facing compatibility contract is not normalized. |
+| Contract drift | `schemas/result.schema.json` lagged behind `domain.StepState`. | Fixed in this change; keep schema and domain state sets aligned. |
+
+## 3. Locked V2 Contract Package
+
+This section defines the exact contract to implement and preserve across connector, relay, and relay-side MCP.
+
+### 3.1 Connector Enrollment
+
+Current implementation:
+
+- Request and response already exist in `internal/relayproto/types.go`.
+- Enrollment now sends connector public key and machine metadata.
+
+Locked v2 contract:
+
+Request:
+
+```json
+{
+ "enrollment_token": "string",
+ "label": "string",
+ "public_key": "base64-ed25519-public-key",
+ "machine": {
+ "hostname": "string",
+ "os": "linux",
+ "arch": "amd64"
+ }
+}
+```
+
+Response:
+
+```json
+{
+ "connector_id": "connector-",
+ "machine_id": "machine-",
+ "relay": {
+ "relay_url": "https://relay.example",
+ "websocket_url": "wss://relay.example/ws/connectors",
+ "heartbeat_interval_seconds": 15
+ }
+}
+```
+
+Rules:
+
+- Connector private key stays local and is used for session signing.
+- Relay stores the connector public key and machine binding.
+- `label` is optional but should be persisted by relay for operator visibility.
+
+### 3.2 Connector Challenge
+
+Current implementation:
+
+- Missing at the time this contract was locked. Section 2 and Section 8 record the later signed challenge/response implementation; confirm current code before treating this as absent.
+
+Locked v2 contract:
+
+Request:
+
+```json
+{
+ "connector_id": "connector-",
+ "machine_id": "machine-"
+}
+```
+
+Response:
+
+```json
+{
+ "challenge_id": "challenge-",
+ "nonce": "base64url",
+ "relay": {
+ "relay_url": "https://relay.example",
+ "websocket_url": "wss://relay.example/ws/connectors",
+ "heartbeat_interval_seconds": 15
+ }
+}
+```
+
+Proof rule:
+
+- `signature = base64(Ed25519Sign(private_key, challenge_id + ":" + nonce + ":" + connector_id + ":" + machine_id))`
+
+### 3.3 Connector Session Hello
+
+Current implementation:
+
+- Current websocket first message is a signed `hello`, followed by `advertise`.
+
+Locked v2 contract:
+
+```json
+{
+ "type": "hello",
+ "connector_id": "connector-",
+ "machine_id": "machine-",
+ "challenge_id": "challenge-",
+ "signature": "base64-ed25519-signature"
+}
+```
+
+Rules:
+
+- Connector follows `hello` with an `advertise` message carrying one or more shared local instances.
+- Relay treats each advertised `instance.id` as the routing key.
+- Connector config persists connector identity and an explicit shared-instance allowlist.
+
+### 3.4 Connector Heartbeat
+
+Current implementation:
+
+- Missing an explicit heartbeat message at the time this contract was locked. Section 2 and Section 8 record later heartbeat-driven session presence; confirm current code before treating this as absent.
+
+Locked v2 contract:
+
+```json
+{
+ "type": "heartbeat",
+ "connector_id": "connector-",
+ "instance_id": "daemon-",
+ "session_id": "session-",
+ "sent_at": "2026-04-12T10:00:15Z"
+}
+```
+
+Rules:
+
+- Relay updates `last_seen_at` on every heartbeat.
+- Relay marks instance offline when heartbeat TTL expires.
+- Heartbeat does not carry planner traffic.
+
+### 3.5 Relay Request Envelope
+
+Current implementation:
+
+- Current internal envelope is `CommandRequest`.
+
+Locked v2 contract:
+
+```json
+{
+ "type": "request",
+ "request_id": "req-",
+ "method": "GET",
+ "path": "/api/v1/steps/step-123/result",
+ "query": "",
+ "content_type": "application/json",
+ "content_encoding": "json",
+ "body": null,
+ "timeout_ms": 15000
+}
+```
+
+Rules:
+
+- `path` must be in the connector allowlist.
+- `body` is a JSON value when `content_encoding == "json"`.
+- `body` is a string when `content_encoding == "utf-8"`.
+- `body` is a base64 string when `content_encoding == "base64"`.
+
+### 3.6 Relay Response Envelope
+
+Current implementation:
+
+- Current internal envelope is `CommandResponse`.
+
+Locked v2 contract:
+
+```json
+{
+ "type": "response",
+ "request_id": "req-",
+ "status_code": 200,
+ "content_type": "application/json",
+ "content_encoding": "json",
+ "body": {},
+ "error": ""
+}
+```
+
+Rules:
+
+- `error` is empty on success.
+- Relay must not invent success when connector timeout/cancellation is not confirmed.
+- Non-JSON bodies must set `content_encoding` accordingly.
+
+### 3.7 Instance Descriptor
+
+Current implementation:
+
+- Relay currently returns raw `InstanceRecord` rows from storage.
+
+Locked v2 contract:
+
+```json
+{
+ "instance_id": "daemon-",
+ "connector_id": "connector-",
+ "label": "string",
+ "version": "string",
+ "repo_root": "/abs/path",
+ "state_dir": "/abs/path/.codencer/state",
+ "workspace_root": "/abs/path/.codencer/workspaces",
+ "host": "127.0.0.1",
+ "port": 8085,
+ "base_url": "http://127.0.0.1:8085",
+ "execution_mode": "string",
+ "pid": 12345,
+ "started_at": "2026-04-12T10:00:00Z",
+ "online": true,
+ "last_seen_at": "2026-04-12T10:00:15Z",
+ "compatibility": {
+ "tier": 1,
+ "adapters": [],
+ "environment": {
+ "os": "linux",
+ "vscode_detected": false
+ }
+ }
+}
+```
+
+Rules:
+
+- Planner-facing instance discovery returns this shape, not raw relay storage rows.
+- `online` is derived from heartbeat/session state, not assumed from row presence.
+
+### 3.8 Planner `start_run` Request
+
+Current implementation:
+
+- Local daemon already accepts this shape on `POST /api/v1/runs`.
+
+Locked v2 contract:
+
+```json
+{
+ "id": "run-optional",
+ "project_id": "default-project",
+ "conversation_id": "string",
+ "planner_id": "string",
+ "executor_id": "string"
+}
+```
+
+Rules:
+
+- `id` is optional.
+- Relay passes this through unchanged to the local daemon.
+
+### 3.9 Planner `submit_task` Request
+
+Current implementation:
+
+- Local daemon already accepts `domain.TaskSpec` on `POST /api/v1/runs/{run_id}/steps`.
+- `schemas/task.schema.json` is close to code truth and remains the task payload source of truth.
+
+Locked v2 contract:
+
+```json
+{
+ "version": "v1",
+ "project_id": "string",
+ "run_id": "run-optional",
+ "phase_id": "phase-optional",
+ "step_id": "step-optional",
+ "title": "string",
+ "goal": "string",
+ "context": {
+ "summary": "string"
+ },
+ "constraints": ["string"],
+ "allowed_paths": ["string"],
+ "forbidden_paths": ["string"],
+ "validations": [
+ {
+ "name": "string",
+ "command": "string"
+ }
+ ],
+ "acceptance": ["string"],
+ "stop_conditions": ["string"],
+ "policy_bundle": "string",
+ "adapter_profile": "string",
+ "timeout_seconds": 300,
+ "is_simulation": false
+}
+```
+
+Rules:
+
+- Relay must not rewrite planner intent fields except to fill omitted `run_id`, `phase_id`, and `step_id` in the daemon-compatible way.
+
+### 3.10 `wait_step` Request/Response
+
+Current implementation:
+
+- Missing as a relay and MCP contract.
+
+Locked v2 contract:
+
+Request:
+
+```json
+{
+ "interval_ms": 1000,
+ "timeout_ms": 300000,
+ "include_result": true
+}
+```
+
+Response:
+
+```json
+{
+ "step_id": "step-",
+ "state": "completed",
+ "terminal": true,
+ "timed_out": false,
+ "step": {},
+ "result": {}
+}
+```
+
+Rules:
+
+- Relay implements this by polling existing step/result surfaces.
+- No new daemon wait endpoint is required.
+- `result` is omitted when `include_result == false` or the step is not terminal.
+
+### 3.11 Artifact Content Response
+
+Current implementation:
+
+- Local daemon and relay currently return raw proxied content for `/artifacts/{id}/content`.
+
+Locked v2 contract:
+
+```json
+{
+ "artifact_id": "artifact-",
+ "name": "stdout.log",
+ "type": "log",
+ "mime_type": "text/plain",
+ "encoding": "utf-8",
+ "size": 1234,
+ "hash": "sha256:",
+ "content": "string-or-base64"
+}
+```
+
+Rules:
+
+- Planner-facing relay API and relay-side MCP return this JSON shape.
+- Local daemon may continue to expose raw bytes on `/api/v1/artifacts/{id}/content`.
+- To support this cleanly, daemon should expose artifact metadata by ID or relay must persist enough metadata from list responses.
+
+### 3.12 Gate Action Request/Response
+
+Current implementation:
+
+- Local daemon accepts `POST /api/v1/gates/{id}` with `{"action":"approve"}` or `{"action":"reject"}` and returns empty `200`.
+
+Locked v2 contract:
+
+Request:
+
+```json
+{
+ "reason": "string"
+}
+```
+
+Response:
+
+```json
+{
+ "gate_id": "gate-",
+ "run_id": "run-",
+ "step_id": "step-",
+ "state": "approved",
+ "resolved_at": "2026-04-12T10:00:00Z"
+}
+```
+
+Rules:
+
+- Path encodes the action:
+ - `POST /api/v2/gates/{gate_id}/approve`
+ - `POST /api/v2/gates/{gate_id}/reject`
+- Relay must not return a synthetic success body without confirming the resulting gate state.
+
+## 4. Exact API List
+
+### 4.1 Planner-Facing Relay HTTP API
+
+Lock this list:
+
+| Method | Path | Contract |
+| --- | --- | --- |
+| `GET` | `/api/v2/instances` | Returns `[]InstanceDescriptor`. |
+| `GET` | `/api/v2/instances/{instance_id}` | Returns one `InstanceDescriptor`. |
+| `POST` | `/api/v2/instances/{instance_id}/runs` | `start_run` request. |
+| `GET` | `/api/v2/instances/{instance_id}/runs` | Returns runs. |
+| `GET` | `/api/v2/instances/{instance_id}/runs/{run_id}` | Returns run. |
+| `POST` | `/api/v2/instances/{instance_id}/runs/{run_id}/steps` | `submit_task` request. |
+| `POST` | `/api/v2/instances/{instance_id}/runs/{run_id}/abort` | `abort_run` request. |
+| `GET` | `/api/v2/instances/{instance_id}/runs/{run_id}/gates` | Returns gates for run. |
+| `GET` | `/api/v2/steps/{step_id}` | Returns step. |
+| `GET` | `/api/v2/steps/{step_id}/result` | Returns result. |
+| `GET` | `/api/v2/steps/{step_id}/validations` | Returns validations. |
+| `GET` | `/api/v2/steps/{step_id}/artifacts` | Returns artifacts. |
+| `POST` | `/api/v2/steps/{step_id}/wait` | `wait_step` request/response. |
+| `POST` | `/api/v2/steps/{step_id}/retry` | `retry_step` request. |
+| `GET` | `/api/v2/artifacts/{artifact_id}/content` | `artifact content response`. |
+| `POST` | `/api/v2/gates/{gate_id}/approve` | `gate action response`. |
+| `POST` | `/api/v2/gates/{gate_id}/reject` | `gate action response`. |
+
+### 4.2 Connector-Facing Relay API
+
+Lock this list:
+
+| Method | Path | Contract |
+| --- | --- | --- |
+| `POST` | `/api/v2/connectors/enroll` | Connector enrollment. |
+| `POST` | `/api/v2/connectors/challenge` | Connector challenge. |
+| `GET` | `/ws/connectors` | Websocket upgrade. First message is `hello`. |
+
+### 4.3 Relay-Side MCP Tool List
+
+Lock this list:
+
+- `codencer.list_instances`
+- `codencer.get_instance`
+- `codencer.start_run`
+- `codencer.get_run`
+- `codencer.submit_task`
+- `codencer.get_step`
+- `codencer.wait_step`
+- `codencer.get_step_result`
+- `codencer.list_step_artifacts`
+- `codencer.get_artifact_content`
+- `codencer.get_step_validations`
+- `codencer.approve_gate`
+- `codencer.reject_gate`
+- `codencer.abort_run`
+- `codencer.retry_step`
+
+Rules:
+
+- No raw shell.
+- No arbitrary filesystem access.
+- No direct connector/session management tools for planners.
+
+### 4.4 Local Daemon Additions Required
+
+Required additions to support the locked relay contract cleanly:
+
+| Addition | Why |
+| --- | --- |
+| Allow remote abort passthrough by permitting `PATCH /api/v1/runs/{id}` in connector allowlist and relay route table. | Remote cancellation is otherwise missing. |
+| `GET /api/v1/gates/{id}` or equivalent gate action response body. | Relay cannot return a truthful `gate action response` without reading the updated gate. |
+| `GET /api/v1/artifacts/{id}` or equivalent metadata source by artifact ID. | Relay cannot build the locked JSON artifact-content response from content bytes alone. |
+
+Not required:
+
+- No daemon-side `wait_step` endpoint. Relay can poll current step/result surfaces.
+
+## 5. Exact Dependency Graph
+
+### 5.1 Must Land Before Connector
+
+- Stable daemon `InstanceInfo.ID`.
+- Runtime-derived `/api/v1/compatibility`.
+- Honest abort semantics in `RunService`.
+- Repo-root correctness in run/recovery paths.
+- Service-backed artifact/log retrieval.
+- Result envelope normalization.
+
+Reason:
+
+- Connector identity, instance discovery, and remote truthfulness depend on these existing daemon guarantees.
+
+### 5.2 Must Land Before Relay
+
+- Connector config persistence.
+- Connector enrollment.
+- Connector hello and heartbeat contracts.
+- Connector allowlist for all planner-approved proxy paths.
+- Stable instance descriptor shape.
+
+Reason:
+
+- Relay cannot truthfully list or route instances until connector registration and presence are stable.
+
+### 5.3 Must Land Before MCP
+
+- Planner-facing relay HTTP API must be fixed first.
+- `wait_step`, `abort_run`, artifact-content, and gate-action relay responses must be stable first.
+- Resource routing must not depend on accidental observation of unrelated responses.
+
+Reason:
+
+- Relay-side MCP should be a thin mapping over stable relay APIs, not a second place where semantics are invented.
+
+### 5.4 Overlap and Conflict Areas
+
+| Area | Conflict |
+| --- | --- |
+| `internal/app/routes.go` vs connector allowlist | Adding a daemon route is insufficient if connector still blocks it. |
+| `domain.InstanceInfo` / compatibility types vs relay storage | Relay persistence and planner-facing discovery must stay aligned with daemon identity shape. |
+| Gate/action contracts | Relay cannot promise gate response bodies until daemon can supply or relay can reconstruct gate state. |
+| Artifact-content contracts | Relay JSON response needs artifact metadata source; current raw passthrough is insufficient. |
+| MCP tool naming | Local `orchestrator.*` and relay `codencer.*` tool names are different surfaces. Do not merge them implicitly. |
+
+## 6. Acceptance Criteria Per Phase
+
+### Phase 0: Core Hardening
+
+- Daemon repo-root behavior is independent of process cwd.
+- `PATCH /api/v1/runs/{id}` is honest about cancellation.
+- Run, step, and gate terminal state reconciliation is stable.
+- `/api/v1/instance` returns stable `id`.
+- `/api/v1/compatibility` reflects runtime truth, not hardcoded claims.
+- `/api/v1/artifacts/{id}/content` and `/api/v1/steps/{id}/logs` are service-backed.
+- `schemas/result.schema.json` matches actual result state surface.
+
+### Phase 1: Connector
+
+- Connector persists enrollment config locally.
+- Connector discovers local daemon identity via `/api/v1/instance`.
+- Connector fetches runtime compatibility via `/api/v1/compatibility`.
+- Connector authenticates to relay with the locked enrollment/challenge/hello contract.
+- Connector heartbeat updates relay presence.
+- Connector only proxies allowlisted daemon operations.
+- Connector supports remote abort passthrough.
+
+### Phase 2: Relay
+
+- Relay persists connectors, instance descriptors, routes, and audit events in sqlite.
+- Relay exposes the locked planner-facing HTTP API.
+- Relay lists normalized instance descriptors with truthful online/offline state.
+- Relay supports run creation, step submission, wait, artifact fetch, gate action, and abort.
+- Relay routing does not require accidental prior observation to resolve core resources.
+
+### Phase 3: Relay-Side MCP
+
+- Relay-side MCP is a thin mapping over the planner-facing relay HTTP API.
+- Tool list matches Section 4.3 exactly.
+- No raw shell or arbitrary file access is exposed.
+- Error handling preserves upstream truth and does not synthesize completion.
+
+### Phase 4: Integration and Docs
+
+- End-to-end tests cover planner -> relay -> connector -> local daemon -> adapter -> result/artifact/gate paths.
+- Self-host docs match the locked contracts in this file.
+- Windows/WSL and Antigravity topology docs match the actual supported path.
+- Docs do not claim challenge, heartbeat, abort, or wait support until those contracts are actually implemented.
+
+## 7. Recommended Next Implementation Order
+
+1. Finish connector/relay auth and presence contracts:
+ - challenge endpoint
+ - hello proof
+ - heartbeat
+ - normalized instance descriptor
+2. Add remote cancellation and wait:
+ - connector allowlist for run abort
+ - relay `PATCH /runs/{id}`
+ - relay `POST /steps/{id}/wait`
+3. Add resource-specific truth surfaces needed by relay:
+ - local `GET /api/v1/gates/{id}` or equivalent
+ - local `GET /api/v1/artifacts/{id}` or equivalent
+4. Normalize planner-facing relay responses:
+ - JSON artifact-content response
+ - JSON gate-action response
+5. Only then expand relay-side MCP to the locked tool list.
+
+## 8. Unresolved Risks
+
+- Relay routing for `step`, `artifact`, and `gate` IDs now probes authorized online shared instances when stored route hints are missing, but still fails closed when no match is online or multiple matches exist.
+- Connector presence now uses signed challenge/response plus heartbeat-driven session state, but relay status is still alpha-grade operational metadata rather than enterprise fleet management.
+- Gate action responses can be routed directly through the local gate read surface, but richer planner-facing gate summaries are still lightweight.
+- Artifact lookup now has a local metadata-by-ID surface, but artifact transfer remains intentionally bounded and not designed for bulk binary delivery.
+- Local and relay MCP shims are still JSON-RPC-like compatibility layers, not a standard MCP server transport.
diff --git a/docs/internal/BETA_CONFIRMATION_REPORT.md b/docs/internal/BETA_CONFIRMATION_REPORT.md
new file mode 100644
index 0000000..26df0a6
--- /dev/null
+++ b/docs/internal/BETA_CONFIRMATION_REPORT.md
@@ -0,0 +1,76 @@
+# Beta Confirmation Report
+
+Decision date: 2026-04-23
+
+Release truth: `v0.2.0-beta`
+
+Beta confirmed: `yes`
+
+## Commands Run
+
+Fresh Phase 7 confirmation evidence:
+
+- `make build-supported`
+- `./scripts/smoke_test_v1.sh`
+- `./scripts/smoke_test_v1.sh`
+- `make smoke`
+- `PLANNER_TOKEN=... RELAY_CONFIG=... RELAY_URL=... DAEMON_URL=... SMOKE_SCENARIOS=status,audit,share-control,multi-instance,mcp,mcp-sdk ./scripts/self_host_smoke.sh`
+- `go test ./internal/cloud/... -count=1`
+- `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1`
+- `make cloud-smoke`
+- `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:18085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1 ./scripts/cloud_smoke.sh`
+- `make verify-beta`
+- detached temporary `git worktree` run of `make build-supported && make verify-beta`
+- `make cloud-stack-smoke`
+- `make verify-beta-docker`
+
+## Outcomes
+
+| Check | Outcome | Notes |
+| --- | --- | --- |
+| Supported build surface | Pass | `make build-supported` rebuilt the main supported binaries plus the MCP SDK smoke helper. Local `/usr/local/opt/grpc/lib` linker warnings still appeared, but they were non-blocking. |
+| Local smoke confirmation | Pass | `./scripts/smoke_test_v1.sh` passed twice and `make smoke` passed, preserving the documented local beta proof from the public entrypoints. |
+| Fresh self-host relay/runtime confirmation | Pass | The fresh self-host smoke with `status,audit,share-control,multi-instance,mcp,mcp-sdk` re-proved relay HTTP, share-control, multi-instance routing, canonical relay MCP, and official Go SDK interop. |
+| Cloud regression + smoke confirmation | Pass | `go test ./internal/cloud/... -count=1`, `make cloud-smoke`, and the composed cloud smoke with `CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:18085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1` all passed. |
+| Relay/cloud MCP + SDK confirmation | Pass | `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1` re-proved the canonical relay/cloud MCP paths and kept the official Go SDK helper green. |
+| Repo-level supported verification | Pass | `make verify-beta` reran main-module tests, local smoke, self-host relay/runtime MCP+SDK smoke, cloud binary smoke, and Docker compose config validation from the active checkout. |
+| Fresh-location repo verification | Pass | A detached temporary `git worktree` reran `make build-supported && make verify-beta` successfully from a fresh location. |
+| Docker-backed cloud stack proof | Pass | `make cloud-stack-smoke` passed on a host with a live Docker daemon. |
+| Final-tree Docker-inclusive verifier | Pass | `make verify-beta-docker` re-ran the supported repo verifier plus the Docker-backed cloud stack baseline after the Phase 7 doc normalization updates landed. |
+
+## Notes On Environment And Method
+
+- The fresh-location confirmation proof used a detached temporary `git worktree` at the current `HEAD` before running `make build-supported && make verify-beta`.
+- That method preserved the Git metadata required by worktree-sensitive checks while still proving the repo from a fresh location away from the active checkout.
+- `make cloud-stack-smoke` ran on a host with a live Docker daemon.
+- Local `/usr/local/opt/grpc/lib` linker search-path warnings still appeared during build-oriented steps, but they did not block any required proof.
+
+## Decision Reasoning
+
+Beta is confirmed because all of the frozen Phase 0 through Phase 6 blocker classes are closed and the fresh Phase 7 proofs are green:
+
+- local-only proof is green
+- self-host relay/runtime proof is green
+- self-host cloud binary and composed cloud proof are green
+- Docker-backed cloud stack proof is green
+- planner/client MCP and official Go SDK proof are green
+- provider connector proof remains green within the documented narrow scope
+- public docs, version strings, and frozen internal beta docs now agree on the same support contract
+
+## Remaining Outside The Beta Promise
+
+These items remain intentionally outside the current beta promise:
+
+- `agent-broker`
+- VS Code extension runtime proof
+- daemon-local MCP as a public remote contract
+- secondary adapters such as `qwen`, `antigravity*`, `ide-chat`, and `openclaw-acpx`
+- product-specific ChatGPT, Claude Code, Claude Desktop, or marketplace publication proof
+- live vendor-account proof for every provider
+- enterprise IAM / SSO
+- billing
+- cloud UI / public SaaS productization
+
+## Remaining Non-Blocking Open Items
+
+- BG-020: local linker environment still emits `/usr/local/opt/grpc/lib` search-path warnings during builds, but all required builds and proofs passed.
diff --git a/docs/internal/BETA_FINALIZATION_PLAN.md b/docs/internal/BETA_FINALIZATION_PLAN.md
new file mode 100644
index 0000000..3774dee
--- /dev/null
+++ b/docs/internal/BETA_FINALIZATION_PLAN.md
@@ -0,0 +1,266 @@
+# Beta Finalization Plan
+
+Status: Beta-confirmed final audited record
+
+Last audited: 2026-04-23
+
+Source of truth: current repository code, tests, smoke runs, and build surfaces. Older internal docs are historical unless they still match code.
+
+This document preserves the frozen beta phase plan plus the final audited outcomes. Phase ordering below is historical; beta was confirmed in Phase 7 on 2026-04-23.
+
+## Current State
+
+- Current release truth is `v0.2.0-beta`.
+- The repo is beta-confirmed as of 2026-04-23.
+- The repo is already larger than the older local-only plan. It now contains:
+ - local daemon + CLI + storage + worktree execution
+ - self-host relay + runtime connector + relay MCP
+ - self-host cloud control-plane beta track
+ - provider connector platform
+ - secondary IDE/broker surfaces
+
+## Repo Truth Rules
+
+- Code beats docs when they disagree.
+- Tests and smoke runs beat older summaries when they disagree.
+- Out-of-beta-scope does not imply deletion.
+- Secondary or compatibility surfaces can stay in tree if they are classified truthfully.
+- Beta must be a contract, not a hope.
+
+## Target Beta Definition
+
+A truthful beta means all of the following are true at the same time:
+
+1. The local daemon + CLI path is stable, documented, and publicly testable.
+2. The self-host relay + connector path is stable, documented, and publicly testable over narrow HTTP and MCP contracts.
+3. The self-host cloud control-plane path is stable, scoped correctly, and publicly testable for bootstrap, tenancy, runtime claim/list/proxy, and audit.
+4. Planner/client compatibility claims are narrow and proven. No universal client claims.
+5. Provider connector claims are narrow and proven. No vendor-depth completeness claims.
+6. Public docs, internal docs, version strings, smoke commands, and support labels all agree.
+
+## Supported Beta Tracks
+
+These are the tracks the repo should finalize for beta:
+
+1. Local runtime track
+ - `orchestratord`
+ - `orchestratorctl`
+ - SQLite ledger
+ - worktree/provisioning/artifact/validation flow
+ - simulation-mode smoke
+ - narrow primary adapter promise
+2. Self-host relay/runtime track
+ - `codencer-relayd`
+ - `codencer-connectord`
+ - connector enrollment/session/share/status/discover
+ - relay HTTP planner surface
+ - relay MCP public surface
+ - official Go SDK interoperability for relay MCP
+3. Self-host cloud track
+ - `codencer-cloudd`
+ - `codencer-cloudctl`
+ - `codencer-cloudworkerd`
+ - org/workspace/project/membership/token/install/event/audit flows
+ - runtime connector claim/list/instance visibility
+ - tenant-scoped runtime HTTP/MCP proxying in composed mode
+4. Planner/client integration track
+ - generic relay HTTP client path
+ - generic relay MCP client path
+ - official Go SDK path to relay MCP
+ - generic cloud HTTP client path
+ - generic cloud MCP path in composed mode
+5. Provider connector track
+ - GitHub
+ - GitLab
+ - Jira
+ - Linear
+ - Slack
+ - narrow validate/ingest/action/status claims only
+
+## Excluded But Kept
+
+These can remain in repo without being part of the beta promise:
+
+- VS Code extension
+- `agent-broker`
+- `ide-chat` adapter
+- `qwen` adapter
+- `openclaw-acpx` adapter
+- `antigravity` and `antigravity-broker` as primary beta paths
+- daemon-local `/mcp/call`
+- relay `/mcp/call` and cloud `/api/cloud/v1/mcp/call` beyond compatibility claims
+- public SaaS UI
+- enterprise IAM / SSO
+- billing
+- universal client compatibility claims
+- hard kill / hard cancellation claims
+- cloud-native runtime enrollment lifecycle
+- vendor-depth provider automation completeness
+
+## Frozen Phase Plan And Exit Criteria
+
+### Phase 1: Local Core Finalization
+
+Goal:
+- Freeze the canonical local beta promise and primary adapter set.
+
+Exit criteria:
+- Local smoke remains green.
+- Legacy six-input local submission proof is either repeatably green or explicitly downgraded from beta promise.
+- Policy/schema contract drift is resolved or excluded from beta promise.
+- Primary adapter support table is explicit and truthful.
+
+### Phase 2: Relay + Runtime Connector Finalization
+
+Goal:
+- Freeze the self-host relay/connector contract.
+
+Exit criteria:
+- Share/unshare behavior is deterministic and truthful.
+- Self-host HTTP + MCP + SDK smoke paths are first-class and documented.
+- Connector status/discover/share/unshare docs match real behavior.
+- Relay/connector auth, scope, and route behavior remain green under tests.
+
+### Phase 3: Cloud Self-Host Finalization
+
+Goal:
+- Make the cloud control plane safe enough to claim beta for self-host use.
+
+Exit criteria:
+- Tenant scope leaks are fixed.
+- Token, event, audit, and runtime route authorization is correct.
+- Runtime proxy paths are exercised in tests and smoke.
+- Cloud docs stay narrow and honest.
+
+### Phase 4: Planner / Client Integration Finalization
+
+Goal:
+- Freeze planner/client compatibility claims.
+
+Exit criteria:
+- Relay HTTP, relay MCP, and relay official SDK claims are explicitly proven.
+- Cloud HTTP/MCP compatibility claims are either proven or narrowed.
+- Local daemon MCP remains compatibility-only and is labeled that way everywhere.
+- No product-specific ChatGPT/Claude overclaims remain.
+
+### Phase 5: Provider Connector Finalization
+
+Goal:
+- Promote a narrow provider matrix to beta-ready truth.
+
+Exit criteria:
+- Jira webhook deferment is enforced or implemented honestly.
+- Event history and action logging are fit for audit.
+- Per-provider mock smoke coverage exists for validate/ingest/action/status.
+- Provider docs match code exactly.
+
+### Phase 6: Release Engineering / Public Testability
+
+Goal:
+- Make the beta verifiable by outsiders without tribal knowledge.
+
+Exit criteria:
+- Build/test/smoke commands are stable.
+- Docker compose baseline is validated and smoke-tested.
+- Historical internal docs are no longer used as current release truth.
+- Secondary surfaces are either fixed or clearly excluded from beta.
+
+### Phase 7: Beta Confirmation
+
+Goal:
+- Confirm beta without adding new scope.
+
+Exit criteria:
+- Every beta-blocker gap is closed or explicitly removed from the beta promise.
+- Final verification matrix is green for the beta tracks.
+- README, CHANGELOG, Makefile versioning, and support matrix agree.
+- Repo state is still stable after a full confirmation pass.
+
+## Beta Confirmation Criteria
+
+Beta can be claimed only when:
+
+- `go test ./...` passes in the main module.
+- beta-track smoke commands pass from documented entrypoints.
+- cloud scope bugs and relay/share correctness gaps are closed.
+- provider connector claims are narrowed to what is actually proven.
+- public docs and internal beta docs agree on the same support contract.
+- remaining secondary surfaces are clearly labeled as non-beta.
+
+## Release Readiness Criteria
+
+Release-readiness for the beta merge requires:
+
+- truthful version string
+- truthful changelog entry
+- stable build commands
+- stable smoke commands
+- no known P1 security/scope gaps in beta-track surfaces
+- explicit support/classification matrix
+- explicit gap register
+- explicit workstream/ownership plan
+- explicit verification matrix
+
+## Phase 0 Outcome
+
+- Phase 0 does not declare beta.
+- Phase 0 does define the beta contract and the remaining work.
+- Current repo state remained alpha until later phases closed the registered blockers and the final confirmation pass reran the frozen matrix.
+
+## Phase 1 WS-C1 Outcome (2026-04-23)
+
+- WS-C1 closed the cloud scope/security blockers from Phase 0: token-revocation scope, event scoping, audit scoping, and runtime HTTP under-enforcement.
+- WS-C1 also closed the thin-proof gaps for cloud runtime HTTP, cloud MCP streamable behavior, and official Go SDK access to `/api/cloud/v1/mcp`.
+- Public cloud docs and smoke entrypoints were updated so the documented proof now matches the actual repo behavior.
+- WS-C1 did not promote the whole repo to beta. The remaining unfinished areas at that point were relay, local, provider, release, and broader planner/client work.
+- At that point, Docker-based packaging proof remained a later release-engineering task because the Docker-backed smoke proof had not yet run on a Docker-capable host.
+
+## Phase 2 WS-R1 Outcome (2026-04-23)
+
+- WS-R1 closed the Phase 0 relay/runtime blocker around `share --instance-id` persisting false-positive shared state.
+- WS-R1 also hardened relay/runtime correctness around live-set re-advertise, stale connector-session pruning, and relay MCP principal parity.
+- Public relay/connector/self-host docs and smoke entrypoints were updated so the documented proof now matches the actual self-host relay/runtime behavior.
+- WS-R1 did not promote the whole repo to beta. The remaining unfinished areas at that point were local-core proof, broader planner/client freezing, provider work, and release verification.
+
+## Phase 3 WS-L1 Outcome (2026-04-23)
+
+- WS-L1 closed the conflicting local parity evidence from Phase 0 by fixing the local same-run wait/finalization race and re-running the legacy six-input smoke twice successfully.
+- WS-L1 also hardened local truth around step results and retry lifecycle: gated/rejected/manual-attention states now surface correctly in `step result`, and retry moves the parent run back to `running` immediately.
+- Public local docs and internal support labels were updated so the local adapter matrix, compatibility surfaces, and local smoke entrypoints now match the actual repo proof.
+- WS-L1 did not promote the whole repo to beta. The remaining unfinished areas at that point were planner/client freezing, provider work, release/public repeatability, and final beta confirmation.
+
+## Phase 4 WS-P1 Outcome (2026-04-23)
+
+- WS-P1 froze the public and internal planner/client compatibility matrix against the current repo truth instead of historical expectations.
+- Relay HTTP, relay MCP, cloud HTTP, cloud MCP, and official Go SDK access to relay/cloud MCP are now explicitly documented as proven within narrow repo-exercised scope.
+- Generic MCP clients remain expected-only, while ChatGPT-style and Claude-style paths remain compatibility-only; the local daemon stays out of the public remote MCP promise.
+- Public planner/client docs now include a cloud MCP tools page, generic HTTP/MCP examples, and checked-in Claude Code style `.mcp.json` examples for local tester packaging.
+- WS-P1 did not promote the whole repo to beta. The remaining unfinished areas at that point were provider connector finalization, release/public repeatability, and final beta confirmation.
+
+## Phase 5 WS-PC1 Outcome (2026-04-23)
+
+- WS-PC1 closed the provider connector beta blockers from Phase 0: repeated webhook history overwrite, Jira webhook deferment drift, and incomplete provider action/audit logging.
+- Connector event history is now append-only instead of overwrite-on-conflict, and webhook/poll ingests persist enough metadata to reconstruct provider deliveries more truthfully in the cloud store.
+- Jira remains polling-first by design, and the routed webhook surface now rejects Jira webhook calls truthfully instead of ingesting them as if they were supported.
+- Provider action logs now persist request payloads, response payloads, and completion timestamps, and provider webhook failure/deferment paths now leave explicit audit evidence.
+- Public provider docs and internal support matrices now freeze the provider matrix to narrow, code-backed claims: Slack is the strongest local tester path, while GitHub, GitLab, Jira, and Linear remain intentionally narrower operator/package surfaces.
+- WS-PC1 did not promote the whole repo to beta. The remaining unfinished areas at that point were release/public repeatability and final beta confirmation.
+
+## Phase 6 WS-RE1 Outcome (2026-04-23)
+
+- WS-RE1 added explicit repo-level public verification entrypoints: `make build-supported`, `make verify-beta`, and `make verify-beta-docker`.
+- WS-RE1 also added a visible CI workflow under `.github/workflows/public-testability.yml` that mirrors the supported non-Docker public verification path.
+- Public tester routing is now explicit: README, setup, relay, cloud, planner/client, and provider docs all point to the right track instead of forcing testers to infer the repo promise from scattered pages.
+- A new public tester guide (`docs/BETA_TESTING.md`) now freezes the supported track matrix, exact commands, and the current support boundaries in one place.
+- Deployment packaging truth is tighter: the Docker cloud image now takes its version string from compose/build args instead of only a hard-coded Dockerfile literal, and the docs now distinguish Docker baseline proof from binary-native composed proof.
+- WS-RE1 completed a full supported non-Docker verification pass from the active checkout and repeated that same pass from a detached temporary `git worktree` at the current `HEAD`.
+- At the WS-RE1 handoff, the remaining repo-wide step was final beta confirmation, including a rerun of Docker-backed stack smoke on a Docker-capable host.
+
+## Phase 7 Beta Confirmation Outcome (2026-04-23)
+
+- The frozen beta verification matrix was rerun from the working tree and from a detached temporary `git worktree` at the current `HEAD`.
+- Fresh Phase 7 confirmation evidence included `make build-supported`, `./scripts/smoke_test_v1.sh` twice, `make smoke`, a fresh self-host smoke with `status,audit,share-control,multi-instance,mcp,mcp-sdk`, `go test ./internal/cloud/... -count=1`, `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1`, `make cloud-smoke`, the composed cloud smoke with `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:18085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1`, `make verify-beta`, the detached temporary `git worktree` rerun of `make build-supported && make verify-beta`, `make cloud-stack-smoke`, and the final-tree `make verify-beta-docker` rerun.
+- `make cloud-stack-smoke` passed on a host with a live Docker daemon.
+- Local `/usr/local/opt/grpc/lib` linker warnings still appeared during build-oriented steps, but they remained non-blocking.
+- The repo status, version strings, and public tester docs now agree on `v0.2.0-beta`.
+- Beta is confirmed for the supported local, self-host relay/runtime, self-host cloud, planner/client integration, and provider connector tracks, with the previously frozen compatibility/deferred boundaries unchanged.
diff --git a/docs/internal/BETA_GAP_REGISTER.md b/docs/internal/BETA_GAP_REGISTER.md
new file mode 100644
index 0000000..caf2d5f
--- /dev/null
+++ b/docs/internal/BETA_GAP_REGISTER.md
@@ -0,0 +1,180 @@
+# Beta Gap Register
+
+Severity scale:
+
+- `critical`: direct security/scope blocker
+- `high`: correctness blocker for a beta track
+- `medium`: important proof/contract gap
+- `low`: useful cleanup, not a beta gate by itself
+
+This register preserves the historical blocker list plus the phase-by-phase closure record. As of 2026-04-23, no beta-blocking gaps remain open.
+
+References below to WS-specific "remain outside" or "remaining work" notes describe the handoff state at the end of that phase, not current open beta blockers.
+
+| ID | Title | Category | Impact | Severity | Affected files / areas | Beta-blocker | Next phase | Owner / workstream |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| BG-001 | Cloud token revocation ignores tenant scope | Cloud control plane | A scoped token can revoke another tenant token if it knows the ID. | `critical` | `internal/cloud/router.go`, `internal/cloud/store.go` | Yes | Phase 3 | WS-C1 Cloud |
+| BG-002 | Cloud events API leaks cross-tenant event history | Cloud control plane | `GET /api/cloud/v1/events` can expose events outside the caller scope when `installation_id` is omitted. | `critical` | `internal/cloud/router.go`, `internal/cloud/store.go` | Yes | Phase 3 | WS-C1 Cloud |
+| BG-003 | Cloud runtime HTTP route scopes are under-enforced | Cloud control plane | Cloud HTTP can proxy step/gate operations with weaker scopes than intended. | `critical` | `internal/cloud/runtime_api.go`, `internal/relay/auth.go` | Yes | Phase 3 | WS-C1 Cloud |
+| BG-004 | Cloud audit API is only org-filtered | Cloud control plane | Workspace/project-scoped callers can read sibling audit rows. | `high` | `internal/cloud/router.go`, `internal/cloud/auth.go` | Yes | Phase 3 | WS-C1 Cloud |
+| BG-005 | `share --instance-id` can persist an unroutable shared instance | Relay / connector | Operators can see a "shared" entry that will never advertise or route. | `high` | `internal/connector/admin.go`, `internal/connector/registry.go`, `docs/CONNECTOR.md` | Yes | Phase 2 | WS-R1 Relay/Connector |
+| BG-006 | Provider webhook history overwrites repeated events | Provider connectors | Event storage is too shallow for a trustworthy connector audit trail. | `high` | `internal/cloud/router.go`, `internal/cloud/store.go`, provider normalizers | Yes | Phase 5 | WS-PC1 Providers |
+| BG-007 | Jira webhook deferment is not enforced in runtime code | Provider connectors | Docs say Jira webhook ingest is deferred, but the route can still accept it. | `high` | `internal/cloud/connectors/jira.go`, `internal/cloud/router.go`, `docs/CLOUD*.md` | Yes | Phase 5 | WS-PC1 Providers |
+| BG-008 | Provider action logs omit request body and completion time | Provider connectors | Publishability and audit quality are weaker than the control-plane story implies. | `medium` | `internal/cloud/router.go`, `internal/cloud/store.go` | Yes | Phase 5 | WS-PC1 Providers |
+| BG-009 | Cloud runtime HTTP proxy paths are thinner in proof than relay | Planner/client + cloud | Claim/list/visibility is tested, but live run/step proxy proof is still weak. | `medium` | `internal/cloud/runtime_api.go`, `internal/cloud/runtime_api_test.go` | Yes | Phase 3 | WS-C1 Cloud |
+| BG-010 | Cloud MCP streamable compatibility proof is incomplete | Planner/client integration | No direct proof for cloud SSE `GET`, `DELETE`, call alias, browser-origin handling, or SDK interop. | `medium` | `internal/cloud/mcp_server.go`, `internal/cloud/mcp_server_test.go`, `cmd/mcp-sdk-smoke` | Yes | Phase 4 | WS-P1 Planner/Clients |
+| BG-011 | Nested broker module tests do not compile | Secondary surface | `cmd/broker` builds, but its own test suite is broken. | `medium` | `cmd/broker/task_test.go` | No | Phase 6 | WS-S1 Secondary |
+| BG-012 | VS Code extension exists without meaningful repo proof | Secondary surface | Extension remains a code surface that is easy to overclaim. | `medium` | `extension/*` | No | Phase 6 | WS-S1 Secondary |
+| BG-013 | Agent-broker task sessions are in-memory only | Secondary surface | Restart orphaning makes it unsafe to include in the beta promise. | `medium` | `cmd/broker/main.go`, `cmd/broker/README.md` | No | Phase 6 | WS-S1 Secondary |
+| BG-014 | Release automation is manual-only | Release engineering | No repo CI pipeline is visible; public beta proof is still operator-driven. | `medium` | repo root, `Makefile`, scripts, missing `.github/` workflows | Yes | Phase 6 | WS-RE1 Release |
+| BG-015 | Historical internal docs contradict current repo truth | Release engineering | Older docs still imply "no cloud" or overclaim completion/test stability. | `medium` | `docs/internal/*`, `docs/10_implementation_prompts.md` | Yes | Phase 6 | WS-RE1 Release |
+| BG-016 | Policy schema does not match runtime policy model | Local core | Weakens any stable external policy contract claim. | `medium` | `schemas/policy.schema.json`, `internal/domain/policy.go`, `internal/service/policy_service.go` | No | Phase 1 | WS-L1 Local Core |
+| BG-017 | Primary adapter proof is uneven beyond simulation | Local core | Codex/Claude/Qwen support is present, but release claims must stay narrow until proof is stronger. | `medium` | `internal/adapters/*`, `README.md`, smoke coverage | Yes | Phase 1 | WS-L1 Local Core |
+| BG-018 | Legacy same-run local parity evidence is conflicting | Local core | One delegated run failed, while the direct Phase 0 rerun passed. The path needs deterministic repeatability before being promised. | `medium` | `scripts/smoke_test_v1.sh`, `internal/service/run_service.go`, `cmd/orchestratorctl/main.go` | No | Phase 1 | WS-L1 Local Core |
+| BG-019 | Cloud docs overstate current smoke coverage for events | Docs / release truth | `docs/CLOUD.md` says `scripts/cloud_smoke.sh` covers events, but it does not. | `low` | `docs/CLOUD.md`, `scripts/cloud_smoke.sh` | No | Phase 6 | WS-RE1 Release |
+| BG-020 | Linker environment still emits `grpc` search-path warnings | Release engineering | Builds succeed, but the build environment carries noisy warning output. | `low` | local toolchain / build env | No | Phase 6 | WS-RE1 Release |
+
+## Phase 1 WS-C1 Update (2026-04-23)
+
+This section records the cloud-control outcomes from the Phase 1 WS-C1 execution round without rewriting the historical Phase 0 table above.
+
+| Gap | Status after WS-C1 | Evidence | Notes |
+| --- | --- | --- | --- |
+| BG-001 | `closed` | `TestTokenRevokeRequiresAuthorizedScopeAndRevokedTokenFailsAcrossCloudSurfaces`, `go test ./internal/cloud/... -count=1` | Revocation now loads the target token, enforces tenant scope against the target org/workspace/project, and revoked tokens fail both cloud HTTP and cloud MCP auth. |
+| BG-002 | `closed` | `TestEventsListingRespectsTokenTenantScope`, `make cloud-smoke` | Event listing is now tenant-scoped even when `installation_id` is omitted, and foreign-installation reads are rejected. |
+| BG-003 | `closed` | `internal/cloud/runtime_api_test.go`, composed cloud smoke with runtime HTTP | Nested runtime proxy routes now require the intended scopes (`steps:write`, `gates:read`, `runs:write`), and the relay bridge no longer widens caller scopes implicitly. |
+| BG-004 | `closed` | `TestAuditListingRespectsProjectScope`, `go test ./internal/cloud/... -count=1` | Audit listing now respects workspace/project scope instead of only filtering at org level. |
+| BG-009 | `closed` | composed cloud smoke with claimed runtime HTTP run/create + submit-task | Cloud runtime proof now includes a live claimed-instance path instead of claim/list-only coverage. |
+| BG-010 | `closed` for cloud-side proof | `internal/cloud/mcp_server_test.go`, composed cloud smoke with `CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1` | Cloud MCP now has direct proof for SSE `GET`, `DELETE`, call alias, origin handling, session ownership, revoked-token denial, and official Go SDK interop. Broader planner/client release labeling still belongs to WS-P1. |
+| BG-019 | `closed` | `docs/CLOUD.md`, `docs/CLOUD_SELF_HOST.md`, `scripts/cloud_smoke.sh` | Public docs now match the actual smoke coverage for events, runtime HTTP, cloud MCP, and optional SDK proof. |
+
+Cloud-adjacent items that remain outside WS-C1:
+
+- `make cloud-stack-smoke` is still required on a Docker-capable host for packaging/deployment proof and stays with WS-RE1.
+- Broader planner/client compatibility wording still belongs to WS-P1 even though the cloud-side MCP/SDK proof is now present.
+
+## Phase 2 WS-R1 Update (2026-04-23)
+
+This section records the relay/runtime outcomes from the Phase 2 WS-R1 execution round without rewriting the historical Phase 0 table above.
+
+| Gap | Status after WS-R1 | Evidence | Notes |
+| --- | --- | --- | --- |
+| BG-005 | `closed` | `TestShareInstanceByInstanceIDRequiresResolvableLocalInstance`, `TestRelayConnectorUnsharePrunesRoutabilityAndReshareRestoresIt`, `SMOKE_SCENARIOS=status,audit,share-control,mcp,mcp-sdk ./scripts/self_host_smoke.sh` | `share --instance-id` now refuses to persist `share=true` unless the selector resolves back to a healthy local daemon. Unshare removes relay visibility and routing; re-share by `instance_id` restores both. |
+
+Additional relay/runtime reductions landed during WS-R1:
+
+- Relay MCP now carries the authenticated planner principal directly for internal route calls, which closes the duplicate-token-name scope/instance replay bug on the relay MCP path.
+- Connector heartbeat handling now re-advertises when the live reachable shared-instance set changes even without a config edit, which keeps relay presence aligned when a shared daemon drops out and later returns.
+- Hub pruning now removes stale per-instance mappings for a replaced connector session instead of waiting for TTL or read-loop teardown.
+- Public self-host docs and smoke entrypoints now describe the real proof boundary for relay HTTP, relay MCP, official Go SDK interop, share-control, and multi-instance targeting.
+
+Relay/runtime items that remain outside WS-R1:
+
+- Broader planner/client compatibility freezing still belongs to WS-P1 even though relay-side HTTP/MCP/SDK proof is now materially stronger.
+- Full release packaging and clean-checkout repeatability still belong to WS-RE1.
+
+## Phase 3 WS-L1 Update (2026-04-23)
+
+This section records the local-core outcomes from the Phase 3 WS-L1 execution round without rewriting the historical Phase 0 table above.
+
+| Gap | Status after WS-L1 | Evidence | Notes |
+| --- | --- | --- | --- |
+| BG-018 | `closed` | `go test ./internal/app ./internal/service ./cmd/orchestratorctl -count=1`, `./scripts/smoke_test_v1.sh` x2, `make smoke` | WS-L1 fixed the local same-run wait/finalization race by making local `step wait` follow persisted step lifecycle state before returning the result payload. The legacy six-input smoke now auto-starts a temporary simulation daemon when needed and repeated cleanly twice. |
+| BG-017 | `closed` as a beta-claim blocker | `README.md`, `docs/SETUP.md`, `docs/internal/BETA_SUPPORT_CLASSIFICATION.md`, `go test ./internal/adapters/... ./internal/mcp -count=1` | WS-L1 did not fabricate live-adapter proof. Instead it froze the local adapter table truthfully: `codex` remains simulation-heavy, `claude` keeps narrow wrapper claims, `qwen` stays secondary, and daemon-local `/mcp/call` remains compatibility-only. |
+
+Additional local-core reductions landed during WS-L1:
+
+- `GetResultByStep` now overlays post-attempt step lifecycle truth, so gated, rejected, and manual-attention steps no longer report stale "completed" results.
+- `RetryStep` now reconciles the parent run back to `running` immediately, and `orchestratorctl step retry` exposes the existing local retry route cleanly.
+- Public local docs now describe `/api/v1/compatibility` as a diagnostic surface rather than a support certificate and document the exact local smoke proof entrypoints.
+
+Local-core items that remain outside WS-L1:
+
+- BG-016 still remains as a non-beta-blocking schema/runtime policy drift item unless a later phase chooses to promote the external policy contract.
+- Repo-wide release/public repeatability still belongs to WS-RE1 even though the local proof matrix is now materially stronger.
+
+## Phase 4 WS-P1 Update (2026-04-23)
+
+This section records the planner/client outcomes from the Phase 4 WS-P1 execution round without rewriting the historical Phase 0 table above.
+
+| Gap | Status after WS-P1 | Evidence | Notes |
+| --- | --- | --- | --- |
+| BG-010 | `closed` end-to-end for planner/client release labeling | `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1`, `RELAY_CONFIG=... SMOKE_SCENARIOS=status,audit,share-control,mcp,mcp-sdk ./scripts/self_host_smoke.sh`, `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:8085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1 ./scripts/cloud_smoke.sh`, `docs/mcp/integrations.md`, `docs/mcp/cloud_tools.md` | WS-C1 had already closed the cloud-side code/proof gap. WS-P1 closed the remaining planner/client blocker by freezing the public/internal compatibility matrix, adding cloud MCP packaging parity, and keeping ChatGPT/Claude product paths at compatibility-only instead of overclaiming direct proof. |
+
+Additional planner/client reductions landed during WS-P1:
+
+- The public planner/client contract now names relay `/mcp` and cloud `/api/cloud/v1/mcp` as the canonical remote MCP session paths and pushes the `*/mcp/call` endpoints down to compatibility-only aliases.
+- Public docs now expose repo-proven generic relay/cloud HTTP and MCP entrypoints, plus checked-in Claude Code style `.mcp.json` examples for local tester packaging.
+- Internal support labels now distinguish `proven`, `expected-only`, and `compatibility-only` planner/client paths instead of treating all product-style integrations as vague docs-only expectations.
+
+Planner/client items that remain outside WS-P1:
+
+- Product-specific ChatGPT, Claude Code, Claude Desktop, Claude.ai, or Anthropic/OpenAI API publication workflows remain compatibility-only and are not direct repo proof.
+- Repo-wide release/public repeatability still belongs to WS-RE1 even though the planner/client proof matrix is now materially stronger.
+
+## Phase 5 WS-PC1 Update (2026-04-23)
+
+This section records the provider-connector outcomes from the Phase 5 WS-PC1 execution round without rewriting the historical Phase 0 table above.
+
+| Gap | Status after WS-PC1 | Evidence | Notes |
+| --- | --- | --- | --- |
+| BG-006 | `closed` | `TestStoreCreateConnectorEventPreservesRepeatedSourceEventHistory`, `TestWebhookHistoryPreservesRepeatedSourceEventIDs`, `go test ./internal/cloud/... -count=1` | Connector event history is now append-only. Migration 4 rebuilds `connector_events` without the overwrite-on-conflict constraint, and webhook/poll ingests now keep provider metadata instead of silently replacing prior rows. |
+| BG-007 | `closed` | `TestJiraWebhookRouteReturnsDeferredWithoutPersistingEvents`, `go test ./internal/cloud -run 'Test(ServerAdminAndConnectorFlows|WebhookHistoryPreservesRepeatedSourceEventIDs|JiraWebhookRouteReturnsDeferredWithoutPersistingEvents|ConnectorActionLogsCaptureRequestCompletionAndAuditDetails|WorkerRunOncePollsJiraAndPersistsSnapshot|StoreCreateConnectorEventPreservesRepeatedSourceEventHistory)' -count=1` | Jira remains polling-first. Routed webhook calls now return a truthful deferred/not-implemented response, do not persist events, and do not emit false-positive success audit rows. |
+| BG-008 | `closed` | `TestConnectorActionLogsCaptureRequestCompletionAndAuditDetails`, `go test ./internal/cloud/... -count=1` | Provider action logs now capture request payloads, response payloads, started/completed timestamps, and richer audit details. Webhook verification/deferment/normalize failures now also create explicit audit rows. |
+
+Additional provider truth frozen during WS-PC1:
+
+- Slack remains the strongest provider path with real install/bootstrap/local-test proof through routed tests and `make cloud-smoke`.
+- GitHub, GitLab, and Linear remain intentionally narrow operator/package surfaces even though validation and action code paths are directly proven in unit tests.
+- Jira remains polling-first only; action-only or webhook-driven Jira installs are not part of the beta promise.
+
+Provider items that remain outside WS-PC1:
+
+- live vendor-account proof for every provider remains out of scope for the current beta promise
+- provider-specific end-to-end smoke beyond Slack remains optional future depth, not a current beta blocker
+- a public API for listing connector action logs is still not part of the current beta contract
+
+## Phase 6 WS-RE1 Update (2026-04-23)
+
+This section records the release-engineering and public-testability outcomes from the Phase 6 WS-RE1 execution round without rewriting the historical Phase 0 table above.
+
+| Gap | Status after WS-RE1 | Evidence | Notes |
+| --- | --- | --- | --- |
+| BG-014 | `closed` | `.github/workflows/public-testability.yml`, `make verify-beta`, detached temporary `git worktree` rerun of `make build-supported && make verify-beta` | The repo now has a visible CI workflow plus explicit supported verification targets and a clean-checkout-friendly verification script. |
+| BG-015 | `closed` | `docs/internal/BETA_*.md`, `docs/10_implementation_prompts.md`, `docs/internal/GAP_AUDIT.md`, `docs/internal/PROGRESS.md`, `docs/internal/TASKS.md`, `docs/internal/IMPLEMENTATION_PLAN.md`, `docs/internal/cloud_v1_finish_log.md`, `docs/internal/v2_finish_log.md` | Frozen beta docs remain the current program truth, and older planning / backlog documents are now marked as historical instead of competing with current release guidance. |
+| BG-020 | `still open` | `make build-supported`, `make verify-beta`, detached temporary `git worktree` rerun | Builds and tests stay green, but the local linker environment still emits `/usr/local/opt/grpc/lib` search-path warnings. This remains noisy rather than beta-blocking. |
+
+Additional WS-RE1 reductions landed in this round:
+
+- `make verify-beta` is now a real repo-level verification command that self-starts the temporary relay/runtime proof instead of assuming hidden operator setup.
+- A new public tester guide freezes the supported tracks and exact commands in `docs/BETA_TESTING.md`.
+- Docker cloud image version metadata is now parameterized through compose/build args instead of living only as a hard-coded string in the Dockerfile.
+- Public README/setup/cloud/self-host docs now route testers to the correct track and state the real proof boundary for Docker baseline vs binary-native composed proof.
+
+Historical handoff items after WS-RE1:
+
+- final repo-wide beta confirmation had to rerun the frozen matrix once more without widening scope
+- Docker-backed packaging proof still depended on a Docker-capable host before the repo could claim it as directly re-verified in that finalization run
+
+## Phase 7 Final Confirmation Update (2026-04-23)
+
+Final beta confirmation reran the frozen matrix and closed the last repo-wide proof item:
+
+- `make build-supported` passed
+- `./scripts/smoke_test_v1.sh` passed twice, and `make smoke` passed
+- a fresh self-host smoke with `status,audit,share-control,multi-instance,mcp,mcp-sdk` passed
+- `go test ./internal/cloud/... -count=1` passed
+- `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1` passed
+- `make cloud-smoke` passed
+- the composed cloud smoke with `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:18085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1` passed
+- `make verify-beta` passed
+- the supported verification reran successfully from a detached temporary `git worktree` at the current `HEAD` via `make build-supported && make verify-beta`
+- `make cloud-stack-smoke` passed on a host with a live Docker daemon
+- the final-tree `make verify-beta-docker` rerun passed after the Phase 7 truth-normalization updates landed
+- local `/usr/local/opt/grpc/lib` linker warnings still appeared during build-oriented steps, but stayed non-blocking
+
+Current blocker truth after Phase 7:
+
+- no beta-blocking gaps remain open
+- BG-020 remains open as non-blocking build-noise only
+- BG-011, BG-012, BG-013, and BG-016 remain outside the beta promise unless later promoted
diff --git a/docs/internal/BETA_SUPPORT_CLASSIFICATION.md b/docs/internal/BETA_SUPPORT_CLASSIFICATION.md
new file mode 100644
index 0000000..9ddb9e3
--- /dev/null
+++ b/docs/internal/BETA_SUPPORT_CLASSIFICATION.md
@@ -0,0 +1,97 @@
+# Beta Support Classification
+
+This matrix separates current repo truth from the frozen beta contract.
+
+This is a final audited classification snapshot after Phase 7 beta confirmation. The beta contract labels below are frozen release truth, not forward-looking intent.
+
+Phase 7 note:
+
+- Beta was confirmed on 2026-04-23.
+- Every surface labeled `canonical` or `supported-beta target` below passed its frozen beta confirmation gate.
+
+## Label Legend
+
+- `canonical`: core product surface; must be right for beta.
+- `supported-beta target`: included in the frozen beta promise, but not treated as a canonical core surface.
+- `secondary`: useful but not part of the primary beta promise.
+- `compatibility`: kept for compatibility/admin reasons, not as the preferred contract.
+- `experimental`: shallow or intentionally non-primary.
+- `partial`: real code exists, but important proof or correctness is still missing.
+- `deferred`: intentionally outside the beta promise.
+
+## Local Runtime / Execution
+
+| Surface | Current label | Beta contract label | Proof level | Notes |
+| --- | --- | --- | --- | --- |
+| Daemon core (`orchestratord`, state machine, SQLite, worktrees, artifacts, validations, recovery) | `canonical` | `canonical` | Phase 3 local smoke + repo tests | WS-L1 re-proved the same-run local barrier and local evidence retrieval path. |
+| CLI (`orchestratorctl`) | `canonical` | `canonical` | Phase 3 local smoke + repo tests | WS-L1 aligned `step wait`, `step result`, and `step retry` with the persisted local lifecycle. |
+| Instance identity (`/api/v1/instance`, manifest, `orchestratorctl instance`) | `canonical` | `canonical` | Phase 3 local smoke + repo tests | Proven local identity/operator surface. |
+| Simulation mode | `canonical` | `canonical` | Phase 3 local smoke + repo tests | Primary deterministic proof path. |
+| Task/result schemas | `canonical` | `canonical` | Repo tests + runtime use | Main task/result contracts are real. |
+| `/api/v1/compatibility` | `compatibility` | `compatibility` | Repo tests + runtime use | Diagnostic surface for runtime availability/binding state, not a beta-support certificate. |
+| Policy schema + policy runtime contract | `partial` | `partial` | Code + drift audit | Schema/runtime drift still exists. |
+| `codex` adapter | `partial` | `supported-beta target` | Phase 3 local smoke + conformance tests | Primary intended local beta adapter, but current repo proof is still simulation-heavy and not live-binary proven. |
+| `claude` adapter | `partial` | `supported-beta target` | Fake-binary tests | Good wrapper proof, no live authenticated proof. |
+| `qwen` adapter | `partial` | `secondary` | Conformance tests | Kept, not primary beta promise, and still simulation-only in checked-in proof. |
+| `ide-chat` adapter | `experimental` | `deferred` | Code only/manual proxy model | Not a stable local execution contract. |
+| `openclaw-acpx` adapter | `experimental` | `deferred` | Unit tests only | Explicitly experimental. |
+| `antigravity` + `antigravity-broker` adapters | `partial` | `secondary` | Unit/integration tests | Environment-specific and not required for beta. |
+
+## Runtime Connectivity / Control
+
+| Surface | Current label | Beta contract label | Proof level | Notes |
+| --- | --- | --- | --- | --- |
+| Relay HTTP planner API | `partial` | `supported-beta target` | Self-host smoke + repo tests | Phase 2 re-proved relay HTTP over share-control and multi-instance self-host smoke. |
+| Connector enrollment/session/auth | `partial` | `supported-beta target` | Self-host smoke + repo tests | Phase 2 hardened live-set re-advertise and stale reconnect pruning without broad auth-model changes. |
+| Connector discover/list/status/share/unshare | `partial` | `supported-beta target` | Self-host smoke + repo tests | Phase 2 fixed `share --instance-id` so unresolved local instances fail loudly instead of persisting fake shared state. |
+| Relay audit/admin status/connectors/instances | `partial` | `supported-beta target` | Self-host smoke + repo tests | Honest docs remain in place and self-host smoke now exercises the share-control/admin view more directly. |
+| Best-effort abort semantics | `compatibility` | `compatibility` | README + code | Keep, but do not overclaim hard cancellation. |
+
+## Planner / Client Integrations
+
+| Surface | Current label | Beta contract label | WS-P1 proof status | Notes |
+| --- | --- | --- | --- | --- |
+| Generic relay HTTP client path | `partial` | `supported-beta target` | `proven` | Narrow bearer-token HTTP planner flow is re-proven by relay integration tests and the current self-host smoke path. |
+| Generic relay MCP client path | `partial` | `supported-beta target` | `proven` | Canonical endpoint is `/mcp`; Phase 2 and Phase 4 both re-proved initialize/list/call, SSE bootstrap, aliasing, and scoped routing. |
+| Official Go SDK path to relay MCP | `partial` | `supported-beta target` | `proven` | Explicitly proven for relay MCP only, not for relay REST HTTP. |
+| Generic cloud HTTP client path | `partial` | `supported-beta target` | `proven` | Composed cloud smoke and runtime tests now cover tenant-scoped run create/get plus submit-task over cloud HTTP. |
+| Generic cloud MCP client path | `partial` | `supported-beta target` | `proven` | Canonical endpoint is `/api/cloud/v1/mcp`; cloud-side initialize/list/call, stream/delete, aliasing, and token-bound session behavior are directly proven. |
+| Official Go SDK path to cloud MCP | `partial` | `supported-beta target` | `proven` | Explicitly proven for cloud MCP only, not for cloud REST HTTP. |
+| Generic MCP clients beyond the manual JSON-RPC callers and official Go SDK helper | `compatibility` | `compatibility` | `expected-only` | Codencer's MCP protocol surface is proven, but product-specific desktop/client interoperability is not claimed universally. |
+| ChatGPT-style planner path via relay/cloud | `compatibility` | `compatibility` | `compatibility-only` | Remote MCP pattern only. Public docs now point to the canonical relay/cloud MCP endpoints without claiming repo-executed ChatGPT setup. |
+| Claude-style planner path via relay/cloud | `compatibility` | `compatibility` | `compatibility-only` | Remote MCP pattern only and explicitly separate from the local `claude` execution adapter proof. |
+| Daemon-local `/mcp/call` | `compatibility` | `compatibility` | `compatibility-only` | Local compatibility/admin bridge, not the public remote planner MCP contract. |
+| Relay `/mcp/call` alias | `compatibility` | `compatibility` | `compatibility-only` | Repo-tested POST alias, but `/mcp` remains the canonical session path. |
+| Cloud `/api/cloud/v1/mcp/call` alias | `compatibility` | `compatibility` | `compatibility-only` | Repo-tested POST alias, but `/api/cloud/v1/mcp` remains the canonical session path. |
+| VS Code extension | `partial` | `secondary` | Code only | Exists, but proof is too thin for beta promise. |
+| `agent-broker` Windows bridge | `experimental` | `secondary` | Build only; tests broken in nested module | Keep out of beta promise. |
+
+## Cloud Control Plane
+
+| Surface | Current label | Beta contract label | Proof level | Notes |
+| --- | --- | --- | --- | --- |
+| Org/workspace/project/membership/token APIs | `partial` | `supported-beta target` | Cloud smoke + repo tests | Phase 1 fixed token-revocation scope and revoked-token denial across cloud surfaces. |
+| Installation/event/audit APIs | `partial` | `supported-beta target` | Cloud smoke + repo tests | Phase 1 fixed event and audit tenant filtering and added focused scope tests. |
+| Runtime connector claim/list/enable/disable/sync | `partial` | `supported-beta target` | Repo tests + composed cloud smoke | Claimed-runtime control is now proven in a live composed flow. |
+| Runtime instance registry/visibility | `partial` | `supported-beta target` | Repo tests + composed cloud smoke | Phase 1 added direct proof that claimed instances remain tenant-scoped and visible through cloud runtime control. |
+| Cloud runtime HTTP proxy | `partial` | `supported-beta target` | Repo tests + composed cloud smoke | Phase 1 hardened nested route scopes and added live run/create/get + submit-task proof. |
+| Cloud MCP | `partial` | `supported-beta target` | Repo tests + composed cloud smoke + SDK smoke | Phase 1 aligned cloud MCP auth/session/origin behavior with the cloud HTTP tenant model. |
+| Cloud worker (`codencer-cloudworkerd`) | `partial` | `supported-beta target` | Repo tests | Jira polling-first only. |
+| Docker self-host baseline (`deploy/cloud`) | `partial` | `supported-beta target` | Compose config + repo-level `make verify-beta` + direct `make cloud-stack-smoke` proof components | Final beta confirmation re-proved the Docker baseline on a host with a live Docker daemon. The Docker stack remains a narrow self-host baseline rather than a managed SaaS deployment story. |
+
+## Provider Connectors
+
+| Surface | Current label | Beta contract label | Proof level | Notes |
+| --- | --- | --- | --- | --- |
+| GitHub connector | `partial` | `supported-beta target` | Unit tests + provider matrix freeze | Validation, actions, and status are proven; install/bootstrap, routed ingest, audit depth, and local operator packaging remain narrow. |
+| GitLab connector | `partial` | `supported-beta target` | Unit tests + provider matrix freeze | Validation, actions, and status are proven; install/bootstrap, routed ingest, audit depth, and local operator packaging remain narrow. |
+| Jira connector | `partial` | `supported-beta target` | Unit tests + worker tests + router deferment test | Polling-first ingest is real, webhook ingest is explicitly deferred, and routed webhook calls now fail truthfully instead of silently ingesting. |
+| Linear connector | `partial` | `supported-beta target` | Unit tests + provider matrix freeze | Validation and action coverage are real, but install/bootstrap, routed ingest, audit depth, and local operator packaging remain narrow. |
+| Slack connector | `partial` | `supported-beta target` | Unit tests + router tests + cloud smoke | Strongest current provider path: install/bootstrap/local-test proof is real, but the overall surface is still intentionally narrow rather than marketplace-complete. |
+
+## Historical / Internal Surfaces
+
+| Surface | Current label | Beta contract label | Proof level | Notes |
+| --- | --- | --- | --- | --- |
+| Older internal progress/plan docs | `deferred` | `deferred` | Audit only | Historical context, not release truth. |
+| Historical "no cloud" guidance | `deferred` | `deferred` | Audit only | No longer matches repo contents. |
diff --git a/docs/internal/BETA_VERIFICATION_MATRIX.md b/docs/internal/BETA_VERIFICATION_MATRIX.md
new file mode 100644
index 0000000..de858aa
--- /dev/null
+++ b/docs/internal/BETA_VERIFICATION_MATRIX.md
@@ -0,0 +1,126 @@
+# Beta Verification Matrix
+
+This matrix records the historical phase-by-phase evidence plus the final Phase 7 confirmation pass that locked beta on 2026-04-23.
+
+## Executed In Phase 0
+
+| Scenario | Command / method | Result | Proof type | Notes |
+| --- | --- | --- | --- | --- |
+| Main module test suite | `go test ./...` | Pass | Direct Phase 0 run | Main module only. |
+| Main binaries build | `make build` | Pass | Direct Phase 0 run | Builds daemon, CLI, relay, connector. |
+| Cloud binaries build | `make build-cloud` | Pass | Direct Phase 0 run | Builds `codencer-cloud*`. |
+| Broker binary build | `make build-broker` | Pass | Direct Phase 0 run | Nested module builds successfully. |
+| MCP SDK helper build | `make build-mcp-sdk-smoke` | Pass | Direct Phase 0 run | Helper binary available. |
+| Local smoke | `make smoke` | Pass | Direct Phase 0 run | Single-step simulation proof. |
+| Legacy six-input local smoke | `make start-sim && ./scripts/smoke_test_v1.sh && make stop` | Pass | Direct Phase 0 run | Re-run before beta because one delegated run reported a conflicting failure. |
+| Self-host relay/connector smoke | `./scripts/self_host_smoke.sh` with `status,audit` | Pass | Direct Phase 0 run | HTTP relay/runtime proof. |
+| Self-host relay MCP + SDK smoke | `SMOKE_SCENARIOS=status,audit,mcp,mcp-sdk ./scripts/self_host_smoke.sh` | Pass | Direct Phase 0 run | Relay MCP and official Go SDK proof. |
+| Cloud binary smoke | `make cloud-smoke` | Pass | Direct Phase 0 run | Bootstrap/status/install/audit proof. |
+| Docker compose config validation | `make cloud-stack-config` | Pass | Direct Phase 0 run | Compose baseline renders cleanly. |
+| Broker nested module tests | `cd cmd/broker && go test ./...` | Fail | Direct Phase 0 run | `task_test.go` syntax error blocks test proof. |
+
+## Executed In Phase 1 (WS-C1 Cloud)
+
+| Scenario | Command / method | Result | Proof type | Notes |
+| --- | --- | --- | --- | --- |
+| Cloud control-plane and scope regression suite | `go test ./internal/cloud/... -count=1` | Pass | Direct Phase 1 run | Covers token revoke scope, event/audit filtering, runtime proxy scope, cloud MCP parity, and cloud SDK interop tests. |
+| Cloud MCP SDK helper compile | `go test ./cmd/mcp-sdk-smoke -count=1` | Pass | Direct Phase 1 run | Confirms the helper binary still builds cleanly after the cloud proof updates. |
+| Baseline cloud smoke | `make cloud-smoke` | Pass | Direct Phase 1 run | Proves bootstrap, status, install, webhook ingest, events, audit, and worker-once behavior. |
+| Composed cloud runtime HTTP + MCP + SDK smoke | `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=... CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1 ./scripts/cloud_smoke.sh` | Pass | Direct Phase 1 run | Proves claimed runtime visibility, run create/get, submit-task, cloud MCP initialize/list/call, and official Go SDK interoperability in one composed flow. |
+| Docker compose cloud stack smoke | `make cloud-stack-smoke` | Blocked | Direct Phase 1 run | Docker CLI was present, but the local Docker daemon/socket was unavailable (`/Users/lookman/.docker/run/docker.sock`). |
+| Docker compose config validation | `make cloud-stack-config` | Pass | Direct Phase 1 run | Compose baseline still renders cleanly even though the local Docker daemon was unavailable. |
+
+## Executed In Phase 2 (WS-R1 Relay / Runtime)
+
+| Scenario | Command / method | Result | Proof type | Notes |
+| --- | --- | --- | --- | --- |
+| Relay + connector regression suite | `go test ./internal/connector ./internal/relay ./cmd/codencer-connectord -count=1` | Pass | Direct Phase 2 run | Covers share validation, live-set re-advertise, relay hub pruning, relay MCP parity, and relay HTTP integration. |
+| Relay MCP SDK helper compile | `go test ./cmd/mcp-sdk-smoke -count=1` | Pass | Direct Phase 2 run | Confirms the official SDK smoke helper still builds cleanly during WS-R1. |
+| Self-host smoke with share-control, MCP, and SDK | `PLANNER_TOKEN=... RELAY_CONFIG=... RELAY_URL=... DAEMON_URL=... SMOKE_SCENARIOS=status,audit,share-control,mcp,mcp-sdk ./scripts/self_host_smoke.sh` | Pass | Direct Phase 2 run | Proves enrollment, connector session, relay HTTP flow, unshare -> not routable, re-share by `instance_id` -> routable, manual relay MCP flow, and official Go SDK interop. |
+| Self-host smoke with multi-instance targeting | `PLANNER_TOKEN=... RELAY_CONFIG=... RELAY_URL=... DAEMON_URL=... SMOKE_SCENARIOS=status,audit,share-control,multi-instance,mcp,mcp-sdk ./scripts/self_host_smoke.sh` | Pass | Direct Phase 2 run | Re-exercises explicit instance targeting and route isolation through one connector serving two daemons. |
+| Self-host smoke script syntax | `bash -n scripts/self_host_smoke.sh` | Pass | Direct Phase 2 run | Guards the updated self-host smoke entrypoint. |
+
+## Executed In Phase 3 (WS-L1 Local Core)
+
+| Scenario | Command / method | Result | Proof type | Notes |
+| --- | --- | --- | --- | --- |
+| Local daemon/app/service/CLI regression suite | `go test ./internal/app ./internal/service ./cmd/orchestratorctl -count=1` | Pass | Direct Phase 3 run | Covers local wait/result truth, retry lifecycle, gate routes, evidence retrieval, and CLI wait/retry behavior. |
+| Local adapter + daemon-local MCP package suite | `go test ./internal/adapters/... ./internal/mcp -count=1` | Pass | Direct Phase 3 run | Confirms the current local adapter package coverage and keeps daemon-local MCP classified as compatibility-only. |
+| Legacy six-input smoke syntax | `bash -n scripts/smoke_test_v1.sh` | Pass | Direct Phase 3 run | Guards the updated local parity smoke entrypoint. |
+| Legacy six-input local smoke, run 1 | `./scripts/smoke_test_v1.sh` | Pass | Direct Phase 3 run | Auto-started a temporary simulation daemon, exercised six submit modes in one run, and fetched result/logs/artifacts/validations. |
+| Legacy six-input local smoke, run 2 | `./scripts/smoke_test_v1.sh` | Pass | Direct Phase 3 run | Repeated from a clean daemon lifecycle to close the conflicting Phase 0 parity evidence. |
+| Baseline local smoke | `make smoke` | Pass | Direct Phase 3 run | Re-proved the standard local happy path after the WS-L1 wait/result/retry changes. |
+
+## Executed In Phase 4 (WS-P1 Planner / Client Integrations)
+
+| Scenario | Command / method | Result | Proof type | Notes |
+| --- | --- | --- | --- | --- |
+| Relay/cloud MCP regression suite and SDK helper compile | `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1` | Pass | Direct Phase 4 run | Re-proved the canonical relay/cloud MCP surfaces and kept the official Go SDK helper green under the frozen planner/client contract. |
+| Self-host relay HTTP + MCP + SDK proof against the canonical relay surface | `RELAY_CONFIG=... SMOKE_SCENARIOS=status,audit,share-control,mcp,mcp-sdk ./scripts/self_host_smoke.sh` | Pass | Direct Phase 4 run | Re-proved relay `/api/v2` plus canonical relay `/mcp`, compatibility `/mcp/call`, official Go SDK interop, and share-control routing on a fresh PTY-held daemon and relay. |
+| Composed cloud HTTP + MCP + SDK proof against the canonical cloud surface | `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:8085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1 ./scripts/cloud_smoke.sh` | Pass | Direct Phase 4 run | Re-proved cloud `/api/cloud/v1/runtime/...`, canonical `/api/cloud/v1/mcp`, compatibility `/api/cloud/v1/mcp/call`, and official Go SDK interop on top of a live local daemon. |
+| Claude Code example config syntax | `python3 -m json.tool docs/mcp/examples/claude-code-relay.mcp.json` and `python3 -m json.tool docs/mcp/examples/claude-code-cloud.mcp.json` | Pass | Direct Phase 4 run | Confirms the checked-in project-scoped `.mcp.json` examples are valid JSON before they are referenced publicly. |
+
+## Executed In Phase 5 (WS-PC1 Provider Connectors)
+
+| Scenario | Command / method | Result | Proof type | Notes |
+| --- | --- | --- | --- | --- |
+| Provider connector regression suite | `go test ./internal/cloud/... -count=1` | Pass | Direct Phase 5 run | Re-proved append-only event history, Jira webhook deferment, action/audit attribution, provider worker polling, and the provider connector package suite after the WS-PC1 fixes. |
+| Provider connector package suite | `go test ./internal/cloud/connectors -count=1` | Pass | Direct Phase 5 run | Re-ran the mocked provider validation/webhook/action/status coverage for GitHub, GitLab, Jira, Linear, and Slack. |
+| Focused routed-provider proof | `go test ./internal/cloud -run 'Test(ServerAdminAndConnectorFlows|WebhookHistoryPreservesRepeatedSourceEventIDs|JiraWebhookRouteReturnsDeferredWithoutPersistingEvents|ConnectorActionLogsCaptureRequestCompletionAndAuditDetails|WorkerRunOncePollsJiraAndPersistsSnapshot|StoreCreateConnectorEventPreservesRepeatedSourceEventHistory)' -count=1` | Pass | Direct Phase 5 run | Directly proves Slack install/validate/webhook/event/audit flow, append-only repeated event history, Jira webhook deferment, Jira polling persistence, and connector action-log completeness. |
+| Cloud smoke after provider fixes | `make cloud-smoke` | Pass | Direct Phase 5 run | Re-proved the generic cloud install/validate/webhook/events/audit path on top of the updated provider connector persistence and audit behavior. |
+
+## Executed In Phase 6 (WS-RE1 Release Engineering / Public Testability)
+
+| Scenario | Command / method | Result | Proof type | Notes |
+| --- | --- | --- | --- | --- |
+| Supported build surface | `make build-supported` | Pass | Direct Phase 6 run | Builds the main local/relay binaries, cloud binaries, and the MCP SDK helper in one public target. |
+| Repo-level supported verification from working tree | `make verify-beta` | Pass | Direct Phase 6 run | Runs main-module tests, local smoke, self-host relay/runtime MCP+SDK smoke, cloud binary smoke, and Docker compose config validation with temporary bootstrap for the relay/runtime slice. |
+| Repo-level supported verification from a fresh location | detached temporary `git worktree` run of `make build-supported && make verify-beta` | Pass | Direct Phase 6 run | Re-proved the supported non-Docker path away from the active checkout while preserving the Git metadata required by worktree-sensitive checks. |
+| Docker compose config validation after deployment packaging changes | `make cloud-stack-config` | Pass | Direct Phase 6 run | Confirms the compose file still renders cleanly after version/build-arg wiring changes. |
+| Docker compose cloud stack smoke | `make cloud-stack-smoke` | Blocked | Environment-limited in Phase 6 | Docker CLI is available, but the local Docker daemon/socket is unavailable in this environment. This remains the only deployment proof deferred to final beta confirmation on a Docker-capable host. |
+
+## Executed In Phase 7 (Beta Confirmation)
+
+| Scenario | Command / method | Result | Proof type | Notes |
+| --- | --- | --- | --- | --- |
+| Supported build surface rerun | `make build-supported` | Pass | Direct Phase 7 run | Rebuilt the primary supported binaries and the MCP SDK helper before final confirmation. Local `/usr/local/opt/grpc/lib` linker warnings still appeared, but they were non-blocking. |
+| Legacy six-input local smoke rerun, run 1 | `./scripts/smoke_test_v1.sh` | Pass | Direct Phase 7 run | Re-ran the six-input local parity smoke from the documented entrypoint during final confirmation. |
+| Legacy six-input local smoke rerun, run 2 | `./scripts/smoke_test_v1.sh` | Pass | Direct Phase 7 run | Repeated the same smoke from a fresh daemon lifecycle to preserve the two-run parity proof. |
+| Baseline local smoke rerun | `make smoke` | Pass | Direct Phase 7 run | Re-ran the standard local happy path during final confirmation. |
+| Fresh self-host relay/runtime smoke | `PLANNER_TOKEN=... RELAY_CONFIG=... RELAY_URL=... DAEMON_URL=... SMOKE_SCENARIOS=status,audit,share-control,multi-instance,mcp,mcp-sdk ./scripts/self_host_smoke.sh` | Pass | Direct Phase 7 run | Re-proved relay HTTP, share-control, multi-instance routing isolation, canonical relay MCP, and official Go SDK interop. |
+| Cloud control-plane + provider regression suite rerun | `go test ./internal/cloud/... -count=1` | Pass | Direct Phase 7 run | Re-proved cloud scope, runtime proxy, cloud MCP, provider connector, and worker-path coverage. |
+| Relay/cloud MCP regression suite + SDK helper rerun | `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1` | Pass | Direct Phase 7 run | Re-proved the canonical relay/cloud MCP surfaces and kept the official Go SDK helper green. |
+| Baseline cloud smoke rerun | `make cloud-smoke` | Pass | Direct Phase 7 run | Re-proved bootstrap, status, install, events, audit, and worker-once behavior. |
+| Composed cloud runtime HTTP + MCP + SDK smoke rerun | `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:18085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1 ./scripts/cloud_smoke.sh` | Pass | Direct Phase 7 run | Re-proved claimed runtime visibility, cloud HTTP run/create/get + submit-task, canonical cloud MCP, and official Go SDK interoperability on the confirmation daemon URL. |
+| Repo-level supported verification rerun | `make verify-beta` | Pass | Direct Phase 7 run | Re-ran main-module tests, local smoke, self-host relay/runtime MCP+SDK smoke, cloud binary smoke, and Docker compose config validation from the active checkout. |
+| Fresh-location repo verification rerun | detached temporary `git worktree` run of `make build-supported && make verify-beta` | Pass | Direct Phase 7 run | Re-proved the supported non-Docker path from a fresh location while preserving the Git metadata required by worktree-sensitive checks. |
+| Docker compose cloud stack smoke rerun | `make cloud-stack-smoke` | Pass | Direct Phase 7 run | Re-proved the Docker-backed self-host cloud baseline on a host with a live Docker daemon. |
+| Final-tree Docker-inclusive repo verifier | `make verify-beta-docker` | Pass | Direct Phase 7 run | Re-ran the supported repo verifier plus the Docker-backed cloud stack baseline after the Phase 7 truth-normalization updates landed. |
+
+## Repo-Test Proof Already Present
+
+| Scenario | Proof source | Coverage level | Notes |
+| --- | --- | --- | --- |
+| Relay HTTP run/step/result/gate/artifact routing | `internal/relay/integration_test.go` | Strong | Strongest remote HTTP proof. |
+| Relay MCP streamable contract + call alias | `internal/relay/mcp_server_test.go` | Strong | Phase 2 added duplicate-token-name principal regression coverage and alias `tools/call` proof. |
+| Relay official Go SDK interop | `internal/relay/mcp_server_test.go` | Strong | Explicit SDK proof. |
+| Connector enrollment/session/challenge/heartbeat | `internal/connector/*_test.go` | Strong | Phase 2 added live reachable-set re-advertise proof beyond config-only changes. |
+| Cloud org/workspace/project/membership/token surfaces | `internal/cloud/*_test.go`, `cmd/codencer-cloudctl/main_test.go` | Strong | Phase 1 added revoke-target scope checks and revoked-token denial coverage. |
+| Cloud runtime registry / claim / scope | `internal/cloud/runtime_api_test.go` | Strong | Phase 1 added nested runtime-scope enforcement and live proxy proof. |
+| Cloud MCP streamable contract + call alias | `internal/cloud/mcp_server_test.go` | Strong | Includes initialize/list/call/stream/delete, origin handling, token-bound sessions, and revoked-token denial. |
+| Cloud official Go SDK interop | `internal/cloud/mcp_server_test.go`, `cmd/mcp-sdk-smoke` | Strong | Repo tests plus composed smoke now prove official Go SDK access to `/api/cloud/v1/mcp`. |
+| Provider connector mocks + routed provider proof | `internal/cloud/connectors/*_test.go`, `internal/cloud/worker_test.go`, `internal/cloud/router_test.go`, `internal/cloud/store_test.go` | Medium | Stronger than Phase 0 because routed install/webhook/action/history coverage now exists, but still mock/provider-fixture proof rather than live vendor-account proof. |
+
+## Remaining Beta-Gate Work
+
+No additional proof remains required for the frozen beta tracks. Final confirmation reran the working-tree matrix, the fresh-location detached `git worktree` matrix, the Docker-backed cloud stack baseline, and the final-tree Docker-inclusive verifier.
+
+## Out Of Beta Verification Scope
+
+These do not block beta unless later promoted into the beta promise:
+
+- VS Code extension runtime proof
+- `agent-broker` runtime proof
+- `ide-chat` end-to-end proof
+- live vendor-account proof for every provider
+- enterprise/cloud SaaS concerns outside self-host scope
diff --git a/docs/internal/BETA_WORKSTREAMS_AND_OWNERSHIP.md b/docs/internal/BETA_WORKSTREAMS_AND_OWNERSHIP.md
new file mode 100644
index 0000000..64b48d1
--- /dev/null
+++ b/docs/internal/BETA_WORKSTREAMS_AND_OWNERSHIP.md
@@ -0,0 +1,238 @@
+# Beta Workstreams And Ownership
+
+This document records the merge-safe work split used for beta finalization. It now serves as a historical ownership record after the Phase 7 beta confirmation pass.
+
+References below to "remaining work" describe the handoff state at the end of that workstream, not current open beta blockers.
+
+## Workstream Map
+
+| Workstream | Goal | Primary future owner | File ownership boundary | Merge order | Conflict notes |
+| --- | --- | --- | --- | --- | --- |
+| WS-L1 Local Core Finalization | Freeze the local daemon/CLI beta contract. | Local core lead | `cmd/orchestratord`, `cmd/orchestratorctl`, `internal/app`, `internal/service`, `internal/state`, `internal/storage/sqlite`, `internal/workspace`, `schemas`, local smoke scripts | 3 | Do not mix relay/cloud scope fixes into this stream. |
+| WS-R1 Relay + Connector Finalization | Freeze self-host remote runtime contract. | Relay lead | `cmd/codencer-relayd`, `cmd/codencer-connectord`, `internal/relay`, `internal/connector`, `docs/RELAY.md`, `docs/CONNECTOR.md`, `docs/SELF_HOST_REFERENCE.md`, `scripts/self_host_smoke.sh` | 2 | Owns share/discover/status semantics. |
+| WS-C1 Cloud Control Plane Finalization | Fix cloud scope/security bugs and runtime proxy proof. | Cloud lead | `cmd/codencer-cloudd`, `cmd/codencer-cloudctl`, `cmd/codencer-cloudworkerd`, `internal/cloud`, `deploy/cloud`, `docs/CLOUD.md`, `docs/CLOUD_SELF_HOST.md` | 1 | No provider connector code edits here unless coordinated with WS-PC1. |
+| WS-P1 Planner / Client Integration Finalization | Freeze client compatibility claims and proofs. | Planner/client lead | `internal/relay/mcp_*`, `internal/cloud/mcp_*`, `internal/mcp`, `cmd/mcp-sdk-smoke`, `docs/mcp/*` | 4 | Touch relay/cloud protocol tests, not their auth/business logic, unless coordinated. |
+| WS-PC1 Provider Connector Finalization | Make the connector platform narrowly beta-ready. | Provider platform lead | `internal/cloud/connectors/*`, `internal/cloud/worker.go`, `docs/CLOUD_CONNECTORS.md`, provider fixtures | 5 | Avoid editing `internal/cloud/router.go` concurrently with WS-C1 unless a shared API change is agreed first. |
+| WS-RE1 Release Engineering / Public Testability | Make the repo externally verifiable. | Release lead | `README.md`, `CHANGELOG.md`, `Makefile`, `docs/internal/*`, smoke scripts, release notes, packaging docs | 6 | Prefer landing after functional work stabilizes. |
+| WS-S1 Secondary / Compatibility Surfaces | Keep secondary surfaces truthful and non-blocking. | Secondary surfaces lead | `cmd/broker`, `extension`, `internal/adapters/ide`, `internal/adapters/antigravity`, `internal/adapters/openclaw_acpx`, `internal/adapters/qwen` | 7 | Read-only until primary beta blockers are cleared unless a bug is actively harmful. |
+
+## Subagent Assignment Pattern
+
+Future multi-agent implementation rounds should use a bounded split like this:
+
+- `Agent LC-A`: local core contracts, smoke, schema, and adapter support table.
+- `Agent RC-A`: relay/connector auth, share-control, and self-host smoke.
+- `Agent CL-A`: cloud scope/security fixes and runtime proxy proof.
+- `Agent PI-A`: relay/cloud MCP compatibility and SDK proof.
+- `Agent PC-A`: provider eventing, action logs, Jira deferment, provider smoke.
+- `Agent RE-A`: public docs, changelog, release checklist, packaging proof.
+- `Agent SX-A`: broker/extension/secondary cleanup only after primary streams settle.
+
+## Merge Order
+
+1. WS-C1
+ - close security and scope leaks first
+2. WS-R1
+ - tighten self-host runtime correctness
+3. WS-L1
+ - finalize the local contract once remote blockers are not changing shared semantics
+4. WS-P1
+ - freeze client compatibility claims on top of stable relay/cloud behavior
+5. WS-PC1
+ - finalize provider connector claims after cloud tenancy and audit semantics settle
+6. WS-RE1
+ - align public docs, smoke targets, and release material
+7. WS-S1
+ - handle secondary surfaces without blocking beta tracks
+
+## Merge Discipline
+
+- No two write-capable workers should edit `internal/cloud/router.go` or `internal/cloud/runtime_api.go` at the same time.
+- No two write-capable workers should edit `internal/relay/*` and `scripts/self_host_smoke.sh` at the same time.
+- `README.md` and `CHANGELOG.md` should be owned by WS-RE1 only after the support matrix is frozen.
+- `extension/*` and `cmd/broker/*` should stay read-only until the primary beta tracks are finished, unless a defect is actively harmful.
+- Provider connector workers should stay inside `internal/cloud/connectors/*` unless they have an approved shared API change with WS-C1.
+
+## Non-Removal Rule For Later Phases
+
+- Secondary or excluded surfaces are not deletion targets by default.
+- Remove code only if it is dead, dangerous, or actively misleading and not worth preserving.
+- Prefer classification plus truthful docs over cleanup-by-deletion.
+
+## WS-C1 Status Update (2026-04-23)
+
+WS-C1 is complete for code, focused tests, and binary-native smoke:
+
+- closed BG-001, BG-002, BG-003, BG-004, BG-009, BG-010, and BG-019
+- proved cloud runtime HTTP over a claimed runtime connector in composed mode
+- proved cloud MCP streamable behavior and official Go SDK interoperability in composed mode
+- updated the public cloud docs to match the actual smoke and proof entrypoints
+
+Historical handoff items after WS-C1:
+
+- Docker-backed packaging proof remained release-engineering/package work pending a Docker-capable host
+- broader planner/client release wording remained with WS-P1 even though the cloud-side MCP/SDK proof was already present
+
+Recommended handoff after WS-C1:
+
+1. continue with WS-R1 per the frozen merge order
+2. keep `internal/cloud/*` read-only unless a later workstream discovers a concrete cloud regression
+
+## WS-R1 Status Update (2026-04-23)
+
+WS-R1 is complete for code, focused tests, and self-host smoke:
+
+- closed BG-005
+- fixed relay MCP principal replay so internal MCP route calls keep the authenticated planner identity
+- hardened connector live-set presence so reachable shared-instance changes trigger a fresh advertise even without a config edit
+- tightened stale connector-session pruning in the relay hub
+- updated self-host smoke and public docs to match the actual relay/runtime proof boundary
+
+Proof landed in this round:
+
+- `go test ./internal/connector ./internal/relay ./cmd/codencer-connectord -count=1`
+- `go test ./cmd/mcp-sdk-smoke -count=1`
+- self-host smoke with `status,audit,share-control,mcp,mcp-sdk`
+- self-host smoke with `status,audit,share-control,multi-instance,mcp,mcp-sdk`
+
+Remaining work that still touches relay/runtime but is not a WS-R1 blocker:
+
+- broader planner/client compatibility freezing still belongs to WS-P1
+- release packaging and clean-checkout repeatability still belong to WS-RE1
+
+Recommended handoff after WS-R1:
+
+1. continue with WS-L1 or WS-P1 per the frozen merge order and current release needs
+2. keep `internal/relay/*` and `internal/connector/*` read-only unless a later workstream discovers a concrete regression
+
+## WS-L1 Status Update (2026-04-23)
+
+WS-L1 is complete for code, focused tests, and local smoke:
+
+- closed BG-018 by fixing the local same-run wait/finalization race and re-proving the legacy six-input smoke twice
+- closed BG-017 as a beta-claim blocker by freezing the local adapter support table to the actual repo proof boundary
+- aligned local `step result`, `step wait`, and `step retry` with the persisted local lifecycle truth
+- updated local public docs so `/api/v1/compatibility`, daemon-local MCP, and the local smoke entrypoints are described honestly
+
+Proof landed in this round:
+
+- `go test ./internal/app ./internal/service ./cmd/orchestratorctl -count=1`
+- `go test ./internal/adapters/... ./internal/mcp -count=1`
+- `bash -n scripts/smoke_test_v1.sh`
+- `./scripts/smoke_test_v1.sh` (twice)
+- `make smoke`
+
+Remaining work that still touches local-adjacent truth but is not a WS-L1 blocker:
+
+- BG-016 policy schema/runtime drift remains outside the current beta promise unless later promoted
+- repo-wide release/public repeatability still belongs to WS-RE1
+
+Recommended handoff after WS-L1:
+
+1. continue with WS-P1 per the frozen merge order
+2. keep `internal/app/*`, `internal/service/*`, and local smoke scripts read-only unless a later workstream discovers a concrete local regression
+
+## WS-P1 Status Update (2026-04-23)
+
+WS-P1 is complete for planner/client contract freezing, focused verification, and public packaging docs:
+
+- closed the remaining planner/client blocker around ambiguous compatibility wording after BG-010's cloud-side proof had already landed in WS-C1
+- froze the public and internal planner/client matrix to `proven`, `expected-only`, `compatibility-only`, and `unsupported` claims that match current code and smoke
+- added cloud MCP packaging parity with a dedicated public tools page plus generic HTTP/MCP examples
+- added checked-in Claude Code style `.mcp.json` examples while keeping ChatGPT-style and Claude-style product integrations at compatibility-only
+
+Proof landed in this round:
+
+- `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1`
+- `RELAY_CONFIG=... SMOKE_SCENARIOS=status,audit,share-control,mcp,mcp-sdk ./scripts/self_host_smoke.sh`
+- `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:8085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1 ./scripts/cloud_smoke.sh`
+- `python3 -m json.tool docs/mcp/examples/claude-code-relay.mcp.json`
+- `python3 -m json.tool docs/mcp/examples/claude-code-cloud.mcp.json`
+
+Remaining work that still touches planner/client-adjacent truth but is not a WS-P1 blocker:
+
+- product-specific ChatGPT, Claude Code, Claude Desktop, Claude.ai, or Anthropic/OpenAI API publication workflows remain compatibility-only unless a later release phase chooses to exercise them directly
+- repo-wide release/public repeatability still belongs to WS-RE1
+
+Recommended handoff after WS-P1:
+
+1. continue with WS-PC1 per the frozen merge order
+2. keep `internal/relay/mcp_*`, `internal/cloud/mcp_*`, `cmd/mcp-sdk-smoke`, and `docs/mcp/*` read-only unless a later workstream discovers a concrete regression
+
+## WS-PC1 Status Update (2026-04-23)
+
+WS-PC1 is complete for provider code, focused tests, and public/internal truth freezing:
+
+- closed BG-006, BG-007, and BG-008
+- rebuilt connector event storage as append-only history instead of overwrite-on-conflict
+- enforced Jira webhook deferment truthfully at the routed webhook surface
+- enriched provider action logs and audit details so routed provider operations are attributable enough for beta testing
+- updated the public provider docs and internal beta matrices to match the narrow provider matrix now proven by the repo
+
+Proof landed in this round:
+
+- `go test ./internal/cloud/... -count=1`
+- `go test ./internal/cloud/connectors -count=1`
+- `go test ./internal/cloud -run 'Test(ServerAdminAndConnectorFlows|WebhookHistoryPreservesRepeatedSourceEventIDs|JiraWebhookRouteReturnsDeferredWithoutPersistingEvents|ConnectorActionLogsCaptureRequestCompletionAndAuditDetails|WorkerRunOncePollsJiraAndPersistsSnapshot|StoreCreateConnectorEventPreservesRepeatedSourceEventHistory)' -count=1`
+- `make cloud-smoke`
+
+Remaining work that still touches provider-adjacent truth but is not a WS-PC1 blocker:
+
+- live vendor-account proof for every provider remains outside the current beta promise
+- provider-specific end-to-end smoke beyond Slack remains optional future depth, not a current blocker
+- repo-wide release/public repeatability still belongs to WS-RE1
+
+Recommended handoff after WS-PC1:
+
+1. continue with WS-RE1 per the frozen merge order
+2. keep `internal/cloud/connectors/*`, `internal/cloud/worker.go`, and provider docs read-only unless a later workstream discovers a concrete regression
+
+## WS-RE1 Status Update (2026-04-23)
+
+WS-RE1 is complete for public tester routing, repo-level verification entrypoints, CI visibility, and non-Docker repeatability:
+
+- closed BG-014 by adding a visible GitHub Actions workflow for the supported public verification path
+- closed BG-015 by making the frozen beta docs the current release truth and marking older planning/backlog docs as historical
+- added `make build-supported`, `make verify-beta`, and `make verify-beta-docker` as explicit repo-level verification commands
+- added `scripts/verify_beta.sh` so the supported non-Docker verification path self-starts the temporary relay/runtime proof instead of assuming hidden setup
+- updated public docs so local, relay/runtime, cloud, planner/client, and provider testers route to the correct track immediately
+
+Proof landed in this round:
+
+- `make build-supported`
+- `make verify-beta`
+- detached temporary `git worktree` run of `make build-supported && make verify-beta`
+- `make cloud-stack-config`
+
+Historical handoff items after WS-RE1:
+
+- final beta confirmation had to rerun the frozen matrix one more time without widening scope
+- Docker-backed packaging proof still depended on a Docker-capable host for direct re-confirmation
+
+Recommended handoff after WS-RE1:
+
+1. continue with Phase 7 beta confirmation only
+2. keep the beta-track docs, verify targets, and CI workflow aligned with the frozen matrix during final confirmation
+
+## Phase 7 Status Update (2026-04-23)
+
+Phase 7 is complete:
+
+- reran the frozen beta matrix from the working tree
+- reran `./scripts/smoke_test_v1.sh` twice and `make smoke`
+- reran the fresh self-host smoke with `status,audit,share-control,multi-instance,mcp,mcp-sdk`
+- reran `go test ./internal/cloud/... -count=1`
+- reran `go test ./internal/relay ./internal/cloud ./cmd/mcp-sdk-smoke -count=1`
+- reran `make cloud-smoke`
+- reran the composed cloud smoke with `CLOUD_RELAY_CONFIG=... CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:18085 CLOUD_SMOKE_MCP=1 CLOUD_SMOKE_SDK=1`
+- reran `make verify-beta` from the working tree
+- reran the supported verification from a detached temporary `git worktree` at the current `HEAD`
+- reran `make cloud-stack-smoke` successfully on a live Docker daemon host
+- reran `make verify-beta-docker` successfully on the final tree after the Phase 7 truth-normalization updates landed
+- observed the usual local `/usr/local/opt/grpc/lib` linker warnings without any blocking failures
+- confirmed the repo as `v0.2.0-beta`
+
+Program status after beta confirmation:
+
+- the primary beta workstreams are complete
+- future work should be treated as post-beta maintenance, deferred-surface cleanup, or deeper proof beyond the current beta promise
diff --git a/docs/internal/GAP_AUDIT.md b/docs/internal/GAP_AUDIT.md
index 780b75b..f498d99 100644
--- a/docs/internal/GAP_AUDIT.md
+++ b/docs/internal/GAP_AUDIT.md
@@ -2,42 +2,45 @@
> [!WARNING]
> **INTERNAL DEVELOPER DOCUMENT**: This file is for project maintainers and contains technical debt audits, task backlogs, and roadmap tracking.
> For the official **User Guide**, please refer to the [README.md](../../README.md).
+>
+> **Historical status note**: This file is legacy audit context. It is not the current release contract. Use the frozen `BETA_*` docs in this directory for current beta truth.
## Current Reality
+- **Repo Status**: [ALIGNED] The current repository truth is **`v0.2.0-alpha`** and **open-source alpha for the v2 local/self-host path**.
- **Lifecycle Meaning Cleanup**: [RESOLVED] Explicitly defined Run (Session), Step (Planner Unit), and Attempt (Execution Try) in domain code and README. Verified that no bridge-side decision logic is implied.
- **Terminology Inconsistency**: [RESOLVED] Renamed all outcome indicators to `State` (RunState, StepState, Result.State) for uniform operator experience.
- **Ergonomics**: [RESOLVED] Tightened the `submit` -> `wait` -> `result` sequence and established the **Canonical Local Runbook** in `EXAMPLES.md`.
- **Trust & Transparency**: [RESOLVED] Added "Known Limitations" and clarified the distinction between simulation and real-mode execution in README.
-- **Release Surface**: [RESOLVED] Performed a unified v1 Truth-Pass across all README, setup, examples, and guide documentation.
-- **OpenClaw Status**: [ALIGNED] Maintained as Experimental Alpha-tier for v1.0. Future promotion to stable requires sustained user verification.
+- **Release Surface**: [RESOLVED] Performed a unified truth-pass across README, setup, examples, and guide documentation for the current v2 self-host alpha surface.
+- **OpenClaw Status**: [ALIGNED] Maintained as Experimental Alpha-tier for the current `v0.2.0-alpha` repo state. Any future promotion requires sustained user verification.
## Feature Status Matrix
| Component | Status | Implementation Type | Notes |
| :--- | :--- | :--- | :--- |
-| **Orchestration Core** | ✅ **Ready (Stable)** | Native (SQLite) | Persistent ledger, state machine, and Git Worktrees. |
-| **CLI & MCP Layer** | ✅ **Ready (Stable)** | Native | Human-readable hints, logs, and structured JSON. |
-| **Codex Adapter** | ✅ **Ready (Stable)** | CLI Wrapper | High-fidelity relay with artifact harvesting. |
+| **Orchestration Core** | ✅ **Implemented (Alpha)** | Native (SQLite) | Persistent ledger, state machine, and Git Worktrees. |
+| **CLI & MCP Layer** | ✅ **Implemented (Alpha)** | Native | Human-readable hints, logs, and structured JSON. |
+| **Codex Adapter** | ✅ **Implemented (Alpha)** | CLI Wrapper | High-fidelity relay with artifact harvesting. |
| **OpenClaw Adapter** | 🧪 **Experimental (Alpha)** | ACPX Wrapper | Functional alpha; basic lifecycle tracking. |
-| **Claude Adapter** | 🟢 **Supported (Beta)** | CLI Wrapper | Uses `claude -p --output-format json` with stdin prompt delivery, cwd-based execution, synthesized result mapping, and fake-binary integration coverage. Live authenticated Claude service calls are not exercised in repo tests. |
-| **Qwen Adapter** | 🟡 **Functional** | CLI Wrapper | Basic subprocess wrapper; narrower evidence extraction than Codex/Claude. |
-| **Simulation Mode** | ✅ **Ready (Stable)** | Native | Robust stubs for orchestrator validation. |
+| **Claude Adapter** | 🟢 **Implemented Wrapper (Alpha)** | CLI Wrapper | Uses `claude -p --output-format json` with stdin prompt delivery, cwd-based execution, synthesized result mapping, and fake-binary integration coverage. Live authenticated Claude service calls are not exercised in repo tests. |
+| **Qwen Adapter** | 🟡 **Functional (Alpha)** | CLI Wrapper | Basic subprocess wrapper; narrower evidence extraction than Codex/Claude. |
+| **Simulation Mode** | ✅ **Implemented (Alpha)** | Native | Robust stubs for orchestrator validation. |
| **Adaptive Routing** | 🧪 **Prototype** | Heuristic | Static fallback chain; not yet benchmark-driven. |
-| **Governance** | ✅ **Ready (Stable)** | Manual | MIT Licensed; `CONTRIBUTING.md` authored. |
-| **Diagnostics** | ✅ **Ready (Stable)** | CLI | `doctor` command verifies versions and environment. |
+| **Governance** | ✅ **Implemented (Alpha)** | Manual | MIT Licensed; `CONTRIBUTING.md` authored. |
+| **Diagnostics** | ✅ **Implemented (Alpha)** | CLI | `doctor` command verifies versions and environment. |
## Known Technical Debt & Limitations
- **Adaptive Routing**: Routing is currently based on a static heuristic chain; benchmark-driven optimization is documented but not dynamic.
- **Process Introspection**: CLI-wrapped adapters provide limited visibility beyond standard streams.
- **Simulation Limits**: Simulation Mode stubs all actions; it validates the orchestrator's state-machine but does not test real agent logic.
-## V1 Publication Audit (Phase V1.F3)
+## Alpha Publication Audit (Historical V1.F3 Tracking)
### 🚨 Critical Publication Blockers (Must Fix)
1. **LICENSE**: ✅ RESOLVED (MIT).
2. **CONTRIBUTING.md**: ✅ RESOLVED.
3. **Repository Noise**: ✅ RESOLVED (`codencer.db` removed/ignored).
-4. **Makefile Version**: ✅ RESOLVED (`v1.0-release-candidate`).
+4. **Makefile Version**: ✅ RESOLVED (`v0.2.0-alpha`).
5. **Setup Reproducibility**: ✅ RESOLVED (`make setup build` verified).
### 🟡 Trust & Readability Gaps (Should Fix)
@@ -45,12 +48,12 @@
2. **Internal Documentation Noise**: ✅ RESOLVED (Upgraded headers and README navigation).
3. **Example Parity**: Ensure `examples/tasks/*.yaml` are 100% compliant with the latest schema.
-### 🚀 Ready for Release Finalization
+### 🚀 Ready for Alpha Publication Hardening
1. **Governance**: ✅ RESOLVED (MIT & CONTRIBUTING.md).
2. **Health**: Enhance `doctor` with version checks for `git` and `sqlite3`.
3. **Documentation Quality Audit**: ✅ PASS (Rendering issues fixed; Quickstart aligned).
-## V1 Publication Readiness Audit (Batch V1.F5)
+## Alpha Publication Readiness Audit (Historical V1.F5 Tracking)
### 🔹 Current First-Run Path
1. `make setup build`: Creates directories and compiles binaries.
@@ -75,10 +78,10 @@
---
- [x] Audit Trust & Readiness Alignment (Final Alignment Complete)
-- [x] V1 Publication Readiness Audit (Batch V1.F5 Complete)
+- [x] Alpha Publication Readiness Audit (Historical Batch V1.F5 Complete)
- [x] Harden `doctor` with binary version checking (Batch V1.R1 Complete)
- [x] Align Smoke Test with modern CLI ergonomics (Batch V1.R1 Complete)
- [x] Batch R2: Final Metadata & Release Notes (Complete)
- - [x] Update version strings to `v1.0-release-candidate`
+ - [x] Update version strings to `v0.2.0-alpha`
- [x] Create `CHANGELOG.md`
- [x] Final Sanity Audit
diff --git a/docs/internal/IMPLEMENTATION_PLAN.md b/docs/internal/IMPLEMENTATION_PLAN.md
index 6abb98b..d8d377d 100644
--- a/docs/internal/IMPLEMENTATION_PLAN.md
+++ b/docs/internal/IMPLEMENTATION_PLAN.md
@@ -1,5 +1,9 @@
# Codencer Implementation Plan
+> [!WARNING]
+> **HISTORICAL PLANNING DOCUMENT**: This plan reflects an earlier local-first repo shape and is not current release truth.
+> Use the frozen `BETA_*` docs in this directory for the current beta contract, verification matrix, and workstream state.
+
## Overview
Codencer is a local orchestration bridge for coding agents. It separates architectural planning from implementation execution, acting as a deterministic control plane that manages runs, state, policy gates, and artifacts.
diff --git a/docs/internal/PROGRESS.md b/docs/internal/PROGRESS.md
index b723171..6f5c262 100644
--- a/docs/internal/PROGRESS.md
+++ b/docs/internal/PROGRESS.md
@@ -2,6 +2,8 @@
> [!WARNING]
> **INTERNAL DEVELOPER DOCUMENT**: This file is for project maintainers and contains technical debt audits, task backlogs, and roadmap tracking.
> For the official **User Guide**, please refer to the [README.md](../../README.md).
+>
+> **Historical status note**: This progress log is legacy context. It is not the current release contract. Use the frozen `BETA_*` docs in this directory for current beta truth.
## Phase 1: MVP Foundation (Completed)
- [x] Daemon bootstrap & SQLite Ledger
diff --git a/docs/internal/TASKS.md b/docs/internal/TASKS.md
index 0c8832b..98f440e 100644
--- a/docs/internal/TASKS.md
+++ b/docs/internal/TASKS.md
@@ -2,6 +2,8 @@
> [!WARNING]
> **INTERNAL DEVELOPER DOCUMENT**: This file is for project maintainers and contains technical debt audits, task backlogs, and roadmap tracking.
> For the official **User Guide**, please refer to the [README.md](../../README.md).
+>
+> **Historical status note**: This backlog is legacy context. It is not the current release contract. Use the frozen `BETA_*` docs in this directory for current beta truth.
## Priority 1 โ Refactor and strengthen orchestration runtime [COMPLETE]
- [x] Extract orchestration workflow out of monolithic `DispatchStep()`.
diff --git a/docs/internal/cloud_v1_finish_log.md b/docs/internal/cloud_v1_finish_log.md
new file mode 100644
index 0000000..4e148ea
--- /dev/null
+++ b/docs/internal/cloud_v1_finish_log.md
@@ -0,0 +1,539 @@
+# Codencer Cloud V1 Finish Log
+
+> [!WARNING]
+> **HISTORICAL FINISH LOG**: This log records an earlier cloud hardening pass and is not the current release contract.
+> Use the frozen `BETA_*` docs in this directory for current beta truth and remaining work.
+
+Last updated: 2026-04-15
+
+## Current Cloud Hardening Pass
+
+Mission for this pass:
+
+- add a cloud-scoped canonical remote surface decision and implementation
+- harden tenancy with memberships, roles, and better audit attribution
+- deepen the priority connector lifecycle without adding more connector breadth
+- add a real Docker-based self-host baseline and deployment smoke
+
+Exact blockers locked for this pass:
+
+1. Cloud-scoped runtime control had no cloud MCP surface
+2. Tenancy lacked first-class memberships, roles, and ownership semantics
+3. Provider installations lacked stronger owner/health/timestamp lifecycle depth
+4. The repo had no Docker deployment baseline for the cloud stack
+
+### Current Hardening Ownership Map
+
+- Access and cloud MCP:
+ - `internal/cloud/auth.go`
+ - `internal/cloud/router.go`
+ - `internal/cloud/server.go`
+ - `internal/cloud/mcp_server.go`
+ - `internal/cloud/mcp_tools.go`
+ - `internal/cloud/membership_api.go`
+ - `cmd/codencer-cloudctl/main.go`
+- Connector lifecycle hardening:
+ - `internal/cloud/connectors/*`
+ - `internal/cloud/worker.go`
+- Deployment baseline:
+ - `deploy/cloud/*`
+ - `Makefile`
+- Truth/docs:
+ - `README.md`
+ - `docs/CLOUD.md`
+ - `docs/CLOUD_CONNECTORS.md`
+ - `docs/CLOUD_SELF_HOST.md`
+ - this file
+
+### Current Hardening Verification Ledger
+
+| Merge | Scope | Checks | Result | Notes |
+| --- | --- | --- | --- | --- |
+| 1 | membership + role + audit attribution + cloud MCP | `go test ./internal/cloud/... ./cmd/codencer-cloudctl ./cmd/codencer-cloudd ./cmd/codencer-cloudworkerd` | passed | API and cloud MCP coverage landed together |
+| 2 | connector lifecycle hardening | `go test ./internal/cloud/connectors ./internal/cloud/...` | passed | provider config validation and lifecycle persistence remained green |
+| 3 | deployment baseline | `docker compose --env-file deploy/cloud/.env.example -f deploy/cloud/docker-compose.yml config` | passed | compose file, env wiring, mounts, and healthcheck syntax validated |
+| 4 | compose smoke | `./deploy/cloud/smoke.sh` | blocked | Docker CLI was installed, but the Docker daemon/socket was unavailable in this environment, so the stack could not be started live |
+
+## Current Deepening Pass
+
+This pass is narrower than the original cloud-foundation push.
+
+Mission for the current pass:
+
+- make cloud the tenant-aware control plane for Codencer runtime when cloud mode is used
+- keep the local daemon, relay, and connector execution doctrine intact
+- deepen the priority provider connectors instead of adding more shallow breadth
+- keep claims about connector and cloud maturity exact
+
+This pass does **not** add UI, billing, or new low-priority connectors.
+
+## Runtime Control-Plane Gap Lock
+
+Current code truth at the start of this pass:
+
+- cloud control-plane APIs exist under `/api/cloud/v1/*`
+- relay runtime APIs still exist separately under `/api/v2/*`, `/mcp`, and `/ws/connectors`
+- `codencer-cloudd` can compose relay in-process, but that is still process composition rather than tenant-aware runtime ownership
+- cloud token scope governs cloud admin APIs only
+- relay planner tokens still govern runtime routing and instance visibility
+- cloud stores provider connector installations, but not tenant-scoped Codencer runtime connector installations or runtime instances
+
+Exact blockers locked for this pass:
+
+1. No cloud-side Codencer runtime installation model
+ - missing tenant-scoped record for local Codencer connector identity, machine metadata, enabled state, last seen, health, and last error
+
+2. No cloud-side runtime instance registry
+ - missing tenant-scoped record for shared instances, instance metadata, connector ownership, enabled state, and last seen
+
+3. No cloud-scoped runtime API surface
+ - missing cloud routes for runtime connectors, instances, and runtime inspection under org/workspace/project scope
+
+4. No cloud/relay auth alignment
+ - composed cloud mode does not translate tenant ownership and cloud token scope into runtime visibility
+
+5. Provider connectors still remain thin alpha integrations
+ - GitHub, GitLab, Linear, and Slack each expose one minimal action
+ - Jira remains polling-first with limited health depth
+ - docs compress code existence and verified depth too aggressively
+
+## Current Pass Ownership Map
+
+### A. Runtime Model + Store
+
+- Owner: write worker
+- Scope:
+ - add tenant-scoped Codencer runtime installation and runtime instance models
+ - extend cloud store and migrations
+ - add store tests for runtime registry behavior
+- Files:
+ - `internal/cloud/models.go`
+ - `internal/cloud/store.go`
+ - `internal/cloud/*_test.go` for runtime model/store coverage
+- Status: completed
+
+### B. Cloud Runtime API + Auth + Relay Alignment
+
+- Owner: Lead
+- Scope:
+ - cloud runtime routes
+ - cloud token scope enforcement for runtime resources
+ - composed relay alignment and tenant-scoped runtime visibility
+ - cloudctl runtime admin surfaces
+- Files:
+ - `internal/cloud/auth.go`
+ - `internal/cloud/server.go`
+ - `internal/cloud/router.go`
+ - `cmd/codencer-cloudd/main.go`
+ - `cmd/codencer-cloudctl/main.go`
+ - `internal/relay/*` as needed for composed cloud alignment
+- Status: completed
+
+### C. Priority Connector Deepening
+
+- Owner: write worker
+- Scope:
+ - stronger validation and provider-specific status detail where practical
+ - richer action surface for priority providers
+ - stronger normalization tests
+ - no new providers
+- Files:
+ - `internal/cloud/connectors/types.go`
+ - `internal/cloud/connectors/common.go`
+ - `internal/cloud/connectors/github.go`
+ - `internal/cloud/connectors/gitlab.go`
+ - `internal/cloud/connectors/jira.go`
+ - `internal/cloud/connectors/linear.go`
+ - `internal/cloud/connectors/slack.go`
+ - matching tests under `internal/cloud/connectors/*_test.go`
+- Status: completed
+
+### D. Docs / Truth / Verification
+
+- Owner: Lead
+- Scope:
+ - update cloud docs and connector matrix
+ - record exact verification after each merge
+ - keep claims narrow and evidence-based
+- Files:
+ - `docs/CLOUD.md`
+ - `docs/CLOUD_CONNECTORS.md`
+ - `docs/CLOUD_SELF_HOST.md`
+ - this finish log
+- Status: completed
+
+## Current Merge Order
+
+1. Update runtime blocker lock and ownership log
+2. Merge cloud runtime model/store foundation
+3. Re-run `go test ./internal/cloud/...`
+4. Merge cloud runtime API/auth/alignment work
+5. Re-run focused cloud + relay tests
+6. Merge provider connector deepening
+7. Re-run `go test ./internal/cloud/connectors ./internal/cloud/...`
+8. Update docs and self-host/cloud truth
+9. Run broad verification: `go test ./...`, `make build`, `make build-cloud`
+
+## Current Pass Delivery Snapshot
+
+Implemented in this pass:
+
+- cloud-side runtime registry foundation:
+ - `RuntimeConnectorInstallation`
+ - `RuntimeInstance`
+ - runtime registry migrations and store methods
+- cloud-scoped runtime routes under `/api/cloud/v1/runtime/*`
+- cloud-scoped runtime connector claim/sync/enable/disable flows
+- cloud-scoped runtime instance inspection and instance-scoped HTTP proxying for runs, steps, gates, and artifacts
+- relay helper support for trusted in-process planner principals used by the cloud daemon
+- deeper provider connector action surface and stronger connector tests
+- updated cloud docs and optional runtime-claim smoke wiring
+
+## Current Pass Verification Ledger
+
+| Merge | Scope | Checks | Result | Notes |
+| --- | --- | --- | --- | --- |
+| 1 | runtime blocker lock + ownership map | log update only | passed | this file is the canonical pass log |
+| 2 | runtime model/store foundation + connector depth slices merged | `go test ./internal/cloud/... ./internal/cloud/connectors` | passed | worker slices landed cleanly |
+| 3 | relay in-process planner injection helper | `go test ./internal/relay -run 'TestPlanner|TestServeAsPlanner'` | passed | cloud can now proxy through relay without a second bearer token hop |
+| 4 | cloud runtime API + cloudctl | `go test ./internal/cloud/... ./cmd/codencer-cloudctl ./internal/relay ./cmd/codencer-cloudd ./cmd/codencer-cloudworkerd` | passed | claim/list/disable runtime flows covered |
+| 5 | broad verification | `go test ./...` | passed | repo-wide tests remained green |
+| 6 | build verification | `make build` and `make build-cloud` | passed | core binaries and cloud binaries build |
+| 7 | operator smoke | `bash -n scripts/cloud_smoke.sh` and `make cloud-smoke` | passed | runtime-claim smoke path remains optional and env-driven |
+
+## Mission
+
+Take the current Codencer repository from:
+
+- practical self-host v2 alpha with daemon, connector, relay, relay MCP, and operator tooling
+
+to:
+
+- first-class open-source-based cloud backend/control-plane foundation without UI
+- while preserving the current self-host relay/connector/daemon path
+- and keeping docs, runtime behavior, and public claims truthful
+
+## Repo Truth Lock
+
+Current repo truth at the start of this cloud run:
+
+- Codencer is still explicitly documented and implemented as a local-first bridge.
+- The current shipped runtime is:
+ - local daemon: `orchestratord`
+ - local operator CLI: `orchestratorctl`
+ - self-host relay: `codencer-relayd`
+ - local outbound connector: `codencer-connectord`
+ - optional Windows-side `agent-broker`
+- The repo does **not** currently contain a true cloud domain or tenancy model.
+- The repo does **not** currently contain a SaaS connector platform for GitHub, GitLab, Jira, Linear, or Slack.
+- The repo does **not** currently contain a packaged cloud deployment stack.
+
+Primary supporting file references:
+
+- [README.md](../../README.md)
+- [docs/01_product_scope.md](../01_product_scope.md)
+- [docs/02_architecture.md](../02_architecture.md)
+- [docs/SELF_HOST_REFERENCE.md](../SELF_HOST_REFERENCE.md)
+- [docs/RELAY.md](../RELAY.md)
+- [internal/relay/server.go](../../internal/relay/server.go)
+- [internal/relay/store/store.go](../../internal/relay/store/store.go)
+- [internal/storage/sqlite/migrations.go](../../internal/storage/sqlite/migrations.go)
+
+## Reusable Foundations
+
+The existing repo already provides reusable cloud-adjacent foundations:
+
+- narrow relay control-plane patterns:
+ - planner bearer-token auth
+ - connector enrollment tokens
+ - signed connector challenge/response
+ - connector presence/session hub
+ - instance registry and route hints
+ - audit persistence
+- stable local execution model:
+ - repo-bound daemon
+ - worktree isolation
+ - runs / steps / attempts / gates
+ - evidence retrieval
+- operator CLI patterns:
+ - `codencer-relayd` admin helpers
+ - `codencer-connectord` admin helpers
+ - smoke-script pattern for end-to-end verification
+
+These are reusable as a cloud runtime/control-plane substrate.
+
+## Exact Blocker Lock
+
+The following blockers must be addressed before Codencer can be truthfully described as a cloud backend/control-plane:
+
+1. No cloud domain model
+ - missing `org`, `workspace`, `project`, membership, role, and ownership entities
+ - existing `project_id` is only an execution label on runs/tasks
+
+2. No cloud token / access model
+ - current relay planner auth is static config token auth only
+ - no tenant-scoped API token lifecycle, disable/revoke, or attribution model
+
+3. No cloud persistence layer
+ - no cloud DB schema for tenants, tokens, connector installations, external events, or action history
+ - no cloud migration strategy beyond inline sqlite DDL in current local/relay stores
+
+4. No cloud control-plane service
+ - no `codencer-cloudd`-style backend
+ - no cloud admin API
+ - no cloud admin CLI
+
+5. No external connector platform
+ - current connector is a local daemon bridge, not a SaaS integration framework
+ - no installation model, no secrets/config model, no normalized event model, no action dispatch model
+
+6. No top-tier connector implementations
+ - no GitHub, GitLab, Jira, Linear, or Slack connector packages exist in repo code
+
+7. No webhook/polling ingest plane
+ - no provider webhook endpoints
+ - no signature verification routes
+ - no sync cursor or polling state
+
+8. No deployment/self-host cloud story
+ - no packaged cloud service startup flow
+ - no cloud env/config examples
+ - no cloud smoke flow
+
+9. Public docs still say "no cloud"
+ - current docs explicitly position Codencer as local-first and non-cloud
+ - any cloud additions must reconcile this honestly without breaking self-host truth
+
+## Initial Delivery Target For This Run
+
+Given current repo reality, the maximum safe target for this run is:
+
+- add a real cloud backend foundation to the repo
+- keep existing self-host v2 runtime intact
+- make the new cloud backend reuse the existing relay path where possible
+- implement a reusable external connector platform
+- implement real minimal connectors for the priority set
+- prove what is actually verified
+- explicitly list whatever remains partial
+
+This run must not overclaim:
+
+- enterprise IAM
+- fully managed SaaS maturity
+- full connector parity with vendor ecosystems
+- cloud-hosted execution replacing local execution truth
+
+## Workstreams
+
+### A. Repo Truth + Cloud Gap Lock
+
+- Owner: Lead
+- Scope:
+ - this finish log
+ - blocker lock
+ - ownership map
+ - merge plan
+- Files:
+ - `docs/internal/cloud_v1_finish_log.md`
+- Status: completed
+
+### B. Cloud Domain / Tenancy / Auth Foundation
+
+- Owner: Helmholtz
+- Scope:
+ - cloud config
+ - tenant domain model
+ - token model
+ - installation model
+ - secrets model
+ - audit attribution
+ - cloud store and migrations
+- Files:
+ - `internal/cloud/config.go`
+ - `internal/cloud/models.go`
+ - `internal/cloud/store.go`
+ - `internal/cloud/auth.go`
+ - `internal/cloud/secrets.go`
+ - tests under `internal/cloud/*_test.go`
+- Status: completed
+
+### C. Cloud Control-Plane API / Admin Surfaces
+
+- Owner: Lead
+- Scope:
+ - cloud HTTP admin API
+ - cloud admin CLI
+ - bootstrap flows for org/workspace/project/token/installations
+- Files:
+ - `internal/cloud/server.go`
+ - `internal/cloud/router.go`
+ - `cmd/codencer-cloudd/main.go`
+ - `cmd/codencer-cloudctl/main.go`
+ - tests under `cmd/codencer-cloudctl/*` and `internal/cloud/*_test.go`
+- Status: completed
+
+### D. Connector Platform Foundation
+
+- Owner: Lovelace
+- Scope:
+ - connector registry
+ - connector contract
+ - normalized events/actions
+ - install/validate/action/ingest helpers
+- Files:
+ - `internal/cloud/connectors/registry.go`
+ - `internal/cloud/connectors/types.go`
+ - `internal/cloud/connectors/common.go`
+ - tests under `internal/cloud/connectors/*_test.go`
+- Status: completed
+
+### E. Top-Tier Connectors
+
+- Owner: split
+- Scope:
+ - GitHub
+ - GitLab
+ - Jira
+ - Linear
+ - Slack
+- Files:
+ - `internal/cloud/connectors/github.go`
+ - `internal/cloud/connectors/gitlab.go`
+ - `internal/cloud/connectors/jira.go`
+ - `internal/cloud/connectors/linear.go`
+ - `internal/cloud/connectors/slack.go`
+ - tests under `internal/cloud/connectors/*_test.go`
+- Status: completed
+
+Connector ownership:
+
+- Lovelace:
+ - `internal/cloud/connectors/github.go`
+ - `internal/cloud/connectors/gitlab.go`
+- Ramanujan:
+ - `internal/cloud/connectors/jira.go`
+ - `internal/cloud/connectors/linear.go`
+ - `internal/cloud/connectors/slack.go`
+
+### F. Relay / Cloud Alignment
+
+- Owner: Lead
+- Scope:
+ - keep existing relay intact
+ - align cloud service composition with current relay
+ - preserve explicit instance targeting and local execution truth
+- Files:
+ - `cmd/codencer-cloudd/main.go`
+ - `internal/cloud/server.go`
+ - `internal/cloud/router.go`
+ - `docs/RELAY.md`
+- Status: completed
+
+### G. Deployment / Self-Host / Operator Flow
+
+- Owner: Lead with docs worker support
+- Scope:
+ - cloud docs
+ - cloud examples
+ - cloud smoke path
+ - startup/dependency flow
+- Files:
+ - cloud docs and scripts to be added after runtime shape is concrete
+- Status: completed
+
+### H. Final Test / Harden / Truth Pass
+
+- Owner: Lead
+- Scope:
+ - formatting
+ - targeted tests after each merge
+ - broad builds/tests
+ - final truth summary
+- Status: completed
+
+## Merge Order
+
+1. Repo truth + cloud gap lock
+2. Cloud domain / tenancy / auth foundation
+3. Cloud control-plane API / admin surfaces
+4. Connector platform foundation
+5. Top-tier connector implementations
+6. Relay / cloud alignment
+7. Deployment / self-host / operator docs and smoke
+8. Final hardening, broad tests/builds, and truth pass
+
+## Final Delivery Snapshot
+
+Implemented in this pass:
+
+- new cloud domain, store, auth, and secret foundation under `internal/cloud`
+- cloud admin/control-plane binaries:
+ - `cmd/codencer-cloudd`
+ - `cmd/codencer-cloudctl`
+ - `cmd/codencer-cloudworkerd`
+- cloud admin HTTP surface under `/api/cloud/v1/*`
+- connector registry plus provider implementations for:
+ - GitHub
+ - GitLab
+ - Jira
+ - Linear
+ - Slack
+- provider webhook ingest routes where implemented
+- Jira polling-first worker path
+- installation enable/disable surfaces
+- cloud docs, setup docs, and smoke script
+
+## Verification Ledger
+
+Cloud-focused checks run during this pass:
+
+- `go test ./internal/cloud/...`
+- `go test ./internal/cloud/... ./cmd/codencer-cloudctl ./cmd/codencer-cloudd ./cmd/codencer-cloudworkerd`
+- `go build ./cmd/codencer-cloudctl ./cmd/codencer-cloudd ./cmd/codencer-cloudworkerd`
+- `make build-cloud`
+- `bash -n scripts/cloud_smoke.sh`
+- `make cloud-smoke`
+
+Broad preservation checks run after cloud integration:
+
+- `go test ./...`
+- `make build`
+- `make build-broker`
+- `git diff --check`
+
+Verification outcome:
+
+- cloud control-plane binaries build
+- cloud smoke path passed end-to-end
+- repo-wide tests passed
+- existing local/self-host build targets still pass
+
+## Truthful Alpha Limitations
+
+The cloud backend is real and usable for operator self-use, but these limitations remain explicit:
+
+- bootstrap of the first org is local/store-driven via `codencer-cloudctl bootstrap`; there is no fully HTTP-only first-org bootstrap token flow in this pass
+- the cloud store is SQLite-backed in this alpha pass; Postgres/Redis/object-storage-backed deployment is not implemented
+- identity is service-token/operator-token based; there is no user membership or enterprise IAM model yet
+- GitHub, GitLab, Linear, and Slack connector behavior is unit-tested in-repo, but this pass does not claim live end-to-end verification against hosted provider accounts
+- Jira is intentionally polling-first through `codencer-cloudworkerd`; Jira webhook ingest is not implemented in this pass
+- the cloud control plane does not replace the local daemon/relay execution truth and must not be described as a planner or generic cloud workflow brain
+8. Final hardening / tests / truth pass
+
+## Verification Ledger
+
+| Merge | Scope | Checks | Result | Notes |
+| --- | --- | --- | --- | --- |
+| 1 | repo truth + cloud gap lock | pending | pending | this row will be updated after merge |
+
+## Open Questions Locked For Implementation
+
+- Storage driver:
+ - locked for this pass: sqlite-backed alpha cloud backend using isolated cloud store code
+ - note clearly in docs that this is alpha self-host cloud persistence, not a production HA database posture
+- Relay composition:
+ - locked for this pass: `codencer-cloudd` composes the existing relay handler rather than rewriting relay internals
+- Connector auth depth:
+ - token bootstrap first, OAuth/app-model only where safe and proven
+
+These must be resolved in code and docs, not by aspiration.
diff --git a/docs/internal/v2_finish_log.md b/docs/internal/v2_finish_log.md
new file mode 100644
index 0000000..f6e127d
--- /dev/null
+++ b/docs/internal/v2_finish_log.md
@@ -0,0 +1,143 @@
+# Codencer Practical V2 Delta Finish Log
+
+> [!WARNING]
+> **HISTORICAL FINISH LOG**: This log records an earlier v2 delta pass and is not the current release contract.
+> Use the frozen `BETA_*` docs in this directory for current beta truth and remaining work.
+
+## Goal
+- Close the remaining delta from practical self-host alpha to full practical v2 for real self-use now.
+- Keep Codencer local-first, planner-controlled, evidence-oriented, repo-bound, and truthful about runtime behavior and protocol maturity.
+
+## Delta Blockers Locked From Repo Truth
+- Share control is not yet fully truthful:
+ - a running connector does not visibly re-advertise config-driven share changes
+ - relay instance state is not pruned when a connector stops advertising a previously shared instance
+- Operator discovery is incomplete:
+ - discovery roots exist internally, but there is no operator-facing `codencer-connectord discover` command
+- MCP maturity is only partially proven:
+ - `/mcp` is usable, but `GET /mcp` is still a thin bootstrap instead of a real long-lived SSE session
+ - external interoperability is not yet proven against an official SDK
+- Multi-instance proof is thin:
+ - the repo mostly proves single-instance relay flows rather than a two-instance select-and-target flow
+- Daemon HTTP route proof is thinner than service proof:
+ - direct route tests are missing for abort, gate decision, and step evidence endpoints
+- Relay operator ergonomics still miss a local audit helper:
+ - audit is available over HTTP, but not yet through `codencer-relayd`
+- Broker/runtime docs need a truth pass:
+ - standardize `agent-broker` naming everywhere
+ - keep the in-memory task-session limitation explicit
+
+## Locked Decisions
+- Planner auth remains static-token based in this pass.
+- The canonical remote planner surface remains relay HTTP plus relay-side MCP.
+- Daemon-local MCP remains secondary/compatibility-only and does not gain new planner-facing claims.
+- Add an operator-facing `codencer-connectord discover` command rather than overloading `list`.
+- Prove MCP interoperability with the official Go SDK and keep compatibility claims exact.
+- Make share/unshare propagate without connector restart by reloading config and re-advertising from the running connector.
+
+## Workstream Ownership
+- Lead:
+ - `docs/internal/v2_finish_log.md`
+ - integration, merge control, verification, final docs/smokes/truth pass
+- Worker `Aristotle`:
+ - `internal/app/*`
+ - daemon-facing API tests
+ - daemon/service tests only if route proof exposes a real mismatch
+- Worker `Maxwell`:
+ - `cmd/codencer-connectord/main.go`
+ - `internal/connector/*`
+ - connector tests
+ - `docs/CONNECTOR.md`
+- Worker `Fermat`:
+ - `cmd/codencer-relayd/*`
+ - `internal/relay/server.go`
+ - `internal/relay/router.go`
+ - `internal/relay/audit.go`
+ - `internal/relay/store/*`
+ - relay admin/integration tests
+ - `docs/RELAY.md`
+- Lead after relay merge:
+ - `internal/relay/mcp_server.go`
+ - `internal/relay/mcp_tools.go`
+ - MCP tests
+ - `docs/mcp/*`
+ - official SDK smoke helper
+ - multi-instance smoke/docs
+ - broker/docs naming pass
+
+## Merge Sequence
+1. Lock delta log and ownership
+2. Daemon HTTP proof hardening
+3. Connector discovery plus live share propagation
+4. Relay share-prune plus audit CLI
+5. MCP streamable HTTP maturity plus official SDK smoke
+6. Operator docs/scripts, multi-instance proof, broker truth pass
+7. Formatting, broad verification, final truthful assessment
+
+## Status
+- Merge 1: completed
+- Merge 2: completed
+- Merge 3: completed
+- Merge 4: completed
+- Merge 5: completed
+- Merge 6: completed
+- Merge 7: completed
+
+## Verification Ledger
+- Fresh audit re-confirmed:
+ - `go test ./...`
+ - `make build`
+ - `make build-broker`
+- Current repo state before delta work:
+ - branch: `codex/implement-codencer-v2`
+ - untracked artifact observed: `./orchestratord`
+- Required focused checks after each merge:
+ - daemon: `go test ./internal/app ./internal/service`
+ - connector: `go test ./cmd/codencer-connectord ./internal/connector`
+ - relay: `go test ./internal/relay ./cmd/codencer-relayd`
+ - final: `go test ./...`, `make build`, `make build-broker`, smoke matrix
+- Merge 2 verified:
+ - `go test ./internal/app ./internal/service`
+ - added direct route proof for gate approve/reject, run abort, and step result/validations/logs
+- Merge 3 verified:
+ - `go test ./cmd/codencer-connectord ./internal/connector`
+ - added `codencer-connectord discover`
+ - added live config reload plus re-advertise for share/unshare propagation without connector restart
+- Merge 4 verified:
+ - `go test ./internal/relay ./cmd/codencer-relayd`
+ - made connector advertise authoritative for relay-side shared-instance state
+ - pruned stale instance rows and route hints when share state shrinks
+ - added `codencer-relayd audit --limit N`
+- Merge 5 verified:
+ - `go test ./internal/relay ./cmd/codencer-relayd ./cmd/mcp-sdk-smoke`
+ - upgraded `/mcp` from a one-shot bootstrap to a session-bound SSE stream with keepalive comments
+ - added official Go SDK interoperability proof and a standalone `cmd/mcp-sdk-smoke` helper
+- Merge 6 verified:
+ - `bash -n scripts/self_host_smoke.sh`
+ - `go test ./cmd/mcp-sdk-smoke`
+ - `make build-mcp-sdk-smoke`
+ - `make build-broker`
+ - updated operator docs for `discover`, relay `audit`, broker naming truth, Go 1.25.0+, and the expanded smoke matrix
+- Merge 7 verified:
+ - `gofmt -w $(git diff --name-only -- '*.go')`
+ - `go mod tidy`
+ - `git diff --check`
+ - `go test ./...`
+ - `make build`
+ - `make build-broker`
+ - `make build-mcp-sdk-smoke`
+ - live isolated self-host smoke with `SMOKE_SCENARIOS=all,mcp-sdk` against:
+ - relay on `127.0.0.1:18090`
+ - simulation daemon on `127.0.0.1:18085`
+ - live smoke outcomes:
+ - primary run completed with result, validations, logs, artifacts, audit, MCP, share-control, multi-instance, and official SDK proof paths exercised
+ - optional gate action was skipped because the simulation path produced no gate
+ - optional abort path returned HTTP `500`, matching the repo's best-effort, fail-closed abort truth rather than guaranteed confirmed cancellation
+ - cleanup:
+ - removed stray untracked root artifact `./orchestratord`
+
+## Open Notes
+- No two write-capable workers may edit the same files concurrently.
+- MCP compatibility claims must stay exact; this repo now proves the official Go SDK path and manual JSON-RPC callers, not universal client compatibility.
+- Share/unshare remote invisibility is now proven without restarting the connector in the live smoke flow.
+- Live abort remains best-effort and may return HTTP `500` when cancellation cannot be confirmed quickly; that is expected truth, not a hidden regression.
diff --git a/docs/mcp/cloud_tools.md b/docs/mcp/cloud_tools.md
new file mode 100644
index 0000000..a82ef33
--- /dev/null
+++ b/docs/mcp/cloud_tools.md
@@ -0,0 +1,94 @@
+# Cloud MCP Tools
+
+Codencer exposes a tenant-scoped remote MCP surface from the cloud control plane in composed runtime mode.
+
+This page is about cloud tenancy mode.
+
+If you are operating the self-host relay directly without cloud tenancy, use relay `/mcp` instead and treat [Relay MCP Tools](relay_tools.md) as the source of truth for that boundary.
+
+## Endpoint
+
+Use the cloud MCP endpoint:
+
+- `POST /api/cloud/v1/mcp`
+- `GET /api/cloud/v1/mcp`
+- `DELETE /api/cloud/v1/mcp`
+
+Compatibility path:
+
+- `POST /api/cloud/v1/mcp/call`
+
+The cloud MCP server currently supports:
+
+- `initialize`
+- `notifications/initialized`
+- `tools/list`
+- `tools/call`
+
+## Tool List
+
+- `codencer.list_instances`
+- `codencer.get_instance`
+- `codencer.start_run`
+- `codencer.get_run`
+- `codencer.list_run_gates`
+- `codencer.submit_task`
+- `codencer.get_step`
+- `codencer.wait_step`
+- `codencer.get_step_result`
+- `codencer.list_step_artifacts`
+- `codencer.get_step_logs`
+- `codencer.get_artifact_content`
+- `codencer.get_step_validations`
+- `codencer.approve_gate`
+- `codencer.reject_gate`
+- `codencer.abort_run`
+- `codencer.retry_step`
+
+## Tool Rules
+
+- `codencer.list_instances` and `codencer.get_instance` require `runtime_instances:read`.
+- Mutating tools require explicit `instance_id`.
+- Tool calls respect the same tenant scope and runtime scopes as the cloud HTTP API.
+- Tool calls do not bypass claimed-runtime ownership or org/workspace/project visibility.
+- Routed `step`, `artifact`, and `gate` lookups stay inside the caller's authorized tenant-visible runtime set.
+- `approve_gate`, `reject_gate`, and `retry_step` require explicit `instance_id`.
+- `submit_task` accepts the real Codencer `TaskSpec` shape.
+- `wait_step` is bounded and takes explicit timeout input.
+- `list_run_gates` is the canonical gate-discovery tool for a known run and instance.
+- run listing remains HTTP-only in this phase; there is no `codencer.list_runs` tool.
+- `get_step_logs` returns collected step logs as explicit text or base64-safe content metadata.
+- `get_artifact_content` reads by `artifact_id` and returns text or base64-safe content metadata.
+- `abort_run` returns a successful tool result only when the daemon confirms the active step reached `cancelled`.
+- There is no raw shell tool.
+- There is no arbitrary filesystem browsing tool.
+
+## Transport Notes
+
+- `/api/cloud/v1/mcp` supports session-bound Streamable HTTP `GET`, `POST`, and `DELETE`
+- the cloud daemon returns `MCP-Protocol-Version`
+- the cloud daemon can return `MCP-Session-Id` on `initialize`
+- `GET /api/cloud/v1/mcp` keeps an SSE stream open for the negotiated session and emits keepalive comments
+- `POST /api/cloud/v1/mcp/call` remains as a compatibility alias for simple POST callers; `/api/cloud/v1/mcp` is still the canonical session path
+- the compatibility alias accepts both full JSON-RPC `tools/call` requests and the shorthand top-level `name` / `arguments` form
+- the Codencer tool model remains intentionally request/response-oriented even though the transport now supports a real SSE session
+- cloud MCP sessions are token-bound; a session cannot be reused by a different token, and revoked tokens are rejected across the cloud MCP surface
+
+## Proven Compatibility
+
+- verified in repo tests for initialize, tools/list, tools/call, stream bootstrap, session delete, browser-origin handling, compatibility aliasing, token-bound sessions, and revoked-token denial
+- verified in composed cloud smoke for initialize, list, call, and official Go SDK access
+- not overclaimed as universal client compatibility beyond the integrations directly exercised here
+
+## Payload Notes
+
+`codencer.get_step_logs` and `codencer.get_artifact_content` return structured metadata rather than raw path access.
+
+The payload includes:
+
+- `content_type`
+- `encoding`
+- `text` for textual content
+- `base64` for non-text content
+
+This matches the relay MCP shape so remote MCP clients do not need a different content model for cloud.
diff --git a/docs/mcp/examples/chatgpt-cloud.mcp.json b/docs/mcp/examples/chatgpt-cloud.mcp.json
new file mode 100644
index 0000000..3117906
--- /dev/null
+++ b/docs/mcp/examples/chatgpt-cloud.mcp.json
@@ -0,0 +1,6 @@
+{
+ "label": "codencer-cloud",
+ "endpoint": "https://<cloud-host>/api/cloud/v1/mcp",
+ "token": "",
+ "usage": "value-reference-only"
+}
diff --git a/docs/mcp/examples/chatgpt-relay.mcp.json b/docs/mcp/examples/chatgpt-relay.mcp.json
new file mode 100644
index 0000000..347565d
--- /dev/null
+++ b/docs/mcp/examples/chatgpt-relay.mcp.json
@@ -0,0 +1,6 @@
+{
+ "label": "codencer-relay",
+ "endpoint": "https://<relay-host>/mcp",
+ "token": "",
+ "usage": "value-reference-only"
+}
diff --git a/docs/mcp/examples/claude-code-cloud.mcp.json b/docs/mcp/examples/claude-code-cloud.mcp.json
new file mode 100644
index 0000000..54041a8
--- /dev/null
+++ b/docs/mcp/examples/claude-code-cloud.mcp.json
@@ -0,0 +1,11 @@
+{
+ "mcpServers": {
+ "codencer-cloud": {
+ "type": "http",
+ "url": "${CODENCER_CLOUD_MCP_URL:-http://127.0.0.1:8190/api/cloud/v1/mcp}",
+ "headers": {
+ "Authorization": "Bearer ${CODENCER_CLOUD_TOKEN}"
+ }
+ }
+ }
+}
diff --git a/docs/mcp/examples/claude-code-relay.mcp.json b/docs/mcp/examples/claude-code-relay.mcp.json
new file mode 100644
index 0000000..7590e92
--- /dev/null
+++ b/docs/mcp/examples/claude-code-relay.mcp.json
@@ -0,0 +1,11 @@
+{
+ "mcpServers": {
+ "codencer-relay": {
+ "type": "http",
+ "url": "${CODENCER_RELAY_MCP_URL:-http://127.0.0.1:8090/mcp}",
+ "headers": {
+ "Authorization": "Bearer ${CODENCER_PLANNER_TOKEN}"
+ }
+ }
+ }
+}
diff --git a/docs/mcp/examples/claude-desktop-cloud.mcp.json b/docs/mcp/examples/claude-desktop-cloud.mcp.json
new file mode 100644
index 0000000..5c93df7
--- /dev/null
+++ b/docs/mcp/examples/claude-desktop-cloud.mcp.json
@@ -0,0 +1,9 @@
+{
+ "name": "codencer-cloud",
+ "type": "http",
+ "url": "https://cloud.example.com/api/cloud/v1/mcp",
+ "headers": {
+ "Authorization": "Bearer <cloud-token>"
+ },
+ "tool_prefix": "codencer."
+}
diff --git a/docs/mcp/examples/claude-desktop-relay.mcp.json b/docs/mcp/examples/claude-desktop-relay.mcp.json
new file mode 100644
index 0000000..03c0189
--- /dev/null
+++ b/docs/mcp/examples/claude-desktop-relay.mcp.json
@@ -0,0 +1,9 @@
+{
+ "name": "codencer-relay",
+ "type": "http",
+ "url": "https://relay.example.com/mcp",
+ "headers": {
+ "Authorization": "Bearer <planner-token>"
+ },
+ "tool_prefix": "codencer."
+}
diff --git a/docs/mcp/examples/gemini-cli-relay.mcp.json b/docs/mcp/examples/gemini-cli-relay.mcp.json
new file mode 100644
index 0000000..6d480a2
--- /dev/null
+++ b/docs/mcp/examples/gemini-cli-relay.mcp.json
@@ -0,0 +1,10 @@
+{
+ "mcpServers": {
+ "codencer-relay": {
+ "httpUrl": "${CODENCER_RELAY_MCP_URL:-http://127.0.0.1:8090/mcp}",
+ "headers": {
+ "Authorization": "Bearer ${CODENCER_PLANNER_TOKEN}"
+ }
+ }
+ }
+}
diff --git a/docs/mcp/integrations.md b/docs/mcp/integrations.md
new file mode 100644
index 0000000..7468251
--- /dev/null
+++ b/docs/mcp/integrations.md
@@ -0,0 +1,191 @@
+# Planner / Client Integration Notes
+
+Codencer has two remote planner surfaces:
+
+- direct self-host relay: HTTP under `/api/v2/...` and MCP at `/mcp`
+- cloud tenancy in composed mode: HTTP under `/api/cloud/v1/runtime/...` and MCP at `/api/cloud/v1/mcp`
+
+Do not point ChatGPT, Claude, or another planner runtime at the local Codencer daemon directly.
+The daemon-local `/mcp/call` endpoint is only a local compatibility/admin bridge.
+
+Provider connectors such as GitHub, GitLab, Jira, Linear, and Slack are separate cloud integrations.
+They are not planner/client entrypoints.
+
+Local execution adapters such as `codex`, local `claude`, `qwen`, and `antigravity*` are also separate.
+They are executor-side adapters, not remote planner surfaces.
+
+## Surface Selection
+
+- Use relay `/api/v2/...` and relay `/mcp` when you are operating the self-host relay directly.
+- Use cloud `/api/cloud/v1/runtime/...` and cloud `/api/cloud/v1/mcp` when cloud tenancy is the public control plane.
+- Treat relay `/mcp/call`, cloud `/api/cloud/v1/mcp/call`, and daemon `/mcp/call` as compatibility POST aliases rather than the primary session contract.
+
+## Compatibility Matrix
+
+| Path | Canonical surface | Status | Direct repo proof | Notes |
+| --- | --- | --- | --- | --- |
+| Relay HTTP | relay `/api/v2/...` | `proven` | relay integration tests + self-host smoke | Narrow instance-scoped planner API. |
+| Relay MCP | relay `/mcp` | `proven` | relay MCP tests + self-host MCP smoke | Session-bound Streamable HTTP plus JSON-RPC POST. |
+| Cloud HTTP | cloud `/api/cloud/v1/runtime/...` | `proven` | cloud runtime tests + composed cloud smoke | Tenant-scoped only in composed mode. |
+| Cloud MCP | cloud `/api/cloud/v1/mcp` | `proven` | cloud MCP tests + composed cloud smoke | Tenant-scoped only in composed mode. |
+| Official Go SDK | relay `/mcp` and cloud `/api/cloud/v1/mcp` | `proven` | MCP server tests + `cmd/mcp-sdk-smoke` + smoke | Proven for MCP only, not for the REST HTTP APIs. |
+| Generic HTTP clients | relay/cloud HTTP surfaces | `proven` | direct `net/http` tests + `curl` smoke | Plain bearer-token JSON callers are the intended HTTP baseline. |
+| Generic MCP clients | relay `/mcp` and cloud `/api/cloud/v1/mcp` | `expected-only` | protocol behavior is repo-proven, but specific client products are not | Do not turn this into a universal desktop/client compatibility claim. |
+| ChatGPT-style remote MCP path | relay `/mcp` or cloud `/api/cloud/v1/mcp` | `compatibility-only` | docs only | Remote MCP only. Use the relay/cloud surface, not the local daemon, and see [integrations/chatgpt.md](integrations/chatgpt.md) for the operator walkthrough. |
+| Claude-style remote MCP path | relay `/mcp` or cloud `/api/cloud/v1/mcp` | `compatibility-only` | docs only | Planner-side remote connector flow only. Separate from the local `claude` executor-side adapter and from local `claude_desktop_config.json`. See [integrations/claude.md](integrations/claude.md). |
+| Gemini CLI remote MCP path | relay `/mcp` or cloud `/api/cloud/v1/mcp` | `expected-only` | docs only | Use Gemini CLI `httpUrl` plus bearer-token headers against the remote MCP surface, not the local daemon. This repo does not directly exercise Gemini CLI product setup. See [integrations/gemini-cli.md](integrations/gemini-cli.md). |
+| Daemon-local MCP | daemon `/mcp/call` | `compatibility-only` | local package tests only | Local compatibility/admin bridge, not the public planner contract. |
+| Local daemon as a public remote MCP target | none | `unsupported` | none | Keep remote planners on relay or cloud, not on the daemon directly. |
+
+## Repo-Proven Entry Points
+
+Relay-side proof:
+
+```bash
+PLANNER_TOKEN=<planner-token> make self-host-smoke
+PLANNER_TOKEN=<planner-token> make self-host-smoke-mcp
+PLANNER_TOKEN=<planner-token> SMOKE_SCENARIOS=status,audit,share-control,mcp,mcp-sdk make self-host-smoke
+```
+
+Cloud-side proof:
+
+```bash
+make cloud-smoke
+make build-cloud build-mcp-sdk-smoke
+CLOUD_RELAY_CONFIG=.codencer/relay/config.json \
+CLOUD_RUNTIME_DAEMON_URL=http://127.0.0.1:8080 \
+CLOUD_SMOKE_MCP=1 \
+CLOUD_SMOKE_SDK=1 \
+make cloud-smoke
+```
+
+Standalone official Go SDK proof:
+
+```bash
+make build-mcp-sdk-smoke
+./bin/mcp-sdk-smoke --endpoint http://127.0.0.1:8090/mcp --token <planner-token> --instance-id <instance-id>
+./bin/mcp-sdk-smoke --endpoint http://127.0.0.1:8190/api/cloud/v1/mcp --token <cloud-token> --instance-id <instance-id>
+```
+
+## Generic HTTP Examples
+
+Relay HTTP:
+
+```bash
+curl -fsS \
+ -H "Authorization: Bearer <planner-token>" \
+ -H "Content-Type: application/json" \
+ -d '{"id":"relay-http-demo","project_id":"demo-project"}' \
+ http://127.0.0.1:8090/api/v2/instances/<instance-id>/runs
+
+curl -fsS \
+ -H "Authorization: Bearer <planner-token>" \
+ -H "Content-Type: application/json" \
+ -d '{"version":"v1","goal":"Verify relay HTTP planner path","adapter_profile":"codex"}' \
+ http://127.0.0.1:8090/api/v2/instances/<instance-id>/runs/relay-http-demo/steps
+```
+
+Cloud HTTP:
+
+```bash
+curl -fsS \
+ -H "Authorization: Bearer <cloud-token>" \
+ -H "Content-Type: application/json" \
+ -d '{"id":"cloud-http-demo","project_id":"demo-project"}' \
+ http://127.0.0.1:8190/api/cloud/v1/runtime/instances/<instance-id>/runs
+
+curl -fsS \
+ -H "Authorization: Bearer <cloud-token>" \
+ -H "Content-Type: application/json" \
+ -d '{"version":"v1","goal":"Verify cloud HTTP planner path","adapter_profile":"codex"}' \
+ http://127.0.0.1:8190/api/cloud/v1/runtime/instances/<instance-id>/runs/cloud-http-demo/steps
+```
+
+These bearer-token HTTP examples are repo-proven through direct tests and smoke.
+
+## Generic MCP Examples
+
+For the full tool lists and input rules, see [Relay MCP Tools](relay_tools.md) and [Cloud MCP Tools](cloud_tools.md).
+
+Minimal relay initialize plus compatibility-call flow:
+
+```bash
+curl -fsS -D /tmp/codencer-mcp-headers.txt \
+ -H "Authorization: Bearer <planner-token>" \
+ -H "Content-Type: application/json" \
+ -H "MCP-Protocol-Version: 2025-11-25" \
+ -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-11-25"}}' \
+ http://127.0.0.1:8090/mcp
+
+SESSION_ID="$(awk -F': ' 'tolower($1)=="mcp-session-id" {gsub("\r", "", $2); print $2}' /tmp/codencer-mcp-headers.txt)"
+
+curl -fsS \
+ -H "Authorization: Bearer <planner-token>" \
+ -H "Content-Type: application/json" \
+ -H "MCP-Session-Id: ${SESSION_ID}" \
+ -H "MCP-Protocol-Version: 2025-11-25" \
+ -d '{"jsonrpc":"2.0","id":2,"name":"codencer.list_instances","arguments":{}}' \
+ http://127.0.0.1:8090/mcp/call
+```
+
+Minimal cloud initialize plus compatibility-call flow:
+
+```bash
+curl -fsS -D /tmp/codencer-cloud-mcp-headers.txt \
+ -H "Authorization: Bearer <CLOUD_TOKEN>" \
+ -H "Content-Type: application/json" \
+ -H "MCP-Protocol-Version: 2025-11-25" \
+ -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-11-25"}}' \
+ http://127.0.0.1:8190/api/cloud/v1/mcp
+
+SESSION_ID="$(awk -F': ' 'tolower($1)=="mcp-session-id" {gsub("\r", "", $2); print $2}' /tmp/codencer-cloud-mcp-headers.txt)"
+
+curl -fsS \
+ -H "Authorization: Bearer <CLOUD_TOKEN>" \
+ -H "Content-Type: application/json" \
+ -H "MCP-Session-Id: ${SESSION_ID}" \
+ -H "MCP-Protocol-Version: 2025-11-25" \
+ -d '{"jsonrpc":"2.0","id":2,"name":"codencer.list_instances","arguments":{}}' \
+ http://127.0.0.1:8190/api/cloud/v1/mcp/call
+```
+
+Notes:
+
+- `/mcp` and `/api/cloud/v1/mcp` are the canonical session paths.
+- `POST /mcp/call` and `POST /api/cloud/v1/mcp/call` are compatibility aliases for simple POST callers.
+- The alias accepts both full JSON-RPC `tools/call` bodies and the shorthand top-level `name` / `arguments` form shown above.
+- `GET` streaming requires `Accept: text/event-stream`, `MCP-Session-Id`, and the negotiated `MCP-Protocol-Version`.
+- `DELETE` session close also requires `MCP-Session-Id`.
+- `notifications/initialized` is accepted after `initialize`, but the current repo proof helpers do not depend on it.
+
+## Checked-In MCP Config Examples
+
+For project-scoped Claude Code style HTTP MCP configuration, use the checked-in examples:
+
+- [examples/claude-code-relay.mcp.json](examples/claude-code-relay.mcp.json)
+- [examples/claude-code-cloud.mcp.json](examples/claude-code-cloud.mcp.json)
+
+Those examples use the relay and cloud canonical MCP URLs, plus environment-variable-driven bearer headers.
+They are packaging examples, not repo-executed Claude product proof.
+
+For the narrow operator flow that uses the repo's actual `codencer.*` MCP tool names in ChatGPT, see [integrations/chatgpt.md](integrations/chatgpt.md). The checked-in [examples/chatgpt-relay.mcp.json](examples/chatgpt-relay.mcp.json) and [examples/chatgpt-cloud.mcp.json](examples/chatgpt-cloud.mcp.json) are value-reference templates for ChatGPT app setup, not direct ChatGPT imports.
+
+For the current Claude Desktop and `claude.ai` operator walkthrough, see [integrations/claude.md](integrations/claude.md). It keeps the planner-side remote connector flow separate from the executor-side adapter story, points operators to Anthropic's current `Customize > Connectors` or organization `Settings > Connectors` flow, and calls out that `claude_desktop_config.json` is the separate local-MCP mechanism rather than the remote connector path.
+
+For Gemini CLI style remote HTTP MCP configuration, see [integrations/gemini-cli.md](integrations/gemini-cli.md) and [examples/gemini-cli-relay.mcp.json](examples/gemini-cli-relay.mcp.json). This remains an `expected-only` packaging path aligned to the current official Gemini CLI docs, not a repo-executed product proof. The local environment for this documentation pass did not have `gemini` installed, so this repo does not claim local Gemini CLI validation here.
+
+## ChatGPT-Style And Anthropic API Paths
+
+These remain `compatibility-only` in Codencer's beta contract.
+They are documented patterns, not directly exercised repo integrations.
+
+Current external platform references:
+
+- OpenAI ChatGPT developer mode currently documents remote MCP support and app setup flows through developer mode. Follow the current official OpenAI docs when wiring ChatGPT-style or OpenAI API clients to relay `/mcp` or cloud `/api/cloud/v1/mcp`.
+- Anthropic currently documents remote custom connectors for Claude Desktop and `claude.ai`, plus separate local/project MCP configuration for Claude Code. Follow the current official Anthropic docs when wiring Claude-style clients to relay `/mcp` or cloud `/api/cloud/v1/mcp`.
+
+Keep these claims narrow:
+
+- this repo proves the Codencer relay/cloud MCP protocol surfaces directly
+- this repo does not prove every vendor client UI, approval flow, or publication workflow
+- this repo does not turn the local daemon into a public remote MCP target
diff --git a/docs/mcp/integrations/chatgpt.md b/docs/mcp/integrations/chatgpt.md
new file mode 100644
index 0000000..86f35a5
--- /dev/null
+++ b/docs/mcp/integrations/chatgpt.md
@@ -0,0 +1,318 @@
+# ChatGPT — Codencer Beta Walkthrough
+
+This walkthrough is frozen to the Codencer `v0.2.0-beta` Wave 2 contract and the OpenAI docs linked here as checked on 2026-04-24.
+
+Use this page together with [Beta Testing](../../BETA_TESTING.md) and [Planner / Client Integration Notes](../integrations.md).
+
+## Status
+
+Codencer status for the ChatGPT path is `compatibility-only`.
+
+What that means in practice:
+
+- Codencer proves the relay/cloud MCP protocol surfaces directly.
+- Codencer does not claim repo-executed proof of the ChatGPT product UI or deployment flow.
+- ChatGPT must target the remote relay `/mcp` surface or the remote cloud `/api/cloud/v1/mcp` surface.
+- Do not point ChatGPT at the daemon-local `/mcp/call` endpoint.
+
+## Prerequisites
+
+Before you open ChatGPT, have all of the following ready:
+
+- A running self-host relay or a running self-host cloud control plane in composed runtime mode.
+- A remote MCP URL:
+ - relay: `https://<RELAY_HOST>/mcp`
+ - cloud: `https://<CLOUD_HOST>/api/cloud/v1/mcp`
+- A Codencer token for that surface:
+ - relay: planner token
+ - cloud: cloud token
+- At least one reachable shared runtime instance:
+ - relay path: shared through the connector and visible from relay `/mcp`
+ - cloud path: claimed into org/workspace/project scope and visible from cloud `/api/cloud/v1/mcp`
+- For cloud, token scopes that cover runtime discovery and run execution. `codencer.list_instances` requires `runtime_instances:read`.
+- A ChatGPT web account where developer mode is available to your user/workspace.
+
+OpenAI plan availability wording is not fully identical across the two current docs:
+
+- the developer guide says developer mode is available in beta on ChatGPT web for Pro, Plus, Business, Enterprise, and Education accounts
+- the Help Center article says apps, full MCP support, and developer mode are available for Business and Enterprise/Edu
+
+Verify current eligibility before rollout in the live OpenAI docs:
+
+- [ChatGPT Developer mode](https://developers.openai.com/api/docs/guides/developer-mode)
+- [Developer mode, and MCP apps in ChatGPT [beta]](https://help.openai.com/en/articles/12584461)
+
+## Step 1 Choose the Codencer surface
+
+Pick one public MCP surface and stay on it for the whole setup:
+
+- Relay self-host path: [Self-Host Relay / Runtime Reference](../../SELF_HOST_REFERENCE.md) and [Relay MCP Tools](../relay_tools.md)
+- Cloud self-host path: [Self-Host Cloud Control Plane Guide](../../CLOUD_SELF_HOST.md) and [Cloud MCP Tools](../cloud_tools.md)
+
+Do not use:
+
+- daemon-local `/mcp/call`
+- `http://127.0.0.1:8085/...`
+- a localhost-only relay or cloud URL that ChatGPT cannot reach remotely
+
+## Step 2 Enable developer mode in ChatGPT
+
+Use the current OpenAI UI path for your plan and verify it against the live docs before rollout.
+
+Current OpenAI guidance checked on 2026-04-24:
+
+- Admin workspace enablement path in the Help Center:
+ - `Workspace Settings -> Permissions & Roles -> Connected Data Developer mode / Create custom MCP connectors`
+- User-level toggle path in the developer guide:
+ - `Settings -> Apps -> Advanced settings -> Developer mode`
+- Enterprise/Edu user path in the Help Center:
+ - `Settings -> Apps -> Advanced Settings -> Developer mode`
+- Create-app entry point after developer mode is enabled:
+ - `Settings -> Apps -> Create`
+ - or workspace-admin path `Workspace Settings -> Apps -> Create`
+
+OpenAI's current references:
+
+- [ChatGPT Developer mode](https://developers.openai.com/api/docs/guides/developer-mode)
+- [Developer mode, and MCP apps in ChatGPT [beta]](https://help.openai.com/en/articles/12584461)
+
+Keep the claim narrow: OpenAI currently documents ChatGPT as a remote MCP client surface. This Codencer walkthrough does not imply local daemon support in ChatGPT.
+
+## Step 3 Create the ChatGPT app entry
+
+In the ChatGPT create flow, use the Codencer remote MCP endpoint for the surface you chose in Step 1.
+
+Operator inputs:
+
+- relay endpoint: `https://<RELAY_HOST>/mcp`
+- cloud endpoint: `https://<CLOUD_HOST>/api/cloud/v1/mcp`
+- relay token: `<PLANNER_TOKEN>`
+- cloud token: `<CLOUD_TOKEN>`
+
+The checked-in example files are operator value references only:
+
+- [chatgpt-relay.mcp.json](../examples/chatgpt-relay.mcp.json)
+- [chatgpt-cloud.mcp.json](../examples/chatgpt-cloud.mcp.json)
+
+They are not direct ChatGPT imports. ChatGPT developer mode currently asks you to enter the endpoint and auth/metadata through its own UI. Use the files as copy-pasteable reference values, then verify the exact current ChatGPT auth fields in the OpenAI docs above before rollout.
+
+## Step 4 Run the first tool call from ChatGPT
+
+Start with discovery, not execution.
+
+Ask ChatGPT to call this exact tool first:
+
+- `codencer.list_instances`
+
+Tool arguments:
+
+```json
+{}
+```
+
+Recommended prompt:
+
+```text
+Use only the Codencer app for this turn. Call codencer.list_instances with {} and show me the JSON response.
+```
+
+Expected JSON shape:
+
+```json
+[
+ {
+ "instance_id": "inst-"
+ }
+]
+```
+
+Treat that as the minimum contract you should rely on from ChatGPT.
+
+Additional fields differ by surface:
+
+- relay commonly returns fields such as `connector_id`, `repo_root`, `base_url`, `online`, `status`, `last_seen_at`, and nested `instance`
+- cloud commonly returns tenant-scoped runtime instance fields such as `org_id`, `workspace_id`, `project_id`, `runtime_connector_installation_id`, `repo_root`, `status`, `enabled`, `health`, and `shared`
+
+For the walkthrough, you only need one returned object with a non-empty `instance_id`.
+
+## Step 5 Start a run
+
+After you have a target `instance_id`, ask ChatGPT to call `codencer.start_run`.
+
+Tool arguments:
+
+```json
+{
+ "instance_id": "",
+ "payload": {
+ "id": "chatgpt-smoke-001",
+ "project_id": "chatgpt-smoke"
+ }
+}
+```
+
+Recommended prompt:
+
+```text
+Use only the Codencer app. Call codencer.start_run with the selected instance_id and payload {"id":"chatgpt-smoke-001","project_id":"chatgpt-smoke"}.
+```
+
+`codencer.start_run` is the correct Codencer MCP tool name. There is no ChatGPT-specific alias in this repo.
+
+## Step 6 Submit the task
+
+Use the real Codencer `TaskSpec` shape through `codencer.submit_task`.
+
+For a repeatable compatibility smoke, prefer a simulation task so you can validate the ChatGPT-to-Codencer wiring without depending on a live adapter binary:
+
+```json
+{
+ "instance_id": "",
+ "run_id": "chatgpt-smoke-001",
+ "task": {
+ "version": "v1",
+ "goal": "Compatibility smoke only. Return the repository root in the final summary. Do not edit files.",
+ "is_simulation": true
+ }
+}
+```
+
+Recommended prompt:
+
+```text
+Use only the Codencer app. Call codencer.submit_task with the selected instance_id, run_id "chatgpt-smoke-001", and the simulation task payload. Then show me the returned step JSON.
+```
+
+Expected response shape is a step object. In current repo proof, the minimum fields you should rely on are:
+
+```json
+{
+ "id": "step-",
+ "state": "queued"
+}
+```
+
+Record the returned `id` as `step_id`.
+
+If you want a live execution attempt instead of a compatibility smoke, remove `is_simulation` and add the appropriate `adapter_profile`. That is a separate operator decision and is not required for this walkthrough.
+
+## Step 7 Wait for completion and inspect the result
+
+Use the real Codencer polling/result tools:
+
+1. `codencer.wait_step`
+2. `codencer.get_step_result`
+
+There is no separate `get_result` tool in this repo. The actual result tool name is `codencer.get_step_result`.
+
+Suggested `codencer.wait_step` arguments:
+
+```json
+{
+ "step_id": "",
+ "timeout_ms": 5000,
+ "interval_ms": 100
+}
+```
+
+Suggested `codencer.get_step_result` arguments:
+
+```json
+{
+ "step_id": ""
+}
+```
+
+Recommended prompt:
+
+```text
+Use only the Codencer app. Call codencer.wait_step for the returned step_id with timeout_ms 5000 and interval_ms 100. When it becomes terminal, call codencer.get_step_result for the same step_id and show me the JSON result.
+```
+
+Expected `wait_step` shape:
+
+```json
+{
+ "step_id": "step-",
+ "state": "completed",
+ "terminal": true,
+ "timed_out": false
+}
+```
+
+Expected `get_step_result` shape:
+
+```json
+{
+ "version": "v1",
+ "run_id": "chatgpt-smoke-001",
+ "step_id": "step-",
+ "state": "completed",
+ "summary": "..."
+}
+```
+
+If you need deeper evidence after that, keep using the Codencer MCP tools already frozen in this repo:
+
+- `codencer.get_step_validations`
+- `codencer.get_step_logs`
+- `codencer.list_step_artifacts`
+- `codencer.list_run_gates`
+
+## Verification smoke
+
+Use this as the repeatable operator smoke for the ChatGPT surface:
+
+1. In ChatGPT, select only the Codencer app for the conversation.
+2. Call `codencer.list_instances` with `{}`.
+3. Confirm the response includes at least one object with a non-empty `instance_id`.
+4. Call `codencer.start_run` with a unique run id such as `chatgpt-smoke-001`.
+5. Call `codencer.submit_task` with the simulation task shown above.
+6. Confirm the submit response returns a non-empty `step_id` in the step object's `id` field.
+7. Call `codencer.wait_step` until `terminal` is `true`.
+8. Call `codencer.get_step_result`.
+9. Confirm the final result includes a terminal state and a non-empty `summary`.
+
+This smoke proves the ChatGPT remote MCP wiring narrowly. It does not upgrade the Codencer status on this surface beyond `compatibility-only`.
+
+For the repo-level beta proof boundaries, keep using [Beta Testing](../../BETA_TESTING.md) and [Planner / Client Integration Notes](../integrations.md).
+
+## Known Limitations on this surface
+
+- This is a compatibility-only Codencer path, not direct ChatGPT product proof.
+- ChatGPT is documented here only as a remote MCP client. Do not infer local daemon support.
+- OpenAI's plan-availability wording differs between the two current docs as of 2026-04-24. Verify current eligibility before rollout.
+- OpenAI owns the ChatGPT UI, tool approval UX, draft/publish flow, and any product-side restrictions.
+- The example `.mcp.json` files in this repo are value-reference templates only, not direct ChatGPT imports.
+- Cloud mode requires composed runtime mode plus a claimed runtime instance. A relay planner token does not replace a cloud token, and a cloud token does not replace relay planner auth.
+- Write-style tools in ChatGPT may require explicit confirmation. Plan for operator review when using mutating Codencer tools.
+
+## Troubleshooting
+
+If the app does not appear in ChatGPT:
+
+- Re-check the exact developer mode path in the OpenAI docs.
+- Confirm developer mode is enabled for the current user, not only at the workspace level.
+- Refresh the app entry from ChatGPT app settings after tool metadata changes.
+
+If `codencer.list_instances` returns an empty array:
+
+- relay path: confirm the connector is enrolled, running, and the instance is explicitly shared
+- cloud path: confirm the runtime connector is claimed and the instance is tenant-visible
+- confirm you targeted the relay `/mcp` or cloud `/api/cloud/v1/mcp` endpoint, not the daemon-local surface
+
+If ChatGPT can connect but tool calls fail with auth or scope errors:
+
+- relay path: verify the planner token and the relay scopes required for the tool
+- cloud path: verify the cloud token and runtime scopes, including `runtime_instances:read` for discovery
+
+If `codencer.wait_step` times out:
+
+- call `codencer.get_step_result` and `codencer.get_step_logs` directly for more evidence
+- increase `timeout_ms` for slow environments
+- if you removed `is_simulation`, confirm the target adapter is actually installed and usable on the daemon host
+
+If you accidentally pointed ChatGPT at a local-only URL:
+
+- stop and move the configuration to a remotely reachable relay or cloud host
+- do not expose the daemon-local `/mcp/call` bridge as the public ChatGPT target
diff --git a/docs/mcp/integrations/claude.md b/docs/mcp/integrations/claude.md
new file mode 100644
index 0000000..eed2905
--- /dev/null
+++ b/docs/mcp/integrations/claude.md
@@ -0,0 +1,185 @@
+# Claude Desktop and claude.ai — Codencer Beta Walkthrough
+
+## Status
+
+Codencer treats this surface as `compatibility-only` in `v0.2.0-beta`.
+
+The only planner-side Codencer surfaces for Anthropic products are:
+
+- relay MCP at `/mcp`
+- cloud MCP at `/api/cloud/v1/mcp`
+
+Keep the distinction explicit:
+
+- Claude Desktop and `claude.ai` remote custom connectors are a planner-side flow.
+- The local `claude` adapter in Codencer is an executor-side adapter.
+- Claude Code is a separate product path with its own local/project MCP configuration model.
+
+For the frozen beta boundary, start with [Public Beta Test Tracks](../../BETA_TESTING.md) and [Planner / Client Integration Notes](../integrations.md).
+
+## Prerequisites
+
+- A working Codencer self-host relay from [Self-Host Relay / Runtime Reference](../../SELF_HOST_REFERENCE.md) or a working composed cloud control plane from [Codencer Self-Host Cloud Control Plane Guide](../../CLOUD_SELF_HOST.md).
+- A public HTTPS URL for the Codencer planner-side MCP surface:
+ - relay: `https://<RELAY_HOST>/mcp`
+ - cloud: `https://<CLOUD_HOST>/api/cloud/v1/mcp`
+- A valid Codencer bearer token for that surface:
+ - relay planner token for relay `/mcp`
+ - cloud API token for cloud `/api/cloud/v1/mcp`
+- A Claude account with access to remote custom connectors in Claude Desktop or `claude.ai`.
+
+Anthropic's current help states that remote custom connectors are brokered through your Claude account and connect from Anthropic's cloud infrastructure, not from your local machine. Verify against:
+
+- [Get started with custom connectors using remote MCP](https://support.claude.com/en/articles/11175166-get-started-with-custom-connectors-using-remote-mcp)
+- [Build custom connectors via remote MCP servers](https://support.claude.com/en/articles/11503834-building-custom-connectors-via-remote-mcp-servers)
+
+That means `http://127.0.0.1`, `http://localhost`, a WSL-only loopback URL, or a daemon-local endpoint is not the correct target for this planner-side path.
+
+## Step 1 Choose the correct Codencer surface
+
+Pick one planner-side Codencer MCP surface and stay on it:
+
+- relay mode: `/mcp`
+- cloud tenancy mode: `/api/cloud/v1/mcp`
+
+Do not point Claude Desktop or `claude.ai` at:
+
+- daemon-local `/mcp/call`
+- relay `/mcp/call` as the canonical target
+- cloud `/api/cloud/v1/mcp/call` as the canonical target
+
+The compatibility-call routes remain secondary aliases. The canonical session paths are documented in [Relay MCP Tools](../relay_tools.md) and [Cloud MCP Tools](../cloud_tools.md).
+
+When the server is reachable and authorized, the tool set exposed to Anthropic clients is the repo-defined `codencer.*` namespace, such as `codencer.list_instances`, `codencer.start_run`, and `codencer.submit_task`.
+
+## Step 2 Keep the product boundary straight
+
+This walkthrough is for Claude Desktop and `claude.ai` remote custom connectors.
+
+It is not the Claude Code setup flow.
+
+Current Anthropic docs show two separate mechanisms:
+
+- Remote custom connectors for Claude Desktop and `claude.ai` are added through the product UI under `Customize > Connectors` or organization `Settings > Connectors`.
+- `claude_desktop_config.json` is the separate local-MCP mechanism for the Claude Desktop chat app.
+- If you are looking for the local-MCP file specifically, the common desktop locations are `~/Library/Application Support/Claude/claude_desktop_config.json` on macOS and `%APPDATA%\Claude\claude_desktop_config.json` on Windows.
+
+Current Anthropic references to verify this distinction:
+
+- [Get started with custom connectors using remote MCP](https://support.claude.com/en/articles/11175166-get-started-with-custom-connectors-using-remote-mcp)
+- [Connect Claude Code to tools via MCP](https://code.claude.com/docs/en/mcp)
+- [Use Claude Code Desktop](https://code.claude.com/docs/en/desktop)
+
+Do not publish or rely on a remote-connector-via-`claude_desktop_config.json` claim. That would be false as of 2026-04-24.
+
+## Step 3 Make the endpoint reachable from Anthropic
+
+Before you touch the Claude UI, verify the Codencer endpoint shape operationally:
+
+- self-host relay operators should use the relay path in [Self-Host Relay / Runtime Reference](../../SELF_HOST_REFERENCE.md)
+- cloud operators should use the composed runtime path in [Codencer Self-Host Cloud Control Plane Guide](../../CLOUD_SELF_HOST.md)
+
+Operational checks:
+
+- the URL is public and reachable from Anthropic's cloud
+- the endpoint is relay `/mcp` or cloud `/api/cloud/v1/mcp`
+- the local daemon is not exposed directly
+- connector sharing and runtime claiming are already correct on the Codencer side
+
+Anthropic's help also states that private-network, VPN-only, or firewall-blocked endpoints will not connect unless Anthropic's source IP ranges are allowed. Use the help article above as the product-side source of truth.
+
+## Step 4 Add the remote connector in Claude Desktop or claude.ai
+
+Use Anthropic's current remote connector flow exactly as documented by Anthropic.
+
+For Pro and Max plans, Anthropic currently documents:
+
+1. Open `Customize > Connectors`.
+2. Choose `Add custom connector`.
+3. Enter the remote MCP server URL.
+4. Optionally provide OAuth client settings if your server expects them.
+
+For Team and Enterprise plans, Anthropic currently documents:
+
+1. An owner adds the connector in organization `Settings > Connectors`.
+2. Individual members then enable it from `Customize > Connectors`.
+
+Codencer-specific values to carry into that UI:
+
+- relay URL: `https://<RELAY_HOST>/mcp`
+- cloud URL: `https://<CLOUD_HOST>/api/cloud/v1/mcp`
+
+Use the checked-in JSON files only as value-reference examples:
+
+- [claude-desktop-relay.mcp.json](../examples/claude-desktop-relay.mcp.json)
+- [claude-desktop-cloud.mcp.json](../examples/claude-desktop-cloud.mcp.json)
+
+They are not direct import artifacts for the Anthropic remote connector UI.
+
+## Step 5 Reconcile the current auth reality before expecting success
+
+Codencer's current public self-host auth model on these planner-side surfaces is bearer-token based.
+
+That is visible throughout the Codencer operator docs:
+
+- relay examples use `Authorization: Bearer <PLANNER_TOKEN>`
+- cloud examples use `Authorization: Bearer <CLOUD_TOKEN>`
+
+Anthropic's current public help for remote custom connectors documents entering the server URL and, optionally, OAuth client settings. It does not document a raw static header field for injecting a Codencer bearer token through the remote connector UI.
+
+Treat that as a real compatibility boundary:
+
+- Codencer's remote MCP surface is repo-documented and repo-tested.
+- Anthropic's product-side remote connector UI flow is current external platform behavior.
+- The combination remains `compatibility-only` because this repo does not directly prove Anthropic's product-specific auth and setup path.
+
+If your Anthropic-side connector setup requires OAuth and your Codencer endpoint only accepts static bearer tokens, you may need an operator-owned auth front door or another compatibility layer before the product flow can succeed end to end.
+
+## Step 6 Enable the connector for a conversation
+
+Once the connector is added successfully in Anthropic's UI:
+
+1. Open a Claude Desktop or `claude.ai` conversation.
+2. Enable the connector from the conversation's connectors/tools picker.
+3. Ask Claude to list the available Codencer tools or to call `codencer.list_instances`.
+
+Expected shape:
+
+- Claude sees `codencer.*` tools from the relay or cloud MCP server
+- Claude operates against the shared instance list already authorized in Codencer
+- Claude does not gain raw shell or arbitrary filesystem access through this path
+
+## Verification smoke
+
+Run Codencer-side proof before doing any Anthropic product check:
+
+- Relay path: [Public Beta Test Tracks](../../BETA_TESTING.md) and [Planner / Client Integration Notes](../integrations.md) point to `PLANNER_TOKEN=<token> make self-host-smoke-mcp`
+- Cloud path: use the composed cloud proof in [Codencer Self-Host Cloud Control Plane Guide](../../CLOUD_SELF_HOST.md)
+
+Then perform a narrow operator smoke in Claude Desktop or `claude.ai`:
+
+1. Enable the connector for one conversation.
+2. Ask Claude to call `codencer.list_instances`.
+3. Confirm that the returned instances match the Codencer relay or cloud visibility you already proved outside the Anthropic UI.
+
+Keep the claim narrow:
+
+- Codencer proves the relay/cloud MCP surface.
+- This repo does not prove Anthropic's product UI flow end to end.
+
+## Known Limitations on this surface
+
+- This path is `compatibility-only`, not direct product proof.
+- The local `claude` adapter is an executor-side adapter and does not convert this planner-side path into a repo-proven Anthropic integration.
+- Claude Code is separate. Its local/project MCP setup is documented elsewhere and should not be conflated with Claude Desktop or `claude.ai` remote custom connectors.
+- `claude_desktop_config.json` is a separate local-MCP mechanism and is not the configuration source for Anthropic remote custom connectors.
+- Codencer's documented public planner auth is currently static bearer-token based. Anthropic's current remote custom connector help centers on URL plus optional OAuth settings.
+- The local daemon remains unsupported as a public remote planner target.
+
+## Troubleshooting
+
+- Connector cannot reach the server: verify that the target is a public relay or cloud MCP URL, not `localhost`, not WSL loopback, and not the daemon-local MCP path.
+- Connector is added but tool calls fail with auth errors: re-check the current auth mismatch between Anthropic's documented remote custom connector setup and Codencer's current bearer-token requirement.
+- No instances appear: verify relay sharing or cloud runtime claiming first on the Codencer side before debugging the Anthropic client.
+- You only see local MCP docs: you are likely in the Claude Code or Claude Desktop local-MCP path. Return to Anthropic's remote connector help and use the product UI flow instead.
+- The doc examples look importable: they are intentionally value-reference files only. Anthropic's product UI and docs remain the source of truth for actual setup steps.
diff --git a/docs/mcp/integrations/gemini-cli.md b/docs/mcp/integrations/gemini-cli.md
new file mode 100644
index 0000000..186282f
--- /dev/null
+++ b/docs/mcp/integrations/gemini-cli.md
@@ -0,0 +1,197 @@
+# Gemini CLI → Codencer Beta Walkthrough
+
+## Status
+
+This path is `expected-only` in the current Codencer beta contract. The Codencer relay MCP surface itself is repo-proven; Gemini CLI product setup is documented here as an operator packaging path aligned to the current official Gemini CLI references, not as a repo-executed product proof.
+
+This pass was written on `2026-04-24`. The local environment for this pass did not have `gemini` installed, so nothing in this walkthrough was locally validated from this host. Treat the commands and expected outcomes below as doc-aligned operator guidance.
+
+Current references used for this page:
+
+- Codencer beta boundaries: [../../BETA_TESTING.md](../../BETA_TESTING.md)
+- Codencer planner/client matrix: [../integrations.md](../integrations.md)
+- Codencer self-host relay/runtime flow: [../../SELF_HOST_REFERENCE.md](../../SELF_HOST_REFERENCE.md)
+- Codencer relay MCP tools: [../relay_tools.md](../relay_tools.md)
+- Gemini CLI configuration reference: [configuration.md](https://github.com/google-gemini/gemini-cli/blob/main/docs/reference/configuration.md)
+- Gemini CLI MCP server reference: [mcp-server.md](https://github.com/google-gemini/gemini-cli/blob/main/docs/tools/mcp-server.md)
+- Gemini CLI releases: [releases](https://github.com/google-gemini/gemini-cli/releases)
+
+The current stable Gemini CLI release referenced from the official release docs was `v0.39.1`, published on `2026-04-24`.
+
+## Prerequisites
+
+Before you wire Gemini CLI to Codencer, have the relay path working first:
+
+- a running Codencer relay exposing the canonical remote MCP endpoint at `/mcp`
+- a valid planner bearer token for that relay
+- a running connector with at least one explicitly shared instance
+- operator familiarity with the relay/runtime flow in [../../SELF_HOST_REFERENCE.md](../../SELF_HOST_REFERENCE.md)
+- Gemini CLI installed on the operator machine by following the current official Gemini CLI install path
+
+Keep the public planner target narrow:
+
+- use relay `/mcp`
+- do not point Gemini CLI at the local daemon `/mcp/call`
+- use a Gemini MCP server alias without underscores; the Gemini CLI configuration reference explicitly warns that underscores in MCP aliases can break policy parsing
+
+## Step 1 Configure the Codencer relay prerequisites
+
+Bring up the relay, planner token, daemon, and connector exactly as described in [../../SELF_HOST_REFERENCE.md](../../SELF_HOST_REFERENCE.md). The important operator truth for Gemini CLI is the same as for every other remote MCP client:
+
+- the relay is the public remote control plane
+- the daemon is not the public remote MCP target
+- the canonical remote MCP session path is relay `/mcp`
+- bearer-token auth is required on that relay surface
+
+If you still need to establish the relay token and connector share state, stop here and finish the self-host reference flow first.
+
+## Step 2 Add the relay MCP server to Gemini CLI
+
+Gemini CLI's current MCP docs support streamable HTTP servers through `httpUrl` plus custom request `headers`. For Codencer relay mode, use the checked-in example at [../examples/gemini-cli-relay.mcp.json](../examples/gemini-cli-relay.mcp.json).
+
+Minimal project-scoped Gemini settings shape:
+
+```json
+{
+ "mcpServers": {
+ "codencer-relay": {
+ "httpUrl": "${CODENCER_RELAY_MCP_URL:-http://127.0.0.1:8090/mcp}",
+ "headers": {
+ "Authorization": "Bearer ${CODENCER_PLANNER_TOKEN}"
+ }
+ }
+ }
+}
+```
+
+Operational notes:
+
+- `httpUrl` is the right Gemini CLI field for Codencer relay `/mcp`, because the relay's canonical MCP path is streamable HTTP, not an SSE-only endpoint
+- the bearer token belongs in the `Authorization` header
+- the example keeps the token and URL environment-driven so operators do not hard-code secrets into checked-in config
+
+If you prefer the Gemini CLI helper command instead of editing `settings.json` directly, the current official docs show the equivalent pattern:
+
+```bash
+gemini mcp add --transport http \
+ --header "Authorization: Bearer ${CODENCER_PLANNER_TOKEN}" \
+ codencer-relay \
+ "${CODENCER_RELAY_MCP_URL:-http://127.0.0.1:8090/mcp}"
+```
+
+## Step 3 Confirm Gemini CLI sees the Codencer server
+
+From the project where the Gemini settings apply, use the Gemini CLI MCP inspection command documented by Gemini:
+
+```bash
+gemini mcp list
+```
+
+Expected operator outcome on a correctly wired host:
+
+- the `codencer-relay` server appears in the list
+- transport is `http`
+- connection state is reported as connected
+
+If this step fails, fix transport, URL, auth, or relay share state before you attempt a mutating Codencer workflow.
+
+## Step 4 Run a planning example through Codencer MCP tools
+
+Use a planning-first prompt that forces Gemini CLI to inspect the environment before it mutates state. Example operator prompt:
+
+```text
+Use the Codencer relay MCP server only.
+First call codencer.list_instances and stop if there is not exactly one healthy shared instance.
+If there is one healthy shared instance, start a run with id gemini-plan-demo for project docs-demo.
+Then submit a planning-only task: "Plan the smallest documentation change needed to tighten Codencer relay troubleshooting. Do not edit files."
+Wait for the step to finish, then report the run id, step id, final step status, and a short summary of the plan.
+```
+
+Expected Codencer tool flow on the relay surface:
+
+1. `codencer.list_instances`
+2. `codencer.start_run`
+3. `codencer.submit_task`
+4. `codencer.wait_step`
+5. `codencer.get_step_result`
+
+Gemini CLI namespaces discovered MCP tools with an `mcp_` prefix plus the configured server alias. The underlying Codencer tool names are still the ones documented in [../relay_tools.md](../relay_tools.md), so the operator intent above remains grounded in `codencer.*`.
+
+## Step 5 Handle the result like an operator
+
+Treat the returned status as the control point, not the model prose alone.
+
+If the planning step succeeds:
+
+- record the returned `run_id` and `step_id`
+- inspect the plan summary from `codencer.get_step_result`
+- if you need structured proof before acting, fetch validations with `codencer.get_step_validations`
+
+If the run stops behind approval or another gate:
+
+- list gates with `codencer.list_run_gates`
+- approve or reject only after the operator reviews the returned evidence
+
+If the step fails or times out:
+
+- fetch logs with `codencer.get_step_logs`
+- confirm that the relay still sees the intended shared instance
+- retry only after fixing the underlying transport, auth, or instance-selection issue
+
+## Verification smoke
+
+This smoke is `expected-only` for this documentation pass. It was not executed locally here because `gemini` was not installed on this host on `2026-04-24`.
+
+On an operator machine that does have Gemini CLI installed, use this narrow smoke:
+
+```bash
+gemini mcp list
+```
+
+Then, from an interactive Gemini session in the configured project, ask for a non-destructive first call:
+
+```text
+Use the Codencer relay MCP server only. Call codencer.list_instances and report only the instance ids and share state.
+```
+
+Expected smoke outcome:
+
+- Gemini CLI connects to the `codencer-relay` HTTP MCP server
+- Codencer returns one or more shared instances, or an empty list that truthfully reflects relay state
+- no local daemon MCP endpoint is involved
+
+For Codencer-side proof boundaries, keep [../../BETA_TESTING.md](../../BETA_TESTING.md) and [../integrations.md](../integrations.md) as the release truth.
+
+## Known Limitations on this surface
+
+- This is an `expected-only` planner integration path in the current Codencer beta contract.
+- This page is aligned to the official Gemini CLI configuration and MCP references, but it was not locally validated here because `gemini` was not installed on this host during this pass.
+- Codencer proves the relay MCP protocol surface directly; it does not prove Gemini CLI product UX, approval behavior, or future UI wording.
+- Gemini CLI prefixes discovered MCP tool names with the configured server alias, so the visible tool names may be prefixed even though the underlying Codencer tools remain `codencer.*`.
+- This page stays on the relay self-host path. Cloud MCP can follow the same remote HTTP pattern, but that packaging example is out of scope for this pass.
+
+## Troubleshooting
+
+If `gemini mcp list` shows the server as disconnected:
+
+- confirm the config uses `httpUrl`, not the SSE `url` field
+- confirm the endpoint is the relay MCP path, usually `http://127.0.0.1:8090/mcp` or your deployed relay `/mcp`
+- confirm the planner token is present in `Authorization: Bearer ...`
+
+If tool calls return auth errors:
+
+- rotate or reissue the relay planner token
+- confirm the token scope still permits the intended planner operations
+- verify that environment-variable expansion resolved the expected token value in Gemini's settings
+
+If no instances appear:
+
+- confirm the connector is enrolled and running
+- confirm at least one instance is explicitly shared
+- confirm you are querying the relay, not the local daemon
+
+If Gemini appears to call the wrong tool name:
+
+- check the configured server alias
+- avoid underscores in that alias
+- remember that Gemini prefixes discovered MCP tools with the MCP server alias even though the Codencer tool inventory is still the one documented in [../relay_tools.md](../relay_tools.md)
diff --git a/docs/mcp/relay_tools.md b/docs/mcp/relay_tools.md
new file mode 100644
index 0000000..16d074e
--- /dev/null
+++ b/docs/mcp/relay_tools.md
@@ -0,0 +1,83 @@
+# Relay MCP Tools
+
+Codencer exposes the remote MCP surface from the relay, not from the local daemon.
+
+This page is about direct relay mode.
+
+If you are operating through Codencer Cloud tenancy and composed runtime mode, use `/api/cloud/v1/mcp` instead and treat [Cloud MCP Tools](cloud_tools.md) as the source of truth for that boundary.
+
+For the frozen planner/client compatibility matrix, generic client examples, and client-specific packaging notes, see [Planner / Client Integration Notes](integrations.md).
+
+## Endpoint
+
+Use the relay MCP endpoint:
+- `POST /mcp`
+- `GET /mcp`
+- `DELETE /mcp`
+
+Compatibility path:
+- `POST /mcp/call`
+
+The relay MCP server currently supports:
+- `initialize`
+- `notifications/initialized`
+- `tools/list`
+- `tools/call`
+
+## Tool List
+
+- `codencer.list_instances`
+- `codencer.get_instance`
+- `codencer.start_run`
+- `codencer.get_run`
+- `codencer.list_run_gates`
+- `codencer.submit_task`
+- `codencer.get_step`
+- `codencer.wait_step`
+- `codencer.get_step_result`
+- `codencer.list_step_artifacts`
+- `codencer.get_step_logs`
+- `codencer.get_artifact_content`
+- `codencer.get_step_validations`
+- `codencer.approve_gate`
+- `codencer.reject_gate`
+- `codencer.abort_run`
+- `codencer.retry_step`
+
+## Tool Rules
+
+- Mutating tools require explicit `instance_id`.
+- Tool calls respect the same planner auth scopes as the relay HTTP API.
+- Tool calls do not bypass connector sharing or instance routing.
+- Direct `step`, `artifact`, and `gate` lookups do not require prior observation of those ids; the relay probes only authorized online shared instances and persists successful route hints.
+- `approve_gate`, `reject_gate`, and `retry_step` require explicit `instance_id` even though the corresponding relay HTTP routes can resolve routed ids implicitly.
+- `submit_task` accepts the real Codencer `TaskSpec` shape.
+- `wait_step` is bounded and takes explicit timeout input.
+- `list_run_gates` is the canonical gate-discovery tool for a known run and instance.
+- run listing remains HTTP-only in this phase; there is no `codencer.list_runs` tool yet.
+- `get_step_logs` returns the collected step logs as explicit text or base64-safe content metadata.
+- `get_artifact_content` reads by `artifact_id` and returns text or base64-safe content metadata.
+- `abort_run` returns a successful tool result only when the daemon confirms the active step reached `cancelled`.
+- There is no raw shell tool.
+- There is no arbitrary filesystem browsing tool.
+
+## Transport Notes
+
+- `/mcp` supports session-bound Streamable HTTP `GET`, `POST`, and `DELETE`
+- the relay returns `MCP-Protocol-Version`
+- the relay can return `MCP-Session-Id` on `initialize`
+- `GET /mcp` keeps an SSE stream open for the negotiated session and emits keepalive comments
+- `POST /mcp/call` remains as a compatibility alias for simple POST callers; `/mcp` is still the canonical session path
+- the Codencer tool model remains intentionally request/response-oriented even though the transport now supports a real SSE session
+
+## Proven Compatibility
+
+- verified in repo tests against the official Go SDK `StreamableClientTransport`
+- verified for manual JSON-RPC callers using `POST /mcp` and `POST /mcp/call`
+- not overclaimed as universal client compatibility beyond the integrations directly exercised here
+
+## Local MCP Distinction
+
+The daemon-local `/mcp/call` endpoint is separate.
+
+It is useful as a local compatibility/admin bridge, but it is not the public remote MCP surface for planner integrations.
diff --git a/docs/validation_task.yaml b/docs/validation_task.yaml
index b0be5c5..e33dc11 100644
--- a/docs/validation_task.yaml
+++ b/docs/validation_task.yaml
@@ -3,7 +3,7 @@ run_id: "validation-run-01"
step_id: "bump-version-01"
# [OPTIONAL] phase_id: "phase-execution-$RUN_ID" (auto-generated if omitted)
title: "Internal Version Bump"
-goal: "Update internal/app/version.go to set Version = \"v0.1.0-alpha\""
+goal: "Update internal/app/version.go to set Version = \"v0.2.0-alpha-validation\""
adapter_profile: "codex"
constraints:
- "Do not change the package name"
diff --git a/extension/node_modules/.bin/tsc b/extension/node_modules/.bin/tsc
deleted file mode 120000
index 0863208..0000000
--- a/extension/node_modules/.bin/tsc
+++ /dev/null
@@ -1 +0,0 @@
-../typescript/bin/tsc
\ No newline at end of file
diff --git a/extension/node_modules/.bin/tsserver b/extension/node_modules/.bin/tsserver
deleted file mode 120000
index f8f8f1a..0000000
--- a/extension/node_modules/.bin/tsserver
+++ /dev/null
@@ -1 +0,0 @@
-../typescript/bin/tsserver
\ No newline at end of file
diff --git a/extension/node_modules/.package-lock.json b/extension/node_modules/.package-lock.json
deleted file mode 100644
index 83f433a..0000000
--- a/extension/node_modules/.package-lock.json
+++ /dev/null
@@ -1,36 +0,0 @@
-{
- "name": "codencer-bridge",
- "version": "0.1.0",
- "lockfileVersion": 3,
- "requires": true,
- "packages": {
- "node_modules/@types/node": {
- "version": "16.18.126",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.126.tgz",
- "integrity": "sha512-OTcgaiwfGFBKacvfwuHzzn1KLxH/er8mluiy8/uM3sGXHaRe73RrSIj01jow9t4kJEW633Ov+cOexXeiApTyAw==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/@types/vscode": {
- "version": "1.110.0",
- "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.110.0.tgz",
- "integrity": "sha512-AGuxUEpU4F4mfuQjxPPaQVyuOMhs+VT/xRok1jiHVBubHK7lBRvCuOMZG0LKUwxncrPorJ5qq/uil3IdZBd5lA==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/typescript": {
- "version": "5.9.3",
- "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
- "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
- "dev": true,
- "license": "Apache-2.0",
- "bin": {
- "tsc": "bin/tsc",
- "tsserver": "bin/tsserver"
- },
- "engines": {
- "node": ">=14.17"
- }
- }
- }
-}
diff --git a/extension/node_modules/@types/node/LICENSE b/extension/node_modules/@types/node/LICENSE
deleted file mode 100644
index 9e841e7..0000000
--- a/extension/node_modules/@types/node/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
- MIT License
-
- Copyright (c) Microsoft Corporation.
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE
diff --git a/extension/node_modules/@types/node/README.md b/extension/node_modules/@types/node/README.md
deleted file mode 100644
index 7113591..0000000
--- a/extension/node_modules/@types/node/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Installation
-> `npm install --save @types/node`
-
-# Summary
-This package contains type definitions for node (https://nodejs.org/).
-
-# Details
-Files were exported from https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/node/v16.
-
-### Additional Details
- * Last updated: Tue, 04 Feb 2025 00:04:06 GMT
- * Dependencies: none
-
-# Credits
-These definitions were written by [Microsoft TypeScript](https://github.com/Microsoft), [Alberto Schiabel](https://github.com/jkomyno), [Alvis HT Tang](https://github.com/alvis), [Andrew Makarov](https://github.com/r3nya), [Benjamin Toueg](https://github.com/btoueg), [Chigozirim C.](https://github.com/smac89), [David Junger](https://github.com/touffy), [Deividas Bakanas](https://github.com/DeividasBakanas), [Eugene Y. Q. Shen](https://github.com/eyqs), [Hannes Magnusson](https://github.com/Hannes-Magnusson-CK), [Huw](https://github.com/hoo29), [Kelvin Jin](https://github.com/kjin), [Klaus Meinhardt](https://github.com/ajafff), [Lishude](https://github.com/islishude), [Mariusz Wiktorczyk](https://github.com/mwiktorczyk), [Mohsen Azimi](https://github.com/mohsen1), [Nikita Galkin](https://github.com/galkin), [Parambir Singh](https://github.com/parambirs), [Sebastian Silbermann](https://github.com/eps1lon), [Seth Westphal](https://github.com/westy92), [Simon Schick](https://github.com/SimonSchick), [Thomas den Hollander](https://github.com/ThomasdenH), [Wilco Bakker](https://github.com/WilcoBakker), [wwwy3y3](https://github.com/wwwy3y3), [Samuel Ainsworth](https://github.com/samuela), [Kyle Uehlein](https://github.com/kuehlein), [Thanik Bhongbhibhat](https://github.com/bhongy), [Marcin Kopacz](https://github.com/chyzwar), [Trivikram Kamat](https://github.com/trivikr), [Junxiao Shi](https://github.com/yoursunny), [Ilia Baryshnikov](https://github.com/qwelias), [ExE Boss](https://github.com/ExE-Boss), [Piotr Bลaลผejewicz](https://github.com/peterblazejewicz), [Anna Henningsen](https://github.com/addaleax), [Victor Perin](https://github.com/victorperin), [NodeJS Contributors](https://github.com/NodeJS), [Linus Unnebรคck](https://github.com/LinusU), and [wafuwafu13](https://github.com/wafuwafu13).
diff --git a/extension/node_modules/@types/node/assert.d.ts b/extension/node_modules/@types/node/assert.d.ts
deleted file mode 100644
index bac3cfd..0000000
--- a/extension/node_modules/@types/node/assert.d.ts
+++ /dev/null
@@ -1,986 +0,0 @@
-/**
- * The `assert` module provides a set of assertion functions for verifying
- * invariants.
- * @see [source](https://github.com/nodejs/node/blob/v16.9.0/lib/assert.js)
- */
-declare module "assert" {
- /**
- * An alias of {@link ok}.
- * @since v0.5.9
- * @param value The input that is checked for being truthy.
- */
- function assert(value: unknown, message?: string | Error): asserts value;
- namespace assert {
- /**
- * Indicates the failure of an assertion. All errors thrown by the `assert` module
- * will be instances of the `AssertionError` class.
- */
- class AssertionError extends Error {
- actual: unknown;
- expected: unknown;
- operator: string;
- generatedMessage: boolean;
- code: "ERR_ASSERTION";
- constructor(options?: {
- /** If provided, the error message is set to this value. */
- message?: string | undefined;
- /** The `actual` property on the error instance. */
- actual?: unknown | undefined;
- /** The `expected` property on the error instance. */
- expected?: unknown | undefined;
- /** The `operator` property on the error instance. */
- operator?: string | undefined;
- /** If provided, the generated stack trace omits frames before this function. */
- // eslint-disable-next-line @typescript-eslint/no-unsafe-function-type
- stackStartFn?: Function | undefined;
- });
- }
- /**
- * This feature is currently experimental and behavior might still change.
- * @since v14.2.0, v12.19.0
- * @experimental
- */
- class CallTracker {
- /**
- * The wrapper function is expected to be called exactly `exact` times. If the
- * function has not been called exactly `exact` times when `tracker.verify()` is called, then `tracker.verify()` will throw an
- * error.
- *
- * ```js
- * import assert from 'assert';
- *
- * // Creates call tracker.
- * const tracker = new assert.CallTracker();
- *
- * function func() {}
- *
- * // Returns a function that wraps func() that must be called exact times
- * // before tracker.verify().
- * const callsfunc = tracker.calls(func);
- * ```
- * @since v14.2.0, v12.19.0
- * @param [fn='A no-op function']
- * @param [exact=1]
- * @return that wraps `fn`.
- */
- calls(exact?: number): () => void;
- calls any>(fn?: Func, exact?: number): Func;
- /**
- * Example:
- *
- * ```js
- * import assert from 'node:assert';
- *
- * const tracker = new assert.CallTracker();
- *
- * function func() {}
- * const callsfunc = tracker.calls(func);
- * callsfunc(1, 2, 3);
- *
- * assert.deepStrictEqual(tracker.getCalls(callsfunc),
- * [{ thisArg: this, arguments: [1, 2, 3 ] }]);
- * ```
- *
- * @since v18.8.0, v16.18.0
- * @param fn
- * @returns An Array with the calls to a tracked function.
- */
- getCalls(fn: Function): CallTrackerCall[];
- /**
- * The arrays contains information about the expected and actual number of calls of
- * the functions that have not been called the expected number of times.
- *
- * ```js
- * import assert from 'assert';
- *
- * // Creates call tracker.
- * const tracker = new assert.CallTracker();
- *
- * function func() {}
- *
- * function foo() {}
- *
- * // Returns a function that wraps func() that must be called exact times
- * // before tracker.verify().
- * const callsfunc = tracker.calls(func, 2);
- *
- * // Returns an array containing information on callsfunc()
- * tracker.report();
- * // [
- * // {
- * // message: 'Expected the func function to be executed 2 time(s) but was
- * // executed 0 time(s).',
- * // actual: 0,
- * // expected: 2,
- * // operator: 'func',
- * // stack: stack trace
- * // }
- * // ]
- * ```
- * @since v14.2.0, v12.19.0
- * @return of objects containing information about the wrapper functions returned by `calls`.
- */
- report(): CallTrackerReportInformation[];
- /**
- * Reset calls of the call tracker.
- * If a tracked function is passed as an argument, the calls will be reset for it.
- * If no arguments are passed, all tracked functions will be reset.
- *
- * ```js
- * import assert from 'node:assert';
- *
- * const tracker = new assert.CallTracker();
- *
- * function func() {}
- * const callsfunc = tracker.calls(func);
- *
- * callsfunc();
- * // Tracker was called once
- * tracker.getCalls(callsfunc).length === 1;
- *
- * tracker.reset(callsfunc);
- * tracker.getCalls(callsfunc).length === 0;
- * ```
- *
- * @since v18.8.0, v16.18.0
- * @param fn a tracked function to reset.
- */
- reset(fn?: Function): void;
- /**
- * Iterates through the list of functions passed to `tracker.calls()` and will throw an error for functions that
- * have not been called the expected number of times.
- *
- * ```js
- * import assert from 'assert';
- *
- * // Creates call tracker.
- * const tracker = new assert.CallTracker();
- *
- * function func() {}
- *
- * // Returns a function that wraps func() that must be called exact times
- * // before tracker.verify().
- * const callsfunc = tracker.calls(func, 2);
- *
- * callsfunc();
- *
- * // Will throw an error since callsfunc() was only called once.
- * tracker.verify();
- * ```
- * @since v14.2.0, v12.19.0
- */
- verify(): void;
- }
- interface CallTrackerCall {
- thisArg: object;
- arguments: unknown[];
- }
- interface CallTrackerReportInformation {
- message: string;
- /** The actual number of times the function was called. */
- actual: number;
- /** The number of times the function was expected to be called. */
- expected: number;
- /** The name of the function that is wrapped. */
- operator: string;
- /** A stack trace of the function. */
- stack: object;
- }
- type AssertPredicate = RegExp | (new() => object) | ((thrown: unknown) => boolean) | object | Error;
- /**
- * Throws an `AssertionError` with the provided error message or a default
- * error message. If the `message` parameter is an instance of an `Error` then
- * it will be thrown instead of the `AssertionError`.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.fail();
- * // AssertionError [ERR_ASSERTION]: Failed
- *
- * assert.fail('boom');
- * // AssertionError [ERR_ASSERTION]: boom
- *
- * assert.fail(new TypeError('need array'));
- * // TypeError: need array
- * ```
- *
- * Using `assert.fail()` with more than two arguments is possible but deprecated.
- * See below for further details.
- * @since v0.1.21
- * @param [message='Failed']
- */
- function fail(message?: string | Error): never;
- /** @deprecated since v10.0.0 - use fail([message]) or other assert functions instead. */
- function fail(
- actual: unknown,
- expected: unknown,
- message?: string | Error,
- operator?: string,
- // eslint-disable-next-line @typescript-eslint/no-unsafe-function-type
- stackStartFn?: Function,
- ): never;
- /**
- * Tests if `value` is truthy. It is equivalent to`assert.equal(!!value, true, message)`.
- *
- * If `value` is not truthy, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is `undefined`, a default
- * error message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`.
- * If no arguments are passed in at all `message` will be set to the string:`` 'No value argument passed to `assert.ok()`' ``.
- *
- * Be aware that in the `repl` the error message will be different to the one
- * thrown in a file! See below for further details.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.ok(true);
- * // OK
- * assert.ok(1);
- * // OK
- *
- * assert.ok();
- * // AssertionError: No value argument passed to `assert.ok()`
- *
- * assert.ok(false, 'it\'s false');
- * // AssertionError: it's false
- *
- * // In the repl:
- * assert.ok(typeof 123 === 'string');
- * // AssertionError: false == true
- *
- * // In a file (e.g. test.js):
- * assert.ok(typeof 123 === 'string');
- * // AssertionError: The expression evaluated to a falsy value:
- * //
- * // assert.ok(typeof 123 === 'string')
- *
- * assert.ok(false);
- * // AssertionError: The expression evaluated to a falsy value:
- * //
- * // assert.ok(false)
- *
- * assert.ok(0);
- * // AssertionError: The expression evaluated to a falsy value:
- * //
- * // assert.ok(0)
- * ```
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * // Using `assert()` works the same:
- * assert(0);
- * // AssertionError: The expression evaluated to a falsy value:
- * //
- * // assert(0)
- * ```
- * @since v0.1.21
- */
- function ok(value: unknown, message?: string | Error): asserts value;
- /**
- * **Strict assertion mode**
- *
- * An alias of {@link strictEqual}.
- *
- * **Legacy assertion mode**
- *
- * > Stability: 3 - Legacy: Use {@link strictEqual} instead.
- *
- * Tests shallow, coercive equality between the `actual` and `expected` parameters
- * using the [Abstract Equality Comparison](https://tc39.github.io/ecma262/#sec-abstract-equality-comparison) ( `==` ). `NaN` is special handled
- * and treated as being identical in case both sides are `NaN`.
- *
- * ```js
- * import assert from 'assert';
- *
- * assert.equal(1, 1);
- * // OK, 1 == 1
- * assert.equal(1, '1');
- * // OK, 1 == '1'
- * assert.equal(NaN, NaN);
- * // OK
- *
- * assert.equal(1, 2);
- * // AssertionError: 1 == 2
- * assert.equal({ a: { b: 1 } }, { a: { b: 1 } });
- * // AssertionError: { a: { b: 1 } } == { a: { b: 1 } }
- * ```
- *
- * If the values are not equal, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is undefined, a default
- * error message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`.
- * @since v0.1.21
- */
- function equal(actual: unknown, expected: unknown, message?: string | Error): void;
- /**
- * **Strict assertion mode**
- *
- * An alias of {@link notStrictEqual}.
- *
- * **Legacy assertion mode**
- *
- * > Stability: 3 - Legacy: Use {@link notStrictEqual} instead.
- *
- * Tests shallow, coercive inequality with the [Abstract Equality Comparison](https://tc39.github.io/ecma262/#sec-abstract-equality-comparison)(`!=` ). `NaN` is special handled and treated as
- * being identical in case both
- * sides are `NaN`.
- *
- * ```js
- * import assert from 'assert';
- *
- * assert.notEqual(1, 2);
- * // OK
- *
- * assert.notEqual(1, 1);
- * // AssertionError: 1 != 1
- *
- * assert.notEqual(1, '1');
- * // AssertionError: 1 != '1'
- * ```
- *
- * If the values are equal, an `AssertionError` is thrown with a `message`property set equal to the value of the `message` parameter. If the `message`parameter is undefined, a default error
- * message is assigned. If the `message`parameter is an instance of an `Error` then it will be thrown instead of the`AssertionError`.
- * @since v0.1.21
- */
- function notEqual(actual: unknown, expected: unknown, message?: string | Error): void;
- /**
- * **Strict assertion mode**
- *
- * An alias of {@link deepStrictEqual}.
- *
- * **Legacy assertion mode**
- *
- * > Stability: 3 - Legacy: Use {@link deepStrictEqual} instead.
- *
- * Tests for deep equality between the `actual` and `expected` parameters. Consider
- * using {@link deepStrictEqual} instead. {@link deepEqual} can have
- * surprising results.
- *
- * _Deep equality_ means that the enumerable "own" properties of child objects
- * are also recursively evaluated by the following rules.
- * @since v0.1.21
- */
- function deepEqual(actual: unknown, expected: unknown, message?: string | Error): void;
- /**
- * **Strict assertion mode**
- *
- * An alias of {@link notDeepStrictEqual}.
- *
- * **Legacy assertion mode**
- *
- * > Stability: 3 - Legacy: Use {@link notDeepStrictEqual} instead.
- *
- * Tests for any deep inequality. Opposite of {@link deepEqual}.
- *
- * ```js
- * import assert from 'assert';
- *
- * const obj1 = {
- * a: {
- * b: 1
- * }
- * };
- * const obj2 = {
- * a: {
- * b: 2
- * }
- * };
- * const obj3 = {
- * a: {
- * b: 1
- * }
- * };
- * const obj4 = Object.create(obj1);
- *
- * assert.notDeepEqual(obj1, obj1);
- * // AssertionError: { a: { b: 1 } } notDeepEqual { a: { b: 1 } }
- *
- * assert.notDeepEqual(obj1, obj2);
- * // OK
- *
- * assert.notDeepEqual(obj1, obj3);
- * // AssertionError: { a: { b: 1 } } notDeepEqual { a: { b: 1 } }
- *
- * assert.notDeepEqual(obj1, obj4);
- * // OK
- * ```
- *
- * If the values are deeply equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. If the`message` parameter is undefined, a default
- * error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown
- * instead of the `AssertionError`.
- * @since v0.1.21
- */
- function notDeepEqual(actual: unknown, expected: unknown, message?: string | Error): void;
- /**
- * Tests strict equality between the `actual` and `expected` parameters as
- * determined by the [SameValue Comparison](https://tc39.github.io/ecma262/#sec-samevalue).
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.strictEqual(1, 2);
- * // AssertionError [ERR_ASSERTION]: Expected inputs to be strictly equal:
- * //
- * // 1 !== 2
- *
- * assert.strictEqual(1, 1);
- * // OK
- *
- * assert.strictEqual('Hello foobar', 'Hello World!');
- * // AssertionError [ERR_ASSERTION]: Expected inputs to be strictly equal:
- * // + actual - expected
- * //
- * // + 'Hello foobar'
- * // - 'Hello World!'
- * // ^
- *
- * const apples = 1;
- * const oranges = 2;
- * assert.strictEqual(apples, oranges, `apples ${apples} !== oranges ${oranges}`);
- * // AssertionError [ERR_ASSERTION]: apples 1 !== oranges 2
- *
- * assert.strictEqual(1, '1', new TypeError('Inputs are not identical'));
- * // TypeError: Inputs are not identical
- * ```
- *
- * If the values are not strictly equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. If the`message` parameter is undefined, a
- * default error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown
- * instead of the `AssertionError`.
- * @since v0.1.21
- */
- function strictEqual(actual: unknown, expected: T, message?: string | Error): asserts actual is T;
- /**
- * Tests strict inequality between the `actual` and `expected` parameters as
- * determined by the [SameValue Comparison](https://tc39.github.io/ecma262/#sec-samevalue).
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.notStrictEqual(1, 2);
- * // OK
- *
- * assert.notStrictEqual(1, 1);
- * // AssertionError [ERR_ASSERTION]: Expected "actual" to be strictly unequal to:
- * //
- * // 1
- *
- * assert.notStrictEqual(1, '1');
- * // OK
- * ```
- *
- * If the values are strictly equal, an `AssertionError` is thrown with a`message` property set equal to the value of the `message` parameter. If the`message` parameter is undefined, a
- * default error message is assigned. If the`message` parameter is an instance of an `Error` then it will be thrown
- * instead of the `AssertionError`.
- * @since v0.1.21
- */
- function notStrictEqual(actual: unknown, expected: unknown, message?: string | Error): void;
- /**
- * Tests for deep equality between the `actual` and `expected` parameters.
- * "Deep" equality means that the enumerable "own" properties of child objects
- * are recursively evaluated also by the following rules.
- * @since v1.2.0
- */
- function deepStrictEqual(actual: unknown, expected: T, message?: string | Error): asserts actual is T;
- /**
- * Tests for deep strict inequality. Opposite of {@link deepStrictEqual}.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.notDeepStrictEqual({ a: 1 }, { a: '1' });
- * // OK
- * ```
- *
- * If the values are deeply and strictly equal, an `AssertionError` is thrown
- * with a `message` property set equal to the value of the `message` parameter. If
- * the `message` parameter is undefined, a default error message is assigned. If
- * the `message` parameter is an instance of an `Error` then it will be thrown
- * instead of the `AssertionError`.
- * @since v1.2.0
- */
- function notDeepStrictEqual(actual: unknown, expected: unknown, message?: string | Error): void;
- /**
- * Expects the function `fn` to throw an error.
- *
- * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes),
- * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions), a validation function,
- * a validation object where each property will be tested for strict deep equality,
- * or an instance of error where each property will be tested for strict deep
- * equality including the non-enumerable `message` and `name` properties. When
- * using an object, it is also possible to use a regular expression, when
- * validating against a string property. See below for examples.
- *
- * If specified, `message` will be appended to the message provided by the`AssertionError` if the `fn` call fails to throw or in case the error validation
- * fails.
- *
- * Custom validation object/error instance:
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * const err = new TypeError('Wrong value');
- * err.code = 404;
- * err.foo = 'bar';
- * err.info = {
- * nested: true,
- * baz: 'text'
- * };
- * err.reg = /abc/i;
- *
- * assert.throws(
- * () => {
- * throw err;
- * },
- * {
- * name: 'TypeError',
- * message: 'Wrong value',
- * info: {
- * nested: true,
- * baz: 'text'
- * }
- * // Only properties on the validation object will be tested for.
- * // Using nested objects requires all properties to be present. Otherwise
- * // the validation is going to fail.
- * }
- * );
- *
- * // Using regular expressions to validate error properties:
- * throws(
- * () => {
- * throw err;
- * },
- * {
- * // The `name` and `message` properties are strings and using regular
- * // expressions on those will match against the string. If they fail, an
- * // error is thrown.
- * name: /^TypeError$/,
- * message: /Wrong/,
- * foo: 'bar',
- * info: {
- * nested: true,
- * // It is not possible to use regular expressions for nested properties!
- * baz: 'text'
- * },
- * // The `reg` property contains a regular expression and only if the
- * // validation object contains an identical regular expression, it is going
- * // to pass.
- * reg: /abc/i
- * }
- * );
- *
- * // Fails due to the different `message` and `name` properties:
- * throws(
- * () => {
- * const otherErr = new Error('Not found');
- * // Copy all enumerable properties from `err` to `otherErr`.
- * for (const [key, value] of Object.entries(err)) {
- * otherErr[key] = value;
- * }
- * throw otherErr;
- * },
- * // The error's `message` and `name` properties will also be checked when using
- * // an error as validation object.
- * err
- * );
- * ```
- *
- * Validate instanceof using constructor:
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.throws(
- * () => {
- * throw new Error('Wrong value');
- * },
- * Error
- * );
- * ```
- *
- * Validate error message using [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions):
- *
- * Using a regular expression runs `.toString` on the error object, and will
- * therefore also include the error name.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.throws(
- * () => {
- * throw new Error('Wrong value');
- * },
- * /^Error: Wrong value$/
- * );
- * ```
- *
- * Custom error validation:
- *
- * The function must return `true` to indicate all internal validations passed.
- * It will otherwise fail with an `AssertionError`.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.throws(
- * () => {
- * throw new Error('Wrong value');
- * },
- * (err) => {
- * assert(err instanceof Error);
- * assert(/value/.test(err));
- * // Avoid returning anything from validation functions besides `true`.
- * // Otherwise, it's not clear what part of the validation failed. Instead,
- * // throw an error about the specific validation that failed (as done in this
- * // example) and add as much helpful debugging information to that error as
- * // possible.
- * return true;
- * },
- * 'unexpected error'
- * );
- * ```
- *
- * `error` cannot be a string. If a string is provided as the second
- * argument, then `error` is assumed to be omitted and the string will be used for`message` instead. This can lead to easy-to-miss mistakes. Using the same
- * message as the thrown error message is going to result in an`ERR_AMBIGUOUS_ARGUMENT` error. Please read the example below carefully if using
- * a string as the second argument gets considered:
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * function throwingFirst() {
- * throw new Error('First');
- * }
- *
- * function throwingSecond() {
- * throw new Error('Second');
- * }
- *
- * function notThrowing() {}
- *
- * // The second argument is a string and the input function threw an Error.
- * // The first case will not throw as it does not match for the error message
- * // thrown by the input function!
- * assert.throws(throwingFirst, 'Second');
- * // In the next example the message has no benefit over the message from the
- * // error and since it is not clear if the user intended to actually match
- * // against the error message, Node.js throws an `ERR_AMBIGUOUS_ARGUMENT` error.
- * assert.throws(throwingSecond, 'Second');
- * // TypeError [ERR_AMBIGUOUS_ARGUMENT]
- *
- * // The string is only used (as message) in case the function does not throw:
- * assert.throws(notThrowing, 'Second');
- * // AssertionError [ERR_ASSERTION]: Missing expected exception: Second
- *
- * // If it was intended to match for the error message do this instead:
- * // It does not throw because the error messages match.
- * assert.throws(throwingSecond, /Second$/);
- *
- * // If the error message does not match, an AssertionError is thrown.
- * assert.throws(throwingFirst, /Second$/);
- * // AssertionError [ERR_ASSERTION]
- * ```
- *
- * Due to the confusing error-prone notation, avoid a string as the second
- * argument.
- * @since v0.1.21
- */
- function throws(block: () => unknown, message?: string | Error): void;
- function throws(block: () => unknown, error: AssertPredicate, message?: string | Error): void;
- /**
- * Asserts that the function `fn` does not throw an error.
- *
- * Using `assert.doesNotThrow()` is actually not useful because there
- * is no benefit in catching an error and then rethrowing it. Instead, consider
- * adding a comment next to the specific code path that should not throw and keep
- * error messages as expressive as possible.
- *
- * When `assert.doesNotThrow()` is called, it will immediately call the `fn`function.
- *
- * If an error is thrown and it is the same type as that specified by the `error`parameter, then an `AssertionError` is thrown. If the error is of a
- * different type, or if the `error` parameter is undefined, the error is
- * propagated back to the caller.
- *
- * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes),
- * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) or a validation
- * function. See {@link throws} for more details.
- *
- * The following, for instance, will throw the `TypeError` because there is no
- * matching error type in the assertion:
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.doesNotThrow(
- * () => {
- * throw new TypeError('Wrong value');
- * },
- * SyntaxError
- * );
- * ```
- *
- * However, the following will result in an `AssertionError` with the message
- * 'Got unwanted exception...':
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.doesNotThrow(
- * () => {
- * throw new TypeError('Wrong value');
- * },
- * TypeError
- * );
- * ```
- *
- * If an `AssertionError` is thrown and a value is provided for the `message`parameter, the value of `message` will be appended to the `AssertionError` message:
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.doesNotThrow(
- * () => {
- * throw new TypeError('Wrong value');
- * },
- * /Wrong value/,
- * 'Whoops'
- * );
- * // Throws: AssertionError: Got unwanted exception: Whoops
- * ```
- * @since v0.1.21
- */
- function doesNotThrow(block: () => unknown, message?: string | Error): void;
- function doesNotThrow(block: () => unknown, error: AssertPredicate, message?: string | Error): void;
- /**
- * Throws `value` if `value` is not `undefined` or `null`. This is useful when
- * testing the `error` argument in callbacks. The stack trace contains all frames
- * from the error passed to `ifError()` including the potential new frames for`ifError()` itself.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.ifError(null);
- * // OK
- * assert.ifError(0);
- * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: 0
- * assert.ifError('error');
- * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: 'error'
- * assert.ifError(new Error());
- * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: Error
- *
- * // Create some random error frames.
- * let err;
- * (function errorFrame() {
- * err = new Error('test error');
- * })();
- *
- * (function ifErrorFrame() {
- * assert.ifError(err);
- * })();
- * // AssertionError [ERR_ASSERTION]: ifError got unwanted exception: test error
- * // at ifErrorFrame
- * // at errorFrame
- * ```
- * @since v0.1.97
- */
- function ifError(value: unknown): asserts value is null | undefined;
- /**
- * Awaits the `asyncFn` promise or, if `asyncFn` is a function, immediately
- * calls the function and awaits the returned promise to complete. It will then
- * check that the promise is rejected.
- *
- * If `asyncFn` is a function and it throws an error synchronously,`assert.rejects()` will return a rejected `Promise` with that error. If the
- * function does not return a promise, `assert.rejects()` will return a rejected`Promise` with an `ERR_INVALID_RETURN_VALUE` error. In both cases the error
- * handler is skipped.
- *
- * Besides the async nature to await the completion behaves identically to {@link throws}.
- *
- * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes),
- * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions), a validation function,
- * an object where each property will be tested for, or an instance of error where
- * each property will be tested for including the non-enumerable `message` and `name` properties.
- *
- * If specified, `message` will be the message provided by the `AssertionError` if the `asyncFn` fails to reject.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * await assert.rejects(
- * async () => {
- * throw new TypeError('Wrong value');
- * },
- * {
- * name: 'TypeError',
- * message: 'Wrong value'
- * }
- * );
- * ```
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * await assert.rejects(
- * async () => {
- * throw new TypeError('Wrong value');
- * },
- * (err) => {
- * assert.strictEqual(err.name, 'TypeError');
- * assert.strictEqual(err.message, 'Wrong value');
- * return true;
- * }
- * );
- * ```
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.rejects(
- * Promise.reject(new Error('Wrong value')),
- * Error
- * ).then(() => {
- * // ...
- * });
- * ```
- *
- * `error` cannot be a string. If a string is provided as the second
- * argument, then `error` is assumed to be omitted and the string will be used for`message` instead. This can lead to easy-to-miss mistakes. Please read the
- * example in {@link throws} carefully if using a string as the second
- * argument gets considered.
- * @since v10.0.0
- */
- function rejects(block: (() => Promise) | Promise, message?: string | Error): Promise;
- function rejects(
- block: (() => Promise) | Promise,
- error: AssertPredicate,
- message?: string | Error,
- ): Promise;
- /**
- * Awaits the `asyncFn` promise or, if `asyncFn` is a function, immediately
- * calls the function and awaits the returned promise to complete. It will then
- * check that the promise is not rejected.
- *
- * If `asyncFn` is a function and it throws an error synchronously,`assert.doesNotReject()` will return a rejected `Promise` with that error. If
- * the function does not return a promise, `assert.doesNotReject()` will return a
- * rejected `Promise` with an `ERR_INVALID_RETURN_VALUE` error. In both cases
- * the error handler is skipped.
- *
- * Using `assert.doesNotReject()` is actually not useful because there is little
- * benefit in catching a rejection and then rejecting it again. Instead, consider
- * adding a comment next to the specific code path that should not reject and keep
- * error messages as expressive as possible.
- *
- * If specified, `error` can be a [`Class`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Classes),
- * [`RegExp`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) or a validation
- * function. See {@link throws} for more details.
- *
- * Besides the async nature to await the completion behaves identically to {@link doesNotThrow}.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * await assert.doesNotReject(
- * async () => {
- * throw new TypeError('Wrong value');
- * },
- * SyntaxError
- * );
- * ```
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.doesNotReject(Promise.reject(new TypeError('Wrong value')))
- * .then(() => {
- * // ...
- * });
- * ```
- * @since v10.0.0
- */
- function doesNotReject(
- block: (() => Promise) | Promise,
- message?: string | Error,
- ): Promise;
- function doesNotReject(
- block: (() => Promise) | Promise,
- error: AssertPredicate,
- message?: string | Error,
- ): Promise;
- /**
- * Expects the `string` input to match the regular expression.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.match('I will fail', /pass/);
- * // AssertionError [ERR_ASSERTION]: The input did not match the regular ...
- *
- * assert.match(123, /pass/);
- * // AssertionError [ERR_ASSERTION]: The "string" argument must be of type string.
- *
- * assert.match('I will pass', /pass/);
- * // OK
- * ```
- *
- * If the values do not match, or if the `string` argument is of another type than`string`, an `AssertionError` is thrown with a `message` property set equal
- * to the value of the `message` parameter. If the `message` parameter is
- * undefined, a default error message is assigned. If the `message` parameter is an
- * instance of an `Error` then it will be thrown instead of the `AssertionError`.
- * @since v13.6.0, v12.16.0
- */
- function match(value: string, regExp: RegExp, message?: string | Error): void;
- /**
- * Expects the `string` input not to match the regular expression.
- *
- * ```js
- * import assert from 'assert/strict';
- *
- * assert.doesNotMatch('I will fail', /fail/);
- * // AssertionError [ERR_ASSERTION]: The input was expected to not match the ...
- *
- * assert.doesNotMatch(123, /pass/);
- * // AssertionError [ERR_ASSERTION]: The "string" argument must be of type string.
- *
- * assert.doesNotMatch('I will pass', /different/);
- * // OK
- * ```
- *
- * If the values do match, or if the `string` argument is of another type than`string`, an `AssertionError` is thrown with a `message` property set equal
- * to the value of the `message` parameter. If the `message` parameter is
- * undefined, a default error message is assigned. If the `message` parameter is an
- * instance of an `Error` then it will be thrown instead of the `AssertionError`.
- * @since v13.6.0, v12.16.0
- */
- function doesNotMatch(value: string, regExp: RegExp, message?: string | Error): void;
- const strict:
- & Omit<
- typeof assert,
- | "equal"
- | "notEqual"
- | "deepEqual"
- | "notDeepEqual"
- | "ok"
- | "strictEqual"
- | "deepStrictEqual"
- | "ifError"
- | "strict"
- >
- & {
- (value: unknown, message?: string | Error): asserts value;
- equal: typeof strictEqual;
- notEqual: typeof notStrictEqual;
- deepEqual: typeof deepStrictEqual;
- notDeepEqual: typeof notDeepStrictEqual;
- // Mapped types and assertion functions are incompatible?
- // TS2775: Assertions require every name in the call target
- // to be declared with an explicit type annotation.
- ok: typeof ok;
- strictEqual: typeof strictEqual;
- deepStrictEqual: typeof deepStrictEqual;
- ifError: typeof ifError;
- strict: typeof strict;
- };
- }
- export = assert;
-}
-declare module "node:assert" {
- import assert = require("assert");
- export = assert;
-}
diff --git a/extension/node_modules/@types/node/assert/strict.d.ts b/extension/node_modules/@types/node/assert/strict.d.ts
deleted file mode 100644
index f333913..0000000
--- a/extension/node_modules/@types/node/assert/strict.d.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-declare module "assert/strict" {
- import { strict } from "node:assert";
- export = strict;
-}
-declare module "node:assert/strict" {
- import { strict } from "node:assert";
- export = strict;
-}
diff --git a/extension/node_modules/@types/node/async_hooks.d.ts b/extension/node_modules/@types/node/async_hooks.d.ts
deleted file mode 100644
index 4ed0453..0000000
--- a/extension/node_modules/@types/node/async_hooks.d.ts
+++ /dev/null
@@ -1,501 +0,0 @@
-/**
- * The `async_hooks` module provides an API to track asynchronous resources. It
- * can be accessed using:
- *
- * ```js
- * import async_hooks from 'async_hooks';
- * ```
- * @experimental
- * @see [source](https://github.com/nodejs/node/blob/v16.9.0/lib/async_hooks.js)
- */
-declare module "async_hooks" {
- /**
- * ```js
- * import { executionAsyncId } from 'async_hooks';
- *
- * console.log(executionAsyncId()); // 1 - bootstrap
- * fs.open(path, 'r', (err, fd) => {
- * console.log(executionAsyncId()); // 6 - open()
- * });
- * ```
- *
- * The ID returned from `executionAsyncId()` is related to execution timing, not
- * causality (which is covered by `triggerAsyncId()`):
- *
- * ```js
- * const server = net.createServer((conn) => {
- * // Returns the ID of the server, not of the new connection, because the
- * // callback runs in the execution scope of the server's MakeCallback().
- * async_hooks.executionAsyncId();
- *
- * }).listen(port, () => {
- * // Returns the ID of a TickObject (process.nextTick()) because all
- * // callbacks passed to .listen() are wrapped in a nextTick().
- * async_hooks.executionAsyncId();
- * });
- * ```
- *
- * Promise contexts may not get precise `executionAsyncIds` by default.
- * See the section on `promise execution tracking`.
- * @since v8.1.0
- * @return The `asyncId` of the current execution context. Useful to track when something calls.
- */
- function executionAsyncId(): number;
- /**
- * Resource objects returned by `executionAsyncResource()` are most often internal
- * Node.js handle objects with undocumented APIs. Using any functions or properties
- * on the object is likely to crash your application and should be avoided.
- *
- * Using `executionAsyncResource()` in the top-level execution context will
- * return an empty object as there is no handle or request object to use,
- * but having an object representing the top-level can be helpful.
- *
- * ```js
- * import { open } from 'fs';
- * import { executionAsyncId, executionAsyncResource } from 'async_hooks';
- *
- * console.log(executionAsyncId(), executionAsyncResource()); // 1 {}
- * open(new URL(import.meta.url), 'r', (err, fd) => {
- * console.log(executionAsyncId(), executionAsyncResource()); // 7 FSReqWrap
- * });
- * ```
- *
- * This can be used to implement continuation local storage without the
- * use of a tracking `Map` to store the metadata:
- *
- * ```js
- * import { createServer } from 'http';
- * import {
- * executionAsyncId,
- * executionAsyncResource,
- * createHook
- * } from 'async_hooks';
- * const sym = Symbol('state'); // Private symbol to avoid pollution
- *
- * createHook({
- * init(asyncId, type, triggerAsyncId, resource) {
- * const cr = executionAsyncResource();
- * if (cr) {
- * resource[sym] = cr[sym];
- * }
- * }
- * }).enable();
- *
- * const server = createServer((req, res) => {
- * executionAsyncResource()[sym] = { state: req.url };
- * setTimeout(function() {
- * res.end(JSON.stringify(executionAsyncResource()[sym]));
- * }, 100);
- * }).listen(3000);
- * ```
- * @since v13.9.0, v12.17.0
- * @return The resource representing the current execution. Useful to store data within the resource.
- */
- function executionAsyncResource(): object;
- /**
- * ```js
- * const server = net.createServer((conn) => {
- * // The resource that caused (or triggered) this callback to be called
- * // was that of the new connection. Thus the return value of triggerAsyncId()
- * // is the asyncId of "conn".
- * async_hooks.triggerAsyncId();
- *
- * }).listen(port, () => {
- * // Even though all callbacks passed to .listen() are wrapped in a nextTick()
- * // the callback itself exists because the call to the server's .listen()
- * // was made. So the return value would be the ID of the server.
- * async_hooks.triggerAsyncId();
- * });
- * ```
- *
- * Promise contexts may not get valid `triggerAsyncId`s by default. See
- * the section on `promise execution tracking`.
- * @return The ID of the resource responsible for calling the callback that is currently being executed.
- */
- function triggerAsyncId(): number;
- interface HookCallbacks {
- /**
- * Called when a class is constructed that has the possibility to emit an asynchronous event.
- * @param asyncId a unique ID for the async resource
- * @param type the type of the async resource
- * @param triggerAsyncId the unique ID of the async resource in whose execution context this async resource was created
- * @param resource reference to the resource representing the async operation, needs to be released during destroy
- */
- init?(asyncId: number, type: string, triggerAsyncId: number, resource: object): void;
- /**
- * When an asynchronous operation is initiated or completes a callback is called to notify the user.
- * The before callback is called just before said callback is executed.
- * @param asyncId the unique identifier assigned to the resource about to execute the callback.
- */
- before?(asyncId: number): void;
- /**
- * Called immediately after the callback specified in before is completed.
- * @param asyncId the unique identifier assigned to the resource which has executed the callback.
- */
- after?(asyncId: number): void;
- /**
- * Called when a promise has resolve() called. This may not be in the same execution id
- * as the promise itself.
- * @param asyncId the unique id for the promise that was resolve()d.
- */
- promiseResolve?(asyncId: number): void;
- /**
- * Called after the resource corresponding to asyncId is destroyed
- * @param asyncId a unique ID for the async resource
- */
- destroy?(asyncId: number): void;
- }
- interface AsyncHook {
- /**
- * Enable the callbacks for a given AsyncHook instance. If no callbacks are provided enabling is a noop.
- */
- enable(): this;
- /**
- * Disable the callbacks for a given AsyncHook instance from the global pool of AsyncHook callbacks to be executed. Once a hook has been disabled it will not be called again until enabled.
- */
- disable(): this;
- }
- /**
- * Registers functions to be called for different lifetime events of each async
- * operation.
- *
- * The callbacks `init()`/`before()`/`after()`/`destroy()` are called for the
- * respective asynchronous event during a resource's lifetime.
- *
- * All callbacks are optional. For example, if only resource cleanup needs to
- * be tracked, then only the `destroy` callback needs to be passed. The
- * specifics of all functions that can be passed to `callbacks` is in the `Hook Callbacks` section.
- *
- * ```js
- * import { createHook } from 'async_hooks';
- *
- * const asyncHook = createHook({
- * init(asyncId, type, triggerAsyncId, resource) { },
- * destroy(asyncId) { }
- * });
- * ```
- *
- * The callbacks will be inherited via the prototype chain:
- *
- * ```js
- * class MyAsyncCallbacks {
- * init(asyncId, type, triggerAsyncId, resource) { }
- * destroy(asyncId) {}
- * }
- *
- * class MyAddedCallbacks extends MyAsyncCallbacks {
- * before(asyncId) { }
- * after(asyncId) { }
- * }
- *
- * const asyncHook = async_hooks.createHook(new MyAddedCallbacks());
- * ```
- *
- * Because promises are asynchronous resources whose lifecycle is tracked
- * via the async hooks mechanism, the `init()`, `before()`, `after()`, and`destroy()` callbacks _must not_ be async functions that return promises.
- * @since v8.1.0
- * @param callbacks The `Hook Callbacks` to register
- * @return Instance used for disabling and enabling hooks
- */
- function createHook(callbacks: HookCallbacks): AsyncHook;
- interface AsyncResourceOptions {
- /**
- * The ID of the execution context that created this async event.
- * @default executionAsyncId()
- */
- triggerAsyncId?: number | undefined;
- /**
- * Disables automatic `emitDestroy` when the object is garbage collected.
- * This usually does not need to be set (even if `emitDestroy` is called
- * manually), unless the resource's `asyncId` is retrieved and the
- * sensitive API's `emitDestroy` is called with it.
- * @default false
- */
- requireManualDestroy?: boolean | undefined;
- }
- /**
- * The class `AsyncResource` is designed to be extended by the embedder's async
- * resources. Using this, users can easily trigger the lifetime events of their
- * own resources.
- *
- * The `init` hook will trigger when an `AsyncResource` is instantiated.
- *
- * The following is an overview of the `AsyncResource` API.
- *
- * ```js
- * import { AsyncResource, executionAsyncId } from 'async_hooks';
- *
- * // AsyncResource() is meant to be extended. Instantiating a
- * // new AsyncResource() also triggers init. If triggerAsyncId is omitted then
- * // async_hook.executionAsyncId() is used.
- * const asyncResource = new AsyncResource(
- * type, { triggerAsyncId: executionAsyncId(), requireManualDestroy: false }
- * );
- *
- * // Run a function in the execution context of the resource. This will
- * // * establish the context of the resource
- * // * trigger the AsyncHooks before callbacks
- * // * call the provided function `fn` with the supplied arguments
- * // * trigger the AsyncHooks after callbacks
- * // * restore the original execution context
- * asyncResource.runInAsyncScope(fn, thisArg, ...args);
- *
- * // Call AsyncHooks destroy callbacks.
- * asyncResource.emitDestroy();
- *
- * // Return the unique ID assigned to the AsyncResource instance.
- * asyncResource.asyncId();
- *
- * // Return the trigger ID for the AsyncResource instance.
- * asyncResource.triggerAsyncId();
- * ```
- */
- class AsyncResource {
- /**
- * AsyncResource() is meant to be extended. Instantiating a
- * new AsyncResource() also triggers init. If triggerAsyncId is omitted then
- * async_hook.executionAsyncId() is used.
- * @param type The type of async event.
- * @param triggerAsyncId The ID of the execution context that created
- * this async event (default: `executionAsyncId()`), or an
- * AsyncResourceOptions object (since v9.3.0)
- */
- constructor(type: string, triggerAsyncId?: number | AsyncResourceOptions);
- /**
- * Binds the given function to the current execution context.
- *
- * The returned function will have an `asyncResource` property referencing
- * the `AsyncResource` to which the function is bound.
- * @since v14.8.0, v12.19.0
- * @param fn The function to bind to the current execution context.
- * @param type An optional name to associate with the underlying `AsyncResource`.
- */
- static bind any, ThisArg>(
- fn: Func,
- type?: string,
- thisArg?: ThisArg,
- ): Func & {
- asyncResource: AsyncResource;
- };
- /**
- * Binds the given function to execute to this `AsyncResource`'s scope.
- *
- * The returned function will have an `asyncResource` property referencing
- * the `AsyncResource` to which the function is bound.
- * @since v14.8.0, v12.19.0
- * @param fn The function to bind to the current `AsyncResource`.
- */
- bind any>(
- fn: Func,
- ): Func & {
- asyncResource: AsyncResource;
- };
- /**
- * Call the provided function with the provided arguments in the execution context
- * of the async resource. This will establish the context, trigger the AsyncHooks
- * before callbacks, call the function, trigger the AsyncHooks after callbacks, and
- * then restore the original execution context.
- * @since v9.6.0
- * @param fn The function to call in the execution context of this async resource.
- * @param thisArg The receiver to be used for the function call.
- * @param args Optional arguments to pass to the function.
- */
- runInAsyncScope(
- fn: (this: This, ...args: any[]) => Result,
- thisArg?: This,
- ...args: any[]
- ): Result;
- /**
- * Call all `destroy` hooks. This should only ever be called once. An error will
- * be thrown if it is called more than once. This **must** be manually called. If
- * the resource is left to be collected by the GC then the `destroy` hooks will
- * never be called.
- * @return A reference to `asyncResource`.
- */
- emitDestroy(): this;
- /**
- * @return The unique `asyncId` assigned to the resource.
- */
- asyncId(): number;
- /**
- * @return The same `triggerAsyncId` that is passed to the `AsyncResource` constructor.
- */
- triggerAsyncId(): number;
- }
- /**
- * This class creates stores that stay coherent through asynchronous operations.
- *
- * While you can create your own implementation on top of the `async_hooks` module,`AsyncLocalStorage` should be preferred as it is a performant and memory safe
- * implementation that involves significant optimizations that are non-obvious to
- * implement.
- *
- * The following example uses `AsyncLocalStorage` to build a simple logger
- * that assigns IDs to incoming HTTP requests and includes them in messages
- * logged within each request.
- *
- * ```js
- * import http from 'http';
- * import { AsyncLocalStorage } from 'async_hooks';
- *
- * const asyncLocalStorage = new AsyncLocalStorage();
- *
- * function logWithId(msg) {
- * const id = asyncLocalStorage.getStore();
- * console.log(`${id !== undefined ? id : '-'}:`, msg);
- * }
- *
- * let idSeq = 0;
- * http.createServer((req, res) => {
- * asyncLocalStorage.run(idSeq++, () => {
- * logWithId('start');
- * // Imagine any chain of async operations here
- * setImmediate(() => {
- * logWithId('finish');
- * res.end();
- * });
- * });
- * }).listen(8080);
- *
- * http.get('http://localhost:8080');
- * http.get('http://localhost:8080');
- * // Prints:
- * // 0: start
- * // 1: start
- * // 0: finish
- * // 1: finish
- * ```
- *
- * Each instance of `AsyncLocalStorage` maintains an independent storage context.
- * Multiple instances can safely exist simultaneously without risk of interfering
- * with each other data.
- * @since v13.10.0, v12.17.0
- */
- class AsyncLocalStorage {
- /**
- * Disables the instance of `AsyncLocalStorage`. All subsequent calls
- * to `asyncLocalStorage.getStore()` will return `undefined` until`asyncLocalStorage.run()` or `asyncLocalStorage.enterWith()` is called again.
- *
- * When calling `asyncLocalStorage.disable()`, all current contexts linked to the
- * instance will be exited.
- *
- * Calling `asyncLocalStorage.disable()` is required before the`asyncLocalStorage` can be garbage collected. This does not apply to stores
- * provided by the `asyncLocalStorage`, as those objects are garbage collected
- * along with the corresponding async resources.
- *
- * Use this method when the `asyncLocalStorage` is not in use anymore
- * in the current process.
- * @since v13.10.0, v12.17.0
- * @experimental
- */
- disable(): void;
- /**
- * Returns the current store.
- * If called outside of an asynchronous context initialized by
- * calling `asyncLocalStorage.run()` or `asyncLocalStorage.enterWith()`, it
- * returns `undefined`.
- * @since v13.10.0, v12.17.0
- */
- getStore(): T | undefined;
- /**
- * Runs a function synchronously within a context and returns its
- * return value. The store is not accessible outside of the callback function or
- * the asynchronous operations created within the callback.
- *
- * The optional `args` are passed to the callback function.
- *
- * If the callback function throws an error, the error is thrown by `run()` too.
- * The stacktrace is not impacted by this call and the context is exited.
- *
- * Example:
- *
- * ```js
- * const store = { id: 2 };
- * try {
- * asyncLocalStorage.run(store, () => {
- * asyncLocalStorage.getStore(); // Returns the store object
- * throw new Error();
- * });
- * } catch (e) {
- * asyncLocalStorage.getStore(); // Returns undefined
- * // The error will be caught here
- * }
- * ```
- * @since v13.10.0, v12.17.0
- */
- run(store: T, callback: () => R): R;
- run(store: T, callback: (...args: TArgs) => R, ...args: TArgs): R;
- /**
- * Runs a function synchronously outside of a context and returns its
- * return value. The store is not accessible within the callback function or
- * the asynchronous operations created within the callback. Any `getStore()`call done within the callback function will always return `undefined`.
- *
- * The optional `args` are passed to the callback function.
- *
- * If the callback function throws an error, the error is thrown by `exit()` too.
- * The stacktrace is not impacted by this call and the context is re-entered.
- *
- * Example:
- *
- * ```js
- * // Within a call to run
- * try {
- * asyncLocalStorage.getStore(); // Returns the store object or value
- * asyncLocalStorage.exit(() => {
- * asyncLocalStorage.getStore(); // Returns undefined
- * throw new Error();
- * });
- * } catch (e) {
- * asyncLocalStorage.getStore(); // Returns the same object or value
- * // The error will be caught here
- * }
- * ```
- * @since v13.10.0, v12.17.0
- * @experimental
- */
- exit(callback: (...args: TArgs) => R, ...args: TArgs): R;
- /**
- * Transitions into the context for the remainder of the current
- * synchronous execution and then persists the store through any following
- * asynchronous calls.
- *
- * Example:
- *
- * ```js
- * const store = { id: 1 };
- * // Replaces previous store with the given store object
- * asyncLocalStorage.enterWith(store);
- * asyncLocalStorage.getStore(); // Returns the store object
- * someAsyncOperation(() => {
- * asyncLocalStorage.getStore(); // Returns the same object
- * });
- * ```
- *
- * This transition will continue for the _entire_ synchronous execution.
- * This means that if, for example, the context is entered within an event
- * handler subsequent event handlers will also run within that context unless
- * specifically bound to another context with an `AsyncResource`. That is why`run()` should be preferred over `enterWith()` unless there are strong reasons
- * to use the latter method.
- *
- * ```js
- * const store = { id: 1 };
- *
- * emitter.on('my-event', () => {
- * asyncLocalStorage.enterWith(store);
- * });
- * emitter.on('my-event', () => {
- * asyncLocalStorage.getStore(); // Returns the same object
- * });
- *
- * asyncLocalStorage.getStore(); // Returns undefined
- * emitter.emit('my-event');
- * asyncLocalStorage.getStore(); // Returns the same object
- * ```
- * @since v13.11.0, v12.17.0
- * @experimental
- */
- enterWith(store: T): void;
- }
-}
-declare module "node:async_hooks" {
- export * from "async_hooks";
-}
diff --git a/extension/node_modules/@types/node/buffer.buffer.d.ts b/extension/node_modules/@types/node/buffer.buffer.d.ts
deleted file mode 100644
index adeb227..0000000
--- a/extension/node_modules/@types/node/buffer.buffer.d.ts
+++ /dev/null
@@ -1,365 +0,0 @@
-declare module "buffer" {
- global {
- interface BufferConstructor {
- // see buffer.d.ts for implementation shared with all TypeScript versions
-
- /**
- * Allocates a new buffer containing the given {str}.
- *
- * @param str String to store in buffer.
- * @param encoding encoding to use, optional. Default is 'utf8'
- * @deprecated since v10.0.0 - Use `Buffer.from(string[, encoding])` instead.
- */
- new(str: string, encoding?: BufferEncoding): Buffer;
- /**
- * Allocates a new buffer of {size} octets.
- *
- * @param size count of octets to allocate.
- * @deprecated since v10.0.0 - Use `Buffer.alloc()` instead (also see `Buffer.allocUnsafe()`).
- */
- new(size: number): Buffer;
- /**
- * Allocates a new buffer containing the given {array} of octets.
- *
- * @param array The octets to store.
- * @deprecated since v10.0.0 - Use `Buffer.from(array)` instead.
- */
- new(array: Uint8Array): Buffer;
- /**
- * Produces a Buffer backed by the same allocated memory as
- * the given {ArrayBuffer}/{SharedArrayBuffer}.
- *
- * @param arrayBuffer The ArrayBuffer with which to share memory.
- * @deprecated since v10.0.0 - Use `Buffer.from(arrayBuffer[, byteOffset[, length]])` instead.
- */
- new(arrayBuffer: TArrayBuffer): Buffer;
- /**
- * Allocates a new buffer containing the given {array} of octets.
- *
- * @param array The octets to store.
- * @deprecated since v10.0.0 - Use `Buffer.from(array)` instead.
- */
- new(array: readonly any[]): Buffer;
- /**
- * Copies the passed {buffer} data onto a new {Buffer} instance.
- *
- * @param buffer The buffer to copy.
- * @deprecated since v10.0.0 - Use `Buffer.from(buffer)` instead.
- */
- new(buffer: Buffer): Buffer;
- /**
- * Allocates a new `Buffer` using an `array` of bytes in the range `0` โ `255`.
- * Array entries outside that range will be truncated to fit into it.
- *
- * ```js
- * import { Buffer } from 'node:buffer';
- *
- * // Creates a new Buffer containing the UTF-8 bytes of the string 'buffer'.
- * const buf = Buffer.from([0x62, 0x75, 0x66, 0x66, 0x65, 0x72]);
- * ```
- *
- * A `TypeError` will be thrown if `array` is not an `Array` or another type
- * appropriate for `Buffer.from()` variants.
- *
- * `Buffer.from(array)` and `Buffer.from(string)` may also use the internal`Buffer` pool like `Buffer.allocUnsafe()` does.
- * @since v5.10.0
- */
- from(
- arrayBuffer: WithImplicitCoercion,
- byteOffset?: number,
- length?: number,
- ): Buffer;
- /**
- * Creates a new Buffer using the passed {data}
- * @param data data to create a new Buffer
- */
- from(data: Uint8Array | readonly number[]): Buffer;
- from(data: WithImplicitCoercion): Buffer;
- /**
- * Creates a new Buffer containing the given JavaScript string {str}.
- * If provided, the {encoding} parameter identifies the character encoding.
- * If not provided, {encoding} defaults to 'utf8'.
- */
- from(
- str:
- | WithImplicitCoercion