diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 499fd4c6..11ae3afb 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -76,6 +76,14 @@ export default defineConfig({ items: [ { text: 'Positioning', link: '/reference/positioning' }, { text: 'Architecture', link: '/reference/architecture' }, + { text: 'Local Glossary', link: '/reference/glossary/' }, + { text: 'Chat Data Flow', link: '/reference/chat-data-flow' }, + { text: 'Actor Lifecycle Surfaces', link: '/reference/actor-group-lifecycle-surfaces' }, + { text: 'Actor Terminal Input Control', link: '/reference/actor-terminal-input-control' }, + { text: 'Actor Native Resume Bindings', link: '/reference/actor-native-resume-bindings' }, + { text: 'Codex Session Resume', link: '/reference/codex-session-resume' }, + { text: 'Runtime Session Recovery Strategy', link: '/reference/runtime-session-recovery-strategy' }, + { text: 'Operations Board and Review', link: '/reference/operations-board-and-review' }, { text: 'Features', link: '/reference/features' }, { text: 'CLI', link: '/reference/cli' } ] diff --git a/docs/guide/best-practices.md b/docs/guide/best-practices.md index 24d0724c..ae7ad608 100644 --- a/docs/guide/best-practices.md +++ b/docs/guide/best-practices.md @@ -2,6 +2,16 @@ Tips for getting the most out of CCCC. +## Terminology Alignment + +This document follows the local glossary: + +- `group` is the main collaboration unit +- `actor` is the live scheduled participant +- `profile` is reusable actor runtime configuration, not the live actor itself +- `attach` sets the group's `authoritative_workspace` +- `status` is an evidence-bound operator-facing surface + ## Setting Up for Success ### Write a Good PROJECT.md @@ -116,15 +126,22 @@ Agents read the markdown and follow the rules semantically. 
There's no special p Use recommended flags for autonomous operation: ```bash +# Reusable profile-backed setup +cccc actor profile upsert --id impl-shared --name "Implementer Shared" --runtime claude +cccc actor add impl --profile-id impl-shared # Claude Code -cccc actor add impl --runtime claude # Uses: claude --dangerously-skip-permissions +# Reusable profile-backed setup +cccc actor profile upsert --id review-shared --name "Reviewer Shared" --runtime codex +cccc actor add review --profile-id review-shared # Codex -cccc actor add review --runtime codex # Uses: codex --dangerously-bypass-approvals-and-sandbox --search ``` +This keeps reusable runtime intent in `profile` records while leaving the live +scheduled participant as the `actor`. + ## Effective Communication ### Be Specific @@ -254,3 +271,17 @@ If an agent keeps repeating: - Check commits before pushing - Use code review agents - Set up CI/CD guardrails + +## Related Glossary + +- [group](/reference/glossary/group) +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-23`: Added profile-backed runtime setup examples so best-practice guidance matches the current CLI actor/profile split. +- `2026-03-23`: Added glossary alignment so best-practice guidance distinguishes live actor behavior from reusable profile configuration and keeps attach/status semantics explicit. diff --git a/docs/guide/faq.md b/docs/guide/faq.md index d4205b2c..2142d752 100644 --- a/docs/guide/faq.md +++ b/docs/guide/faq.md @@ -2,6 +2,19 @@ Frequently asked questions about CCCC. 
+## Terminology Alignment + +This document follows the local glossary: + +- `group` is the concise repo-local term, while `working group` remains + compatibility wording +- `attach` defines a group's `authoritative_workspace` +- `scope` is older wording that may still appear in historical answers +- `profile` means reusable actor runtime configuration and launch intent +- `status` is an evidence-bound surface, not proof that every deeper capability + is available +- `resume` is layered and does not only mean native runtime session continuity + ## Installation & Setup ### How do I install CCCC? @@ -78,6 +91,20 @@ This checks Python version, available runtimes, and daemon status. cccc actor add my-agent --runtime custom --command "my-custom-cli" ``` +### What is the difference between an actor and a profile? + +- An `actor` is the live scheduled participant inside a group. +- A `profile` is reusable runtime configuration that an actor may link to. +- Linking a profile does not by itself prove that the actor is currently + running, resumed, or attached to any particular live session. + +Typical profile-backed path: + +```bash +cccc actor profile upsert --id shared-dev --name "Shared Dev" --runtime claude +cccc actor add dev --profile-id shared-dev +``` + ### Agent won't start? 1. Check the terminal tab for error messages @@ -198,14 +225,16 @@ cccc doctor # Check configuration ### What is a Working Group? -A working group is like an IM group chat with execution capabilities. It includes: +A `group` is the core collaboration unit in CCCC. Older FAQ wording may still +say `working group` for compatibility. It includes: - An append-only ledger (message history) - One or more actors (agents) -- Optional scopes (project directories) +- An attached project path that acts as the group's authoritative workspace ### What is the Ledger? -The ledger is an append-only event stream that stores all messages, state changes, and decisions. 
It's the single source of truth for a working group. +The ledger is an append-only event stream that stores all messages, state +changes, and decisions. It's the single source of truth for a group. ### What is MCP? @@ -213,4 +242,23 @@ MCP (Model Context Protocol) is how agents interact with CCCC. It exposes a rich ### What is a Scope? -A scope is a project directory attached to a working group. Agents work within scopes, and events are attributed to scopes. +`Scope` is older compatibility wording for an attached project directory. In +current glossary terms, `attach` sets the group's +`authoritative_workspace`, while actor runtimes may later work in the same path +or a different `execution_workspace` depending on explicit policy. + +## Related Glossary + +- [group](/reference/glossary/group) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [execution_workspace](/reference/glossary/execution_workspace) +- [profile](/reference/glossary/profile) +- [resume](/reference/glossary/resume) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so FAQ answers stop treating `working group`, `scope`, `status`, and `resume` as self-evident legacy shorthand. +- `2026-03-23`: Added a concrete profile-management command path so the actor-versus-profile FAQ points to the current CLI surface. +- `2026-03-23`: Added `profile` alignment and an explicit actor-versus-profile FAQ so reusable runtime configuration is not confused with live actor state. diff --git a/docs/guide/getting-started/cli.md b/docs/guide/getting-started/cli.md index c78cdd8d..3f5352e4 100644 --- a/docs/guide/getting-started/cli.md +++ b/docs/guide/getting-started/cli.md @@ -16,6 +16,10 @@ cccc attach . This binds the current directory as a "scope" and creates a working group. +In current glossary wording, `attach` sets the group's +`authoritative_workspace`. 
Older shorthand like `scope` remains compatibility +wording in some guides and CLI surfaces. + ## Step 3: Configure MCP for Your Runtime ```bash @@ -32,6 +36,19 @@ cccc actor add assistant --runtime claude The first enabled actor automatically becomes the "foreman" (coordinator). +At this step you are creating a live `actor`. Reusable `profile` support is a +separate concept used when you want runtime defaults or launch intent to be +reused across actors or later sessions. + +If you want that reusable layer first, you can now create it directly from the +CLI before attaching it to a live actor: + +```bash +cccc actor profile upsert --id shared-codex --name "Shared Codex" --runtime codex +cccc actor profile list +cccc actor add assistant --profile-id shared-codex +``` + ## Step 5: Start the Agent ```bash @@ -163,7 +180,8 @@ Access at http://127.0.0.1:8848/ cd ~/projects/my-app cccc attach . cccc setup --runtime claude -cccc actor add dev --runtime claude +cccc actor profile upsert --id dev-shared --name "Dev Shared" --runtime claude +cccc actor add dev --profile-id dev-shared # Work cccc group start @@ -219,3 +237,19 @@ cccc attach . - [Workflows](/guide/workflows) - Learn collaboration patterns - [CLI Reference](/reference/cli) - Complete command reference - [IM Bridge](/guide/im-bridge/) - Set up mobile access +- [Local Glossary](/reference/glossary/) - Canonical local term meanings + +## Related Glossary + +- [group](/reference/glossary/group) +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so quick-start wording around `attach` points readers to `authoritative_workspace` instead of leaving `scope` ambiguous. +- `2026-03-23`: Added a concrete `actor add --profile-id ...` quick-start path so profile-backed live actor setup is shown, not just described. 
+- `2026-03-23`: Added direct `cccc actor profile ...` quick-start examples so reusable runtime configuration is no longer only described abstractly. +- `2026-03-23`: Added actor-versus-profile quick-start wording so reusable runtime configuration is not confused with live actor creation. diff --git a/docs/guide/getting-started/index.md b/docs/guide/getting-started/index.md index 4f0c414d..55c169e6 100644 --- a/docs/guide/getting-started/index.md +++ b/docs/guide/getting-started/index.md @@ -2,6 +2,16 @@ Get CCCC running in 10 minutes. +## Terminology Alignment + +This page follows the local glossary: + +- `group` is the main collaboration unit +- `actor` is the live scheduled participant +- `profile` is reusable actor runtime configuration and launch intent +- `attach` sets the group's `authoritative_workspace` +- older `scope` wording may still appear as compatibility shorthand + ## Choose Your Approach CCCC offers two ways to get started: @@ -100,6 +110,17 @@ cccc doctor This checks Python version, available runtimes, and system configuration. +If you already know you want reusable runtime intent instead of one-off actor +creation, the current CLI supports a profile-backed start path: + +```bash +cccc actor profile upsert --id starter-shared --name "Starter Shared" --runtime claude +cccc actor add starter --profile-id starter-shared +``` + +That is the profile-backed setup path. The live scheduled participant is still +the actor; the profile stores reusable runtime defaults. + ## Next Steps - [Web UI Quick Start](./web) - Get started with the visual interface @@ -109,3 +130,16 @@ This checks Python version, available runtimes, and system configuration. 
- [Use Cases](/guide/use-cases) - Learn high-ROI real-world patterns - [Operations Runbook](/guide/operations) - Run CCCC with operator-grade reliability - [Positioning](/reference/positioning) - Decide where CCCC should sit in your stack + +## Related Glossary + +- [group](/reference/glossary/group) +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) + +## Change Log + +- `2026-03-23`: Added a profile-backed setup pointer so the landing page reflects the current actor/profile product split. +- `2026-03-23`: Added glossary alignment so the main getting-started entry uses repo-local `group` / `actor` / `profile` / `attach` meanings instead of leaving old shorthand implicit. diff --git a/docs/guide/getting-started/web.md b/docs/guide/getting-started/web.md index 72ec45b8..38fc9d10 100644 --- a/docs/guide/getting-started/web.md +++ b/docs/guide/getting-started/web.md @@ -2,6 +2,15 @@ Get started with CCCC using the Web interface. +## Terminology Alignment + +This document follows the local glossary: + +- `attach` sets the group's `authoritative_workspace` +- `actor` is the live participant you create and start in Web UI +- `profile` is reusable runtime configuration, separate from the live actor +- `status` is an evidence-bound operator-facing surface + ## Step 1: Start CCCC Open a terminal and run: @@ -35,6 +44,10 @@ cccc attach . 3. Refresh the Web UI to see your new group +In current glossary wording, `attach` sets the group's +`authoritative_workspace`. That does not by itself create per-actor isolated +execution workspaces. + ## Step 4: Add Your First Agent 1. Click **Add Actor** in the header @@ -44,6 +57,18 @@ cccc attach . - **Runner**: PTY (terminal) or Headless 3. Click **Create** +This step creates a live `actor`. 
In current product terms, any future +profile-backed Web flow should still be read as linking reusable configuration +to the actor, not replacing actor identity. + +Today, if you want a profile-backed setup before returning to the Web UI, you +can do it from another terminal: + +```bash +cccc actor profile upsert --id assistant-shared --name "Assistant Shared" --runtime claude +cccc actor add assistant --profile-id assistant-shared +``` + ## Step 5: Configure MCP (First Time Only) If this is your first time using CCCC with this runtime: @@ -155,3 +180,18 @@ Run `cccc attach .` in your project directory, then refresh the Web UI. - [Workflows](/guide/workflows) - Learn collaboration patterns - [Web UI Guide](/guide/web-ui) - Detailed UI documentation - [IM Bridge](/guide/im-bridge/) - Set up mobile access +- [Local Glossary](/reference/glossary/) - Canonical local term meanings + +## Related Glossary + +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so Web quick-start wording distinguishes group attachment from future execution-workspace policy. +- `2026-03-23`: Added a profile-backed setup note so Web quick-start reflects the current CLI actor/profile surface. +- `2026-03-23`: Added actor-versus-profile quick-start wording so Web setup does not collapse reusable runtime configuration into live actor creation. diff --git a/docs/guide/group-space-notebooklm.md b/docs/guide/group-space-notebooklm.md index 2879f038..41b26143 100644 --- a/docs/guide/group-space-notebooklm.md +++ b/docs/guide/group-space-notebooklm.md @@ -2,6 +2,15 @@ This guide covers the user-facing Web flow for connecting NotebookLM and choosing which notebooks CCCC should use. 
+## Terminology Alignment + +This document follows the local glossary: + +- `group` remains the lifecycle owner of notebook binding state +- `attach` sets the group's `authoritative_workspace` +- `status` on this page should be read as a lightweight operator-facing snapshot +- provider binding state does not replace group/runtime truth + The Web UI is intentionally minimal: 1. connect Google @@ -127,3 +136,14 @@ Relevant metadata files remain: - `/space/artifacts/notebooklm/...` These implementation details matter for agent/developer workflows, but they are not part of the normal user-facing binding flow. + +## Related Glossary + +- [group](/reference/glossary/group) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-23`: Added glossary alignment so NotebookLM binding guidance keeps group ownership, attach authority, and lightweight status semantics explicit. diff --git a/docs/guide/index.md b/docs/guide/index.md index c6134877..55fac810 100644 --- a/docs/guide/index.md +++ b/docs/guide/index.md @@ -8,6 +8,13 @@ Use this section based on your role and goal. - [Web UI Quick Start](/guide/getting-started/web) if you prefer visual control - [CLI Quick Start](/guide/getting-started/cli) if you prefer terminal-first workflow +Current recommended profile-backed start path: + +```bash +cccc actor profile upsert --id starter-shared --name "Starter Shared" --runtime claude +cccc actor add starter --profile-id starter-shared +``` + ## If You Need Practical, High-ROI Patterns - [Use Cases](/guide/use-cases) for production-like collaboration scenarios @@ -28,7 +35,21 @@ Use this section based on your role and goal. 
## Core Concepts (Short Version) - **Working Group**: the collaboration unit with durable history -- **Actor**: an agent runtime session (foreman/peer) -- **Scope**: a directory context attached to a group +- **Actor**: the live scheduled participant (foreman/peer) +- **Profile**: reusable actor runtime configuration and launch intent +- **Scope**: older compatibility wording for a directory context attached to a group - **Ledger**: append-only collaboration event stream - **Daemon**: single writer and source of operational truth + +## Related Glossary + +- [group](/reference/glossary/group) +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) + +## Change Log + +- `2026-03-23`: Added a profile-backed start path to the guide landing page so high-level onboarding points to the current actor/profile setup model. +- `2026-03-23`: Added `profile` to the short core-concepts map so the main guide landing page no longer collapses reusable runtime configuration into actor-only wording. diff --git a/docs/guide/operations.md b/docs/guide/operations.md index 0a2ebca2..2765a122 100644 --- a/docs/guide/operations.md +++ b/docs/guide/operations.md @@ -2,6 +2,21 @@ This page is for operators who need reliable day-to-day CCCC execution. 
+## Terminology Alignment + +This document follows the local glossary: + +- `registry` is a secondary bookkeeping and lookup surface, not the truth root +- `group` is the main runtime ownership unit +- `profile` is reusable actor runtime configuration, not the live actor itself +- `status` is an evidence-bound operator surface +- `attach` and `authoritative_workspace` remain distinct from lower-level + runtime execution details + +Operator shortcut: +- reusable launch intent can now be inspected and changed with + `cccc actor profile ...` before or between live actor runs + ## 1) Runtime Topology Default runtime home: @@ -14,6 +29,10 @@ Key paths: - `~/.cccc/groups//group.yaml` - `~/.cccc/groups//ledger.jsonl` +Notes: +- `registry.json` is for indexing and lookup. +- Group state and runtime evidence remain the stronger truth sources. + ## 2) Startup and Health Checks ### Start @@ -203,3 +222,19 @@ Optional throughput tuning: export CCCC_SPACE_PROVIDER_MAX_INFLIGHT=1 # safer export CCCC_SPACE_PROVIDER_MAX_INFLIGHT=4 # faster ``` + +## Related Glossary + +- [group](/reference/glossary/group) +- [profile](/reference/glossary/profile) +- [registry](/reference/glossary/registry) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so the operator runbook keeps registry, group ownership, status surfaces, and workspace authority clearly separated. +- `2026-03-23`: Added `cccc actor profile ...` operator note so reusable launch + intent management is documented alongside runtime operations. +- `2026-03-23`: Added `profile` alignment so operator lifecycle and recovery actions are not confused with reusable runtime configuration semantics. 
diff --git a/docs/guide/use-cases.md b/docs/guide/use-cases.md index df801a0b..505cbbf3 100644 --- a/docs/guide/use-cases.md +++ b/docs/guide/use-cases.md @@ -2,6 +2,16 @@ This page focuses on high-ROI, real-world CCCC workflows. +## Terminology Alignment + +This document follows the local glossary: + +- `group` is the main collaboration unit +- `actor` is the live scheduled participant used in each scenario +- `profile` is reusable runtime configuration and is separate from the actor +- `attach` sets the group's `authoritative_workspace` +- `status` should be read as operator-facing evidence + ## How to Read This Page Each scenario includes: @@ -24,8 +34,10 @@ cd /path/to/repo cccc attach . cccc setup --runtime claude cccc setup --runtime codex -cccc actor add builder --runtime claude -cccc actor add reviewer --runtime codex +cccc actor profile upsert --id builder-shared --name "Builder Shared" --runtime claude +cccc actor profile upsert --id reviewer-shared --name "Reviewer Shared" --runtime codex +cccc actor add builder --profile-id builder-shared +cccc actor add reviewer --profile-id reviewer-shared cccc group start ``` @@ -58,10 +70,14 @@ Split one medium project into parallel tracks while keeping alignment. 
### Minimal Setup ```bash -cccc actor add foreman --runtime claude -cccc actor add frontend --runtime codex -cccc actor add backend --runtime gemini -cccc actor add qa --runtime kimi +cccc actor profile upsert --id foreman-shared --name "Foreman Shared" --runtime claude +cccc actor profile upsert --id frontend-shared --name "Frontend Shared" --runtime codex +cccc actor profile upsert --id backend-shared --name "Backend Shared" --runtime gemini +cccc actor profile upsert --id qa-shared --name "QA Shared" --runtime kimi +cccc actor add foreman --profile-id foreman-shared +cccc actor add frontend --profile-id frontend-shared +cccc actor add backend --profile-id backend-shared +cccc actor add qa --profile-id qa-shared cccc group start ``` @@ -155,3 +171,17 @@ Run comparable multi-agent sessions with stable logging and replayability. - `docs/guide/operations.md` - `docs/reference/positioning.md` - `docs/reference/features.md` + +## Related Glossary + +- [group](/reference/glossary/group) +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-23`: Added concrete `profile` creation plus `actor add --profile-id ...` examples so scenario setup reflects the current CLI surface. +- `2026-03-23`: Added local glossary alignment so scenario docs stop implying that actor setup, reusable profile config, attach authority, and status evidence are the same layer. diff --git a/docs/guide/web-ui.md b/docs/guide/web-ui.md index 8cc6bfef..37efa501 100644 --- a/docs/guide/web-ui.md +++ b/docs/guide/web-ui.md @@ -2,6 +2,16 @@ The CCCC Web UI is a mobile-first control plane for managing your AI agents. 
+## Terminology Alignment + +This document follows the local glossary: + +- `actor` is the live participant shown in the UI +- `profile` is reusable runtime configuration and launch intent +- `attach` sets the group's `authoritative_workspace` +- `status` is an evidence-bound surface +- `resume` is layered and is not automatically proved by one visible status line + ## Accessing the Web UI After starting CCCC: @@ -29,6 +39,22 @@ The Web UI has these main areas: 1. Click the **+** button in the sidebar 2. Or use CLI: `cccc attach /path/to/project` +Planned next step for template-driven setup: + +This section describes intended behavior, not a shipped Web UI screen yet. + +- after choosing a project path and template file, Web UI should expose a + `Resume Recovery` section +- the user can add actor-specific bindings of: + - actor + - `session_id` + - enabled toggle +- those bindings should stay outside the portable template file itself +- when enabled for a `codex` PTY actor, launch should prefer native + `codex resume <session_id>` +- disabling resume later in actor settings should not erase the stored + `session_id` + ### Switching Groups Click on a group in the sidebar to switch. @@ -52,12 +78,24 @@ Click on a group in the sidebar to switch. 3. Set actor ID and options 4. Click **Create** +If the UI later exposes reusable profile selection, that should be read as +"link this live actor to a reusable runtime profile", not "replace actor +identity with profile identity". + ### Starting/Stopping Agents - Click the **Play** button to start an agent - Click the **Stop** button to stop - Use **Restart** to clear context and restart +For native runtime resume, the intended behavior is: + +- launch may resume the runtime's own session when the actor is configured for + native resume +- CCCC's own preamble/help/system prompt injection flow should remain unchanged + +See also: `Actor Native Resume Bindings` and `Codex Session Resume`. 
+ ### Viewing Agent Terminal Click on an agent's tab to see its terminal output. @@ -173,3 +211,16 @@ Then authenticate once to bootstrap the session cookie: - Open `http://YOUR_HOST:8848/?token=<token>` (or `.../ui/?token=...`) using an Access Token created in Web Access. After that, you can use the Web UI normally without `?token=...`. + +## Related Glossary + +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [resume](/reference/glossary/resume) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-23`: Added local glossary alignment so Web UI guidance keeps actor identity, reusable profile config, attach authority, and evidence-bound resume/status semantics separate. diff --git a/docs/guide/workflows.md b/docs/guide/workflows.md index 3db1c74f..4fd3873e 100644 --- a/docs/guide/workflows.md +++ b/docs/guide/workflows.md @@ -2,6 +2,18 @@ Common patterns for using CCCC to coordinate AI agents. +## Terminology Alignment + +This document follows the local glossary: + +- `attach` sets the group's `authoritative_workspace` +- `shared` is the default lightweight execution interpretation unless an + explicit isolated policy exists +- `group` is the main collaboration unit +- `profile` is reusable actor runtime configuration, not the live actor itself +- `status` updates should be read as operator-facing evidence, not as proof of + every deeper runtime capability + ## Solo Development with One Agent The simplest setup: one agent assisting you with a project. ### Setup ```bash cd /your/project cccc attach . 
-cccc actor add assistant --runtime claude +cccc actor profile upsert --id assistant-shared --name "Assistant Shared" --runtime claude +cccc actor add assistant --profile-id assistant-shared cccc ``` +In current glossary wording, `cccc attach .` anchors the group to that project +as its `authoritative_workspace`. + ### Workflow 1. Open the Web UI at http://127.0.0.1:8848/ @@ -30,8 +46,10 @@ Use one agent for implementation and another for review. ### Setup ```bash -cccc actor add implementer --runtime claude -cccc actor add reviewer --runtime codex +cccc actor profile upsert --id impl-shared --name "Implementer Shared" --runtime claude +cccc actor profile upsert --id review-shared --name "Reviewer Shared" --runtime codex +cccc actor add implementer --profile-id impl-shared +cccc actor add reviewer --profile-id review-shared cccc group start ``` @@ -54,10 +72,14 @@ For complex projects, use multiple specialized agents. ### Setup Example ```bash -cccc actor add architect --runtime claude # Design decisions -cccc actor add frontend --runtime codex # UI implementation -cccc actor add backend --runtime droid # API implementation -cccc actor add tester --runtime kimi # Testing +cccc actor profile upsert --id architect-shared --name "Architect Shared" --runtime claude +cccc actor profile upsert --id frontend-shared --name "Frontend Shared" --runtime codex +cccc actor profile upsert --id backend-shared --name "Backend Shared" --runtime droid +cccc actor profile upsert --id tester-shared --name "Tester Shared" --runtime kimi +cccc actor add architect --profile-id architect-shared # Design decisions +cccc actor add frontend --profile-id frontend-shared # UI implementation +cccc actor add backend --profile-id backend-shared # API implementation +cccc actor add tester --profile-id tester-shared # Testing ``` ### Coordination @@ -136,3 +158,18 @@ cccc send "Please refactor the entire authentication module. 
Report progress eve - IM Bridge sends updates to your phone - Check progress via Web UI when convenient - Agents notify on completion or errors + +## Related Glossary + +- [group](/reference/glossary/group) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [profile](/reference/glossary/profile) +- [shared](/reference/glossary/shared) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so workflow examples consistently describe group attachment, default shared execution, and status language. +- `2026-03-23`: Added concrete profile-backed actor setup examples so workflow docs reflect the new CLI profile linkage surface. +- `2026-03-23`: Added `profile` alignment so workflow examples keep reusable runtime configuration separate from live actor participation. diff --git a/docs/reference/actor-group-lifecycle-openapi.yaml b/docs/reference/actor-group-lifecycle-openapi.yaml new file mode 100644 index 00000000..6a79e52a --- /dev/null +++ b/docs/reference/actor-group-lifecycle-openapi.yaml @@ -0,0 +1,184 @@ +openapi: 3.1.0 +info: + title: CCCC Actor And Group Lifecycle API + version: 0.1.0-draft + summary: Focused OpenAPI excerpt for group and actor lifecycle routes + description: | + This is a focused OpenAPI draft for the Web HTTP routes used to start, + stop, and restart `CCCC` actors and groups. + + It is intentionally not a full product-wide OpenAPI document. It only + covers the lifecycle routes that back the current Web UI controls. 
+ + Terminology boundary: + - group lifecycle routes operate on live group state + - actor lifecycle routes operate on live actor state + - reusable actor profile configuration is related but out of scope here + - attach / authoritative workspace semantics remain separate from these + lifecycle entry points +servers: + - url: http://127.0.0.1:8848 + description: Local default Web UI server +tags: + - name: Groups + - name: Actors +paths: + /api/v1/groups/{group_id}/start: + post: + tags: [Groups] + summary: Start a working group + description: Start enabled actors in the target group. + operationId: startGroup + parameters: + - $ref: '#/components/parameters/GroupId' + - $ref: '#/components/parameters/ByQuery' + responses: + '200': + description: Group start request accepted by the daemon + content: + application/json: + schema: + $ref: '#/components/schemas/DaemonEnvelope' + /api/v1/groups/{group_id}/stop: + post: + tags: [Groups] + summary: Stop a working group + description: Stop actor runtimes in the target group. + operationId: stopGroup + parameters: + - $ref: '#/components/parameters/GroupId' + - $ref: '#/components/parameters/ByQuery' + responses: + '200': + description: Group stop request accepted by the daemon + content: + application/json: + schema: + $ref: '#/components/schemas/DaemonEnvelope' + /api/v1/groups/{group_id}/state: + post: + tags: [Groups] + summary: Set group state + description: | + Set the target group state to `active`, `idle`, or `paused`. + + In the current HTTP surface, `stopped` is handled by the dedicated + `/stop` endpoint instead of this route. 
+ operationId: setGroupState + parameters: + - $ref: '#/components/parameters/GroupId' + - $ref: '#/components/parameters/ByQuery' + - name: state + in: query + required: true + schema: + type: string + enum: [active, idle, paused] + responses: + '200': + description: Group state change request accepted by the daemon + content: + application/json: + schema: + $ref: '#/components/schemas/DaemonEnvelope' + /api/v1/groups/{group_id}/actors/{actor_id}/start: + post: + tags: [Actors] + summary: Start one actor + description: Start or enable the target actor runtime. + operationId: startActor + parameters: + - $ref: '#/components/parameters/GroupId' + - $ref: '#/components/parameters/ActorId' + - $ref: '#/components/parameters/ByQuery' + responses: + '200': + description: Actor start request accepted by the daemon + content: + application/json: + schema: + $ref: '#/components/schemas/DaemonEnvelope' + /api/v1/groups/{group_id}/actors/{actor_id}/stop: + post: + tags: [Actors] + summary: Stop one actor + description: Stop or disable the target actor runtime. + operationId: stopActor + parameters: + - $ref: '#/components/parameters/GroupId' + - $ref: '#/components/parameters/ActorId' + - $ref: '#/components/parameters/ByQuery' + responses: + '200': + description: Actor stop request accepted by the daemon + content: + application/json: + schema: + $ref: '#/components/schemas/DaemonEnvelope' + /api/v1/groups/{group_id}/actors/{actor_id}/restart: + post: + tags: [Actors] + summary: Restart one actor + description: Restart the target actor runtime. 
+ operationId: restartActor + parameters: + - $ref: '#/components/parameters/GroupId' + - $ref: '#/components/parameters/ActorId' + - $ref: '#/components/parameters/ByQuery' + responses: + '200': + description: Actor restart request accepted by the daemon + content: + application/json: + schema: + $ref: '#/components/schemas/DaemonEnvelope' +components: + parameters: + GroupId: + name: group_id + in: path + required: true + schema: + type: string + description: Target working group identifier + ActorId: + name: actor_id + in: path + required: true + schema: + type: string + description: Target actor identifier + ByQuery: + name: by + in: query + required: false + schema: + type: string + default: user + description: Requester identity string forwarded to the daemon + schemas: + DaemonEnvelope: + type: object + description: Generic Web-to-daemon JSON envelope used by these routes + properties: + ok: + type: boolean + result: + type: object + additionalProperties: true + error: + $ref: '#/components/schemas/ErrorObject' + required: [ok] + ErrorObject: + type: object + properties: + code: + type: string + message: + type: string + details: + oneOf: + - type: object + additionalProperties: true + - type: 'null' + required: [code, message] diff --git a/docs/reference/actor-group-lifecycle-surfaces.md b/docs/reference/actor-group-lifecycle-surfaces.md new file mode 100644 index 00000000..60b0d27f --- /dev/null +++ b/docs/reference/actor-group-lifecycle-surfaces.md @@ -0,0 +1,267 @@ +# Actor And Group Lifecycle Surfaces + +This note is a focused map for starting, stopping, and restarting `CCCC` groups +and actors across the three operator surfaces that matter in practice: + +- `CLI` +- `HTTP` +- `MCP` + +It is intentionally narrower than a full API reference. The goal is to answer: + +- which command or route starts an actor? +- which HTTP path backs the Web UI button? +- which MCP tool can do the same thing? +- which daemon operation is ultimately invoked? 
+ +## Terminology Alignment + +This focused note follows the local glossary: + +- `group` is the lifecycle owner for attached collaboration state +- `actor` is the live scheduled participant being started, stopped, or restarted +- `profile` is reusable actor runtime configuration, not the lifecycle action itself +- `attach` still defines the group's `authoritative_workspace` +- actor lifecycle may affect runtime execution, but it does not redefine + workspace authority or turn registry bookkeeping into runtime truth + +## Source Files + +The concrete sources for this mapping are: + +- `docs/reference/actor-group-lifecycle-openapi.yaml` +- `src/cccc/cli/actor_cmds.py` +- `src/cccc/cli/group_cmds.py` +- `src/cccc/ports/web/routes/actors.py` +- `src/cccc/ports/web/routes/im.py` +- `src/cccc/ports/mcp/handlers/cccc_group_actor.py` +- `src/cccc/ports/mcp/server.py` +- `web/src/services/api.ts` + +The sibling `actor-group-lifecycle-openapi.yaml` file is a focused HTTP excerpt +that complements this note. Use this Markdown page for cross-surface mapping, +and the YAML file when you want a machine-readable snapshot of the current HTTP +lifecycle routes. 
+
+## Quick Map
+
+| Intent | CLI | HTTP | MCP | Daemon op |
+|---|---|---|---|---|
+| Start one actor | `cccc actor start <actor_id> --group <group_id>` | `POST /api/v1/groups/{group_id}/actors/{actor_id}/start` | `cccc_actor(action="start", actor_id=..., group_id=...)` | `actor_start` |
+| Stop one actor | `cccc actor stop <actor_id> --group <group_id>` | `POST /api/v1/groups/{group_id}/actors/{actor_id}/stop` | `cccc_actor(action="stop", actor_id=..., group_id=...)` | `actor_stop` |
+| Restart one actor | `cccc actor restart <actor_id> --group <group_id>` | `POST /api/v1/groups/{group_id}/actors/{actor_id}/restart` | `cccc_actor(action="restart", actor_id=..., group_id=...)` | `actor_restart` |
+| Start group actors | `cccc group start --group <group_id>` | `POST /api/v1/groups/{group_id}/start` | no dedicated `group start` tool in current MCP surface | `group_start` |
+| Stop group actors | `cccc group stop --group <group_id>` | `POST /api/v1/groups/{group_id}/stop` | `cccc_group(action="set_state", state="stopped", group_id=...)` | `group_stop` |
+| Set group state active/idle/paused | `cccc group set-state <state> --group <group_id>` | `POST /api/v1/groups/{group_id}/state?state=<state>` | `cccc_group(action="set_state", state=..., group_id=...)` | `group_set_state` |
+
+## CLI Surface
+
+### Actor lifecycle
+
+The CLI exposes direct actor lifecycle commands:
+
+```bash
+cccc actor start <actor_id> --group <group_id>
+cccc actor stop <actor_id> --group <group_id>
+cccc actor restart <actor_id> --group <group_id>
+```
+
+These commands eventually call the daemon with:
+
+- `actor_start`
+- `actor_stop`
+- `actor_restart`
+
+### Group lifecycle
+
+The CLI exposes direct group lifecycle commands:
+
+```bash
+cccc group start --group <group_id>
+cccc group stop --group <group_id>
+cccc group set-state active --group <group_id>
+cccc group set-state idle --group <group_id>
+cccc group set-state paused --group <group_id>
+cccc group set-state stopped --group <group_id>
+```
+
+Important detail:
+
+- `set-state stopped` does not call `group_set_state`
+- it is normalized to the daemon `group_stop` operation
+
+## HTTP Surface
+
+### Actor routes
+
+The actor lifecycle routes are defined under the 
group-scoped Web router: + +- `POST /api/v1/groups/{group_id}/actors/{actor_id}/start` +- `POST /api/v1/groups/{group_id}/actors/{actor_id}/stop` +- `POST /api/v1/groups/{group_id}/actors/{actor_id}/restart` + +The Web UI helper methods in `web/src/services/api.ts` call exactly these paths: + +- `startActor(...)` +- `stopActor(...)` +- `restartActor(...)` + +### Group routes + +The group lifecycle HTTP paths are: + +- `POST /api/v1/groups/{group_id}/start` +- `POST /api/v1/groups/{group_id}/stop` +- `POST /api/v1/groups/{group_id}/state?state=active|idle|paused` + +Important implementation detail: + +- these paths are group lifecycle routes +- but today they are implemented in `src/cccc/ports/web/routes/im.py` +- this is easy to miss if you only search in `groups.py` + +The Web UI helper methods are: + +- `startGroup(...)` +- `stopGroup(...)` +- `setGroupState(...)` + +## MCP Surface + +### Actor lifecycle via `cccc_actor` + +The current MCP tool surface supports actor lifecycle through one consolidated +tool: + +- `cccc_actor` + +Supported actions relevant here: + +- `list` +- `add` +- `remove` +- `start` +- `stop` +- `restart` + +Example: + +```json +{ + "group_id": "g_xxx", + "action": "start", + "actor_id": "peer-impl" +} +``` + +### Group lifecycle via `cccc_group` + +The current MCP tool surface does not expose a dedicated `group start` action. 
+Instead: + +- `cccc_group(action="set_state", state="active" | "idle" | "paused")` + maps to `group_set_state` +- `cccc_group(action="set_state", state="stopped")` + maps to `group_stop` + +That means: + +- MCP can stop a group +- MCP can change non-stopped group state +- but current MCP does not have a first-class `group_start` action + +If an automation or external controller needs to start a whole group today, the +available stable surfaces are: + +- `CLI` +- `HTTP` +- direct daemon op from internal code + +## Web UI Notes + +If you are debugging button behavior in the browser: + +- actor buttons call actor lifecycle HTTP routes +- group buttons call group lifecycle HTTP routes +- both eventually route into daemon ops + +The Web UI client functions are the fastest file to inspect first: + +- `web/src/services/api.ts` + +## Practical Examples + +### Start one actor with CLI + +```bash +cccc actor start peer-arch --group g_example123 +``` + +### Start one actor with HTTP + +```bash +curl -X POST \ + "http://127.0.0.1:8848/api/v1/groups/g_example123/actors/peer-arch/start?by=user" +``` + +### Restart one actor with HTTP + +```bash +curl -X POST \ + "http://127.0.0.1:8848/api/v1/groups/g_example123/actors/peer-arch/restart?by=user" +``` + +### Start a group with HTTP + +```bash +curl -X POST \ + "http://127.0.0.1:8848/api/v1/groups/g_example123/start?by=user" +``` + +### Stop a group with MCP semantics + +```json +{ + "group_id": "g_example123", + "action": "set_state", + "state": "stopped" +} +``` + +## Scope Boundary + +This document only covers lifecycle entry points for: + +- `group start` +- `group stop` +- `group state` +- `actor start` +- `actor stop` +- `actor restart` + +It does not try to document: + +- the full HTTP API +- WebSocket terminal streaming +- IM bridge routes +- actor-profile CRUD surfaces +- attach / authoritative-workspace semantics beyond the lifecycle boundary +- execution-workspace resolution policy +- registry lookup behavior +- 
`resume`-specific launch metadata +- PTY keystroke injection or raw terminal input delivery + +## Related Glossary + +- [group](/reference/glossary/group) +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [execution_workspace](/reference/glossary/execution_workspace) +- [registry](/reference/glossary/registry) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-23`: Added local glossary alignment so lifecycle entry-point docs stop being misread as the source of truth for profiles, workspace authority, or registry semantics. diff --git a/docs/reference/actor-native-resume-bindings.md b/docs/reference/actor-native-resume-bindings.md new file mode 100644 index 00000000..eb6b00a5 --- /dev/null +++ b/docs/reference/actor-native-resume-bindings.md @@ -0,0 +1,501 @@ +# Actor Native Resume Bindings + +Reference for actor-scoped native session recovery bindings in CCCC. + +The `codex` PTY path described here is now shipped and validated in Web UI. +Some of the broader runtime-agnostic material still serves as design guidance +for future runtimes, but the actor-bound `codex` resume flow is no longer only +proposed behavior. + +## Goal + +Allow a user to say: + +- this actor should launch by resuming a known native runtime session +- this actor should remember the session identifier even when resume is + temporarily disabled + +The first concrete target is `codex` PTY actors. 
+ +## Terminology Alignment + +This document follows the local glossary: + +- `attach` sets the group's `authoritative_workspace` +- actor runtime `cwd` should be read as `execution_workspace` +- `profile` means reusable actor runtime configuration and launch intent +- `resume` is layered and is not only native session reuse +- `status` is evidence, not automatic proof of later resume success + +## Primary use case + +A user already worked in a project directly with Codex CLI and has valuable +session context under a known `session_id`. + +Later, the user opens CCCC, creates a new group from a template, attaches the +same project path, and wants one or more actors to inherit that native session +continuity while still keeping CCCC's own coordination flow. + +In glossary terms, the attached project path remains the authoritative +workspace even if later runtime evidence points to a more specific actor +execution workspace. +Likewise, a stored actor or profile-native resume binding remains launch intent +until live runtime evidence confirms continuity. + +## User experience + +### Group creation from template + +When the user creates a group from a template in Web UI: + +1. choose project path +2. choose template file +3. optionally open a `Resume Recovery` section +4. add one or more bindings + +Each binding row should contain: + +- actor selector +- runtime badge / inferred runtime +- `session_id` input +- `enabled` toggle + +This is intentionally actor-based rather than foreman-only. The common case may +still be "bind the first actor / foreman", but the model should not hard-code +that assumption. 
+ +### Actor editing after creation + +In actor settings, the user should be able to: + +- see whether native resume is enabled for launch +- edit or replace the stored `session_id` +- disable resume without deleting the stored `session_id` + +This gives users a reversible switch: + +- `enabled = true`: try native resume on launch +- `enabled = false`: launch fresh, but keep the saved session identifier for + later reuse + +## Executed validation matrix + +The feature was re-validated on March 15, 2026 local time +(`2026-03-16` UTC) with three different chain lengths instead of only one +happy path. + +### Short chain: edit existing actor intent only + +Validated against existing group `g_414cfed1a68e` +(`cccc-resume-template-e2e-verified`). + +Executed path: + +1. Stop actor `需求规划专家`. +2. Open `Edit agent configuration`. +3. Clear `Prefer native resume on launch`. +4. Save. +5. Re-open the same dialog. +6. Verify: + - `Prefer native resume on launch` stays unchecked + - `Session ID` still stays + `019cf430-228b-7ea3-bb58-bf2653eea8c2` +7. Re-enable resume and save again to restore the original state. + +This proves the actor-level switch is reversible and does not erase the stored +native session target. + +### Medium chain: template import plus actor prefill, no launch + +Validated against fresh group `g_4fa9ed662c8e` +(`cccc-resume-template-medium-fresh`) attached to: + +- `/Users/glennxu/workspace/minion/cccc-resume-e2e-medium-fresh` + +Executed path: + +1. Create a brand-new group from blueprint. +2. Import `data/cccc-group-template-codex--checklist.yaml`. +3. In `Resume Recovery`, bind actor `需求规划专家`. +4. Save session ID `019cf430-228b-7ea3-bb58-bf2653eea8c2`. +5. Leave `Resume on launch` enabled. +6. Create the group. +7. Open `Edit agent configuration` for `需求规划专家` before first launch. +8. 
Verify: + - `Prefer native resume on launch` is checked + - `Session ID` is prefilled with + `019cf430-228b-7ea3-bb58-bf2653eea8c2` + +The resulting group file persisted: + +```yaml +native_resume: + enabled: true + session_id: 019cf430-228b-7ea3-bb58-bf2653eea8c2 +``` + +This proves the create-flow binding survives the modal submit, API apply step, +and actor edit preload even before any runtime launch happens. + +## Long chain: full end-to-end launch and `/status` + +The following path was re-validated on March 15, 2026 local time +(`2026-03-16T01:44Z`) against the feature branch implementation: + +1. Open Web UI and create a new group. +2. Attach the existing project path + `/Users/glennxu/workspace/minion/cccc-resume-e2e-round2`. +3. Import the blueprint file + `data/cccc-group-template-codex--checklist.yaml`. +4. In `Resume Recovery`, add one binding. +5. Select the first actor, `需求规划专家`. +6. Enter session ID `019cf430-228b-7ea3-bb58-bf2653eea8c2`. +7. Keep `Resume on launch` enabled. +8. Create the group from blueprint. +9. Open that actor's `Edit agent configuration` dialog before launch. +10. Verify both of the following are already populated from the saved binding: + - `Prefer native resume on launch` is checked + - `Session ID` is `019cf430-228b-7ea3-bb58-bf2653eea8c2` +11. Launch the actor. +12. In the actor terminal, manually run `/status` and press Enter. +13. Verify the resumed Codex session still reports + `019cf430-228b-7ea3-bb58-bf2653eea8c2`. + +The same validation also confirmed that the binding is durably written into the +group state. 
The resulting group file contained: + +```yaml +native_resume: + enabled: true + session_id: 019cf430-228b-7ea3-bb58-bf2653eea8c2 +``` + +For the verified Web API request, `POST /api/v1/groups/from_template` returned: + +```json +{ + "group_id": "g_414cfed1a68e", + "applied": true, + "resume_bindings_applied": ["需求规划专家"] +} +``` + +This matters because it proves the full chain is working end to end: + +- template-create modal state +- Web API request payload +- daemon template-create apply step +- persisted actor config +- actor edit modal preload +- actual runtime launch via native resume +- live `/status` session continuity + +A later Chrome MCP rerun on March 15, 2026 local time also reconfirmed the +runtime continuity on fresh group `g_236e39802004` +(`cccc-resume-template-long-regression`). +In that rerun, typing `/status` into the Web terminal was accepted, but the +session line did not re-render back into the terminal DOM reliably enough for +automation to scrape it. +The runtime PTY state still recorded: + +```json +{ + "session_id": "019cf430-228b-7ea3-bb58-bf2653eea8c2" +} +``` + +from `~/.cccc/groups/g_236e39802004/state/runners/pty/需求规划专家.json`. + +Inference: native resume continuity still held; the weaker part in that rerun +was the Web terminal's `/status` rendering as automation evidence, not the +resume binding itself. + +## Related Glossary + +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [resume](/reference/glossary/resume) +- [status](/reference/glossary/status) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [execution_workspace](/reference/glossary/execution_workspace) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so actor-scoped native resume docs stop drifting on attach authority, execution workspace, and status-versus-resume meaning. 
+- `2026-03-23`: Added `profile` alignment so stored native resume intent can be discussed without collapsing reusable config into live runtime truth. + +## Additional branch conditions observed during testing + +These were not counted as successful medium-chain runs, but they are important +test branches because they change the control flow before resume binding can be +validated. + +### Ordinary create-flow duplicate attach path is now guarded + +During this round of regression testing, the plain `Create Group` flow +(without blueprint import) exposed a separate bug outside the resume-binding +path: + +- the UI first created a new empty group +- it then tried `attach` +- the daemon previously allowed that attach even when the same scope already + belonged to another group + +This produced duplicate groups for the same attached directory. + +The feature branch now fixes that ordinary create-flow branch in two layers: + +- daemon `attach` rejects `scope_already_attached` when the scope already + belongs to a different group +- Web UI deletes the just-created empty group and switches to the already + attached group instead of leaving an orphan behind + +After the fix, a Chrome MCP rerun using +`/Users/glennxu/workspace/minion/cccc-resume-e2e-medium-regression` no longer +created a new `duplicate-path-check-fixed-2` group. +The UI switched back to the already attached group and showed: + +- `This directory already has a working group. Opening it instead.` + +One nuance matters for debugging: if older duplicate groups already exist from +pre-fix runs, the reopened group may be whichever group the current registry +maps to for that scope, not necessarily the earliest or most desirable one. 
+ +### Ordinary create-flow nested path now also reopens the normalized parent scope + +The same plain `Create Group` branch was re-run with a nested path: + +- `/Users/glennxu/workspace/minion/cccc-codex-session-resume/e2e-nested-regression-2` + +Because `detect_scope(...)` normalizes that path back to the parent worktree +scope, this is an important separate branch. + +Before the fix, this branch could also create a second empty group for the same +normalized parent scope. +After the fix, the UI re-opened the already attached group and did not create a +new `nested-path-check-fixed-2` group. + +So nested-path attach is still a distinct regression branch worth testing, but +it should now resolve to the existing normalized parent scope rather than +creating another orphan group. + +## UI automation stability note + +During Web UI automation, one concrete source of flakiness was identified in the +`Resume Recovery` list itself: + +- if a binding row key changes when `actor_id` changes, React remounts the whole + row +- browser automation then loses its handle on the `Session ID` field right after + actor selection + +The feature branch now keeps each resume-binding row on a stable local ID +instead of deriving the row key from `actor_id`. + +Practical effect: + +- selecting an actor no longer recreates that row's inputs +- the follow-up `Session ID` fill step is much more reliable for Chrome MCP and + similar UI automation +- repeated controls also expose stable per-row field ids and clearer labels, + which makes future browser automation easier to target + +## Recommended regression order + +After any UI change to group creation, actor editing, or native resume wiring, +re-run the following cases in order: + +1. `Short`: + open an existing actor, disable native resume, save, reopen, and confirm the + saved `session_id` survives while the checkbox stays off +2. 
`Medium`: + create a fresh group from blueprint on a brand-new attach path, bind one + actor, create the group, then confirm the actor edit dialog preloads the + saved binding before first launch +3. `Long`: + repeat the medium case and also launch the actor, manually run `/status`, and + confirm the runtime session still matches the saved `session_id` +4. `Duplicate path branch`: + submit a path that already belongs to an existing group and confirm the UI + reopens that group instead of silently creating a second one +5. `Nested path branch`: + submit a subdirectory under an already attached group root and confirm + whether it still coalesces to the parent group +6. `Daemon mismatch guard`: + verify `uv run cccc daemon status` reports the expected feature-branch daemon + version before trusting any failed UI result + +This order intentionally goes from cheapest validation to most expensive +validation. If the short or medium cases fail, the long case is usually not yet +worth running. + +## Operational pitfall: stale daemon false negatives + +During validation, a serious false-negative case appeared: + +- the Web UI on port `8848` was serving the current feature branch +- but the daemon behind `~/.cccc` had silently fallen back to version `0.4.2` + +In that stale-daemon state: + +- the request body still included `resume_bindings_json` +- the server response omitted `resume_bindings_applied` +- the created group's `group.yaml` kept `native_resume_policy: 'off'` +- the Edit Actor dialog showed no saved session ID + +That failure mode is not a UI regression in the feature itself. It means the +current Web bundle is talking to an older daemon process. + +Practical rule for debugging: + +- always verify daemon version before trusting template-import resume results +- the expected fixed path should report `ccccd: running pid=... 
version=0.4.4`
+- if the daemon reports `0.4.2`, stop it and restart from the current worktree
+  before drawing any product conclusion
+
+## Why this should not live inside the template file
+
+Group templates are meant to stay portable across:
+
+- machines
+- repositories
+- users
+- clean-room project setup flows
+
+`session_id` values are not portable in that same way. They point at local CLI
+history under a specific runtime home such as `~/.codex`.
+
+So the template should continue to describe:
+
+- actor order
+- runtime
+- runner
+- command
+- delivery / automation defaults
+
+But it should not directly carry machine-local native session bindings.
+
+## Data model direction
+
+CCCC should distinguish two different classes of state.
+
+### 1. Actor configuration: user intent
+
+This is the durable configuration that answers:
+
+- should this actor try native resume on launch?
+- if yes, what native session identifier should it prefer?
+
+Conceptually, this looks like:
+
+```yaml
+native_resume:
+  enabled: true
+  session_id: sess_abc123
+```
+
+The exact field names may change, but the semantics should stay stable.
+
+### 2. Runner state: runtime-discovered evidence
+
+This is the PTY/headless runtime state CCCC already records, such as:
+
+- `session_id`
+- `session_log_path`
+- `runtime`
+- `cwd`
+
+This state is evidence gathered from launch and recovery, not the user's source
+of truth for desired behavior.
+
+## Launch behavior
+
+For the first supported runtime, the launch rule should be:
+
+- if actor runtime is `codex`
+- and actor runner is `pty`
+- and native resume is enabled
+- and a `session_id` is configured
+
+Then:
+
+1. pass that `session_id` into the current `codex` recovery pipeline
+2. let discovery logic try to resolve `session_log_path`
+3. build a proper `codex resume <session_id>` command when possible
+4. otherwise fall back safely to a fresh launch
+
+That final fallback matters in real use. 
+
+A live session ID captured from `codex /status` is useful actor-local evidence,
+but it does not always mean Codex itself can reopen that session through
+`codex resume <session_id>`.
+
+So the launch contract should be:
+
+- preserve the user's configured `session_id`
+- prefer native resume when recovery evidence is strong enough
+- avoid turning an unavailable resume target into a broken actor start
+
+If native resume is disabled:
+
+- do not attempt `codex resume`
+- do not delete the configured `session_id`
+
+## Relationship to current Codex resume implementation
+
+This design does **not** replace the existing `codex` resume work.
+
+Instead, it adds a better configuration surface on top of:
+
+- `codex` session discovery
+- `session_log_path` recovery
+- PTY state persistence
+- `experimental_resume` command injection
+
+In other words:
+
+- current implementation solves "how to resume"
+- actor native resume bindings solve "which actor should prefer resume, and when"
+
+## Prompt and coordination behavior
+
+Native resume must not bypass CCCC's own coordination model.
+
+Required invariant:
+
+- native runtime resume may change the launch command
+- it must **not** disable or short-circuit CCCC prompt delivery, preamble/help,
+  inbox continuity, or work-state recovery
+
+So the target behavior is:
+
+- actor resumes native `codex` session when configured
+- CCCC still injects its own system guidance through the normal startup path
+
+## Safety boundaries
+
+The initial scope should stay conservative. 
+ +Supported first: + +- `codex` +- `pty` +- manual `session_id` entry +- actor-scoped enable/disable switch +- safe fallback to fresh launch when native resume target is unavailable + +Not required for the first iteration: + +- auto-discover-and-bind from Web UI directly +- runtime-agnostic generic editor for every future CLI +- auto-probing live runtime `/status` from Web +- embedding session bindings into exported group templates + +## Recommended rollout order + +1. Add actor-config fields for native resume intent. +2. Accept actor resume bindings during "create group from template". +3. Thread configured `session_id` into the existing `codex` launch pipeline. +4. Add actor edit controls for enable/disable and session ID retention. +5. Only after that, consider richer runtime-generic abstractions. diff --git a/docs/reference/actor-terminal-input-control.md b/docs/reference/actor-terminal-input-control.md new file mode 100644 index 00000000..afbd1b4a --- /dev/null +++ b/docs/reference/actor-terminal-input-control.md @@ -0,0 +1,343 @@ +# Actor Terminal Input Control + +Date: `2026-03-18` + +Status: `reference` + +## Purpose + +This document explains how to send **raw terminal input** to a running PTY actor in CCCC. + +It is intended for: + +- Web terminal automation +- Browser / Chrome MCP driven tests +- Future terminal-driving bots +- Debugging cases where an actor is blocked on a TUI prompt such as: + - `Press enter to continue` + - trust-directory prompts + - numbered selection prompts + +This document also clarifies the difference between: + +- **raw terminal keystrokes** +- **normal chat message delivery** + +Those two paths are different and should not be mixed. 
+ +## Terminology Alignment + +This document follows the local glossary: + +- `actor` is the live participant whose PTY you are driving +- raw terminal input is not the same surface as chat delivery +- terminal tail text is useful runtime evidence, but not guaranteed live truth +- `status` should be read as an evidence-bound observation, not as a universal + proof of every deeper runtime property +- `host_surface` means the CCCC-owned readable surface around PTY transport and + delivery behavior, not downstream interpretation + +## Current Primary Interface + +For raw keystrokes, use the actor terminal WebSocket: + +`/groups/{group_id}/actors/{actor_id}/term` + +Relevant code path: + +- `src/cccc/ports/web/routes/actors.py` +- `src/cccc/runners/pty.py` + +## How The Terminal WebSocket Works + +The WebSocket route accepts a connection, attaches to the actor's PTY stream, then forwards: + +- PTY output back to the client as binary frames +- terminal input from the client into the PTY stdin + +The input message shape is: + +```json +{"t":"i","d":"..."} +``` + +Where: + +- `t = "i"` means terminal input +- `d` is the raw text / control sequence to write into the PTY + +The backend behavior is effectively: + +1. receive WebSocket text frame +2. parse JSON +3. if `t == "i"`, encode `d` as UTF-8 +4. write bytes directly into the attached PTY + +This is the only current Web-facing interface that supports sending a **pure Enter key** without any accompanying text. + +## Raw Input Examples + +### Press Enter + +```json +{"t":"i","d":"\r"} +``` + +Meaning: + +- send a carriage return into the PTY +- in most CLI / TUI contexts this behaves like pressing Enter + +### Type a command and press Enter + +```json +{"t":"i","d":"/status\r"} +``` + +Meaning: + +- type `/status` +- then press Enter + +This is the correct shape when you want the terminal to execute a command immediately. 
+ +### Move cursor up, then press Enter + +```json +{"t":"i","d":"\u001b[A\r"} +``` + +Meaning: + +- `\u001b[A` = Up Arrow +- `\r` = Enter + +This is useful for selection prompts where the highlighted option may not already be on the desired line. + +### Interrupt with Ctrl+C + +```json +{"t":"i","d":"\u0003"} +``` + +Meaning: + +- send ETX / Ctrl+C to the PTY + +### Send Escape + +```json +{"t":"i","d":"\u001b"} +``` + +Meaning: + +- send Escape + +### Move down, then confirm + +```json +{"t":"i","d":"\u001b[B\r"} +``` + +Meaning: + +- `\u001b[B` = Down Arrow +- `\r` = Enter + +## Common Control Sequences + +Useful terminal control inputs for automation: + +- `\r` = Enter / carriage return +- `\n` = line feed +- `\u0003` = Ctrl+C +- `\u0004` = Ctrl+D +- `\u001b` = Escape +- `\u001b[A` = Up Arrow +- `\u001b[B` = Down Arrow +- `\u001b[C` = Right Arrow +- `\u001b[D` = Left Arrow + +## Important Distinction: Raw Input vs Chat Delivery + +CCCC also has normal messaging APIs such as: + +- `POST /api/v1/groups/{group_id}/send` +- `POST /api/v1/groups/{group_id}/reply` + +Those routes are **not** raw-keyboard APIs. + +They go through the daemon message delivery pipeline and ultimately call PTY text submission logic. + +That path behaves like: + +1. queue a `chat.message` +2. deliver text payload into the PTY +3. automatically append a submit key based on actor `submit` mode + +Default submit mode is: + +- `b"\r"` for `enter` + +This means normal message delivery is best understood as: + +- **send text to the actor** +- then **auto-submit** + +It is not equivalent to direct keystroke control. 
+ +## Why `/send` Cannot Be Used For Pure Enter + +The PTY text submission logic rejects empty text before it tries to submit: + +- `raw = (text or "").rstrip("\\n")` +- if `raw` is empty, the call returns `False` + +So these are different: + +Correct for pure Enter: + +```json +{"t":"i","d":"\r"} +``` + +Not valid for pure Enter: + +```json +POST /api/v1/groups/{group_id}/send +{ + "text": "", + "by": "user", + "to": ["actor-id"] +} +``` + +The messaging route is suitable for: + +- sending chat instructions +- sending `/status` as terminal text plus auto-submit +- normal actor conversation + +It is not suitable for: + +- arrow keys +- bare Enter +- Escape +- Ctrl+C +- other raw TUI navigation keys + +## How To Think About `\r` In Selection Prompts + +`\r` does **not** mean “select option 1”. + +`\r` means: + +- confirm the **currently highlighted** option + +So if a prompt visually looks like: + +```text +1. Yes, continue +› 2. No, quit + +Press enter to continue +``` + +Then a raw: + +```json +{"t":"i","d":"\r"} +``` + +is more likely to confirm `2. No, quit`, because the highlight marker is on that line. + +To force-select `1. Yes, continue`, a safer automation input is: + +```json +{"t":"i","d":"\u001b[A\r"} +``` + +This means: + +- move selection up once +- then press Enter + +## Important Caution About Terminal Tail Snippets + +System notifications often include terminal tail excerpts such as: + +- `Press enter to continue` +- the last 20 lines of terminal output + +These excerpts are useful for diagnosis, but they are **not guaranteed** to be the exact live TUI state at the moment you send input. 
+ +In particular: + +- the visible tail may lag the live cursor state +- the currently highlighted option may have changed +- the transcript excerpt may omit control-sequence effects + +So: + +- use notification text as a hint +- do not treat it as perfect ground truth for current TUI selection state + +## Recommended Automation Strategy + +For terminal-driving automation, prefer this order: + +1. Connect to `/groups/{group_id}/actors/{actor_id}/term` +2. Observe the live PTY output +3. Send raw input frames with `{"t":"i","d":"..."}` +4. Use arrow-key sequences before `\r` when selection matters +5. Reserve `/send` and `/reply` for normal textual interaction, not TUI control + +## Practical Examples + +### Example: trigger `/status` + +```json +{"t":"i","d":"/status\r"} +``` + +### Example: trust-directory prompt, choose Yes + +```json +{"t":"i","d":"\u001b[A\r"} +``` + +### Example: trust-directory prompt, accept current default + +```json +{"t":"i","d":"\r"} +``` + +### Example: stop a stuck command + +```json +{"t":"i","d":"\u0003"} +``` + +## Current Limitations + +- There is currently no dedicated HTTP endpoint for “raw PTY input bytes”. +- The supported Web-facing raw-input surface is the actor terminal WebSocket. +- Normal messaging APIs intentionally operate at the chat-message layer, not the keystroke layer. + +## Relevant Source References + +- `src/cccc/ports/web/routes/actors.py` +- `src/cccc/runners/pty.py` +- `src/cccc/daemon/messaging/delivery.py` +- `web/src/components/AgentTab.tsx` + +## Related Glossary + +- [actor](/reference/glossary/actor) +- [status](/reference/glossary/status) +- [host_surface](/reference/glossary/host_surface) + +## Change Log + +- `2026-03-24`: Added glossary alignment so terminal-input docs keep raw PTY control, chat delivery, and evidence-bound status semantics clearly separated. 
diff --git a/docs/reference/architecture.md b/docs/reference/architecture.md index c72b86a2..04005690 100644 --- a/docs/reference/architecture.md +++ b/docs/reference/architecture.md @@ -10,13 +10,31 @@ - Like an IM group chat, but with execution/delivery capabilities - Each group has an append-only ledger (event stream) -- Can bind multiple Scopes (project directories) +- Can bind project directories, with one current authoritative workspace anchor ### Actor - **Foreman**: Coordinator + Executor (the first enabled actor automatically becomes foreman) - **Peer**: Independent expert (other actors) - Supports PTY (terminal) and Headless (MCP-only) runners +- Runtime execution may happen in the authoritative workspace or a different execution workspace, depending on explicit policy + +### Profile + +- A `profile` is a reusable actor runtime configuration object +- It can carry runtime defaults, launch intent, and profile-owned secrets +- An actor may link to a profile, but the actor remains the live scheduled participant +- Profile linkage should not be confused with live runtime truth or successful native resume + +Typical profile-backed path: + +```bash +cccc actor profile upsert --id shared-dev --name "Shared Dev" --runtime claude +cccc actor add --profile-id shared-dev +``` + +That profile-backed path stores reusable runtime intent in the profile while the +actor continues to own live scheduling and runtime state. ### Ledger @@ -24,6 +42,15 @@ - All messages, events, and decisions are recorded here - Supports snapshot/compaction +## Workspace Semantics + +- `attach` defines the current `authoritative_workspace` for a group. +- The `authoritative_workspace` is the semantic project anchor for the group. +- An actor's `execution_workspace` is the effective path its runtime is working in. +- In the default `shared` interpretation, execution workspace and authoritative workspace are the same path. 
+- If a future optional `isolated` mode is enabled, execution workspace may differ, but authority does not move away from the attached workspace. +- Older wording such as `scope` remains compatibility wording in some docs and APIs, but local glossary wording wins when there is conflict. + ## Directory Layout Default: `CCCC_HOME=~/.cccc` @@ -169,6 +196,10 @@ class ChatMessageData: MCP is exposed as an action-oriented surface. Tool count is intentionally not hardcoded, because optional capability packs can add more tools when enabled. +The local glossary term `host_surface` is useful here: CCCC-owned readable +surfaces such as MCP status tools should expose host/runtime truth without +pretending to be higher-level interpretation layers. + The surface is best understood as capability groups instead of a fixed namespace/tool count. Each group can expose one or more MCP tools, and some groups use action-style wrappers rather than one-tool-per-operation naming. ### Core Collaboration Capability Groups @@ -209,3 +240,20 @@ src/cccc/ │ └── mcp/ # MCP Server └── resources/ # Built-in resources ``` + +## Related Glossary + +- [group](/reference/glossary/group) +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [execution_workspace](/reference/glossary/execution_workspace) +- [host_surface](/reference/glossary/host_surface) +- [registry](/reference/glossary/registry) + +## Change Log + +- `2026-03-21`: Aligned architecture wording with the new local glossary so `attach`, workspace authority, execution workspace, registry, and host-surface meanings stay repo-local and explicit. +- `2026-03-23`: Added a profile-backed path example so the architecture page ties the actor/profile split to the current CLI surface. 
+- `2026-03-23`: Added the repo-local `profile` layer to the core-concepts section so live actor identity and reusable runtime configuration are no longer conflated. diff --git a/docs/reference/cli.md b/docs/reference/cli.md index 0476f9de..7ceeca94 100644 --- a/docs/reference/cli.md +++ b/docs/reference/cli.md @@ -50,13 +50,20 @@ Notes: ### `cccc attach` -Create or attach to a working group. +Bind a project path to the current or selected group as its authoritative workspace. ```bash -cccc attach . # Attach current directory as scope +cccc attach . # Attach current directory as authoritative workspace cccc attach /path/to/project ``` +Notes: +- `attach` defines the group's `authoritative_workspace`. +- It does not by itself create per-actor isolated workspaces. +- Current product direction keeps the default execution path lightweight: actors + still default to `workspace_mode = shared` unless a future explicit isolated + policy says otherwise. + ### `cccc groups` List all working groups. @@ -99,6 +106,7 @@ Add a new actor to the group. cccc actor add --runtime claude cccc actor add --runtime codex cccc actor add --runtime custom --command "my-agent" +cccc actor add --profile-id shared-profile ``` Options: @@ -106,6 +114,9 @@ Options: - `--command`: Custom command (for custom runtime) - `--runner`: Runner type (pty or headless) - `--title`: Display title +- `--profile-id`: Link the new live actor to a reusable `profile` +- `--profile-scope` / `--profile-owner-id`: Address explicit user-scoped + profiles when `--profile-id` is used ### `cccc actor` @@ -116,11 +127,59 @@ cccc actor list # List actors cccc actor start # Start actor cccc actor stop # Stop actor cccc actor restart # Restart actor +cccc actor sessions # Show recoverable Codex sessions for actors +cccc actor sessions # Show one actor's recovery candidate +cccc actor sessions --probe-status cccc actor remove # Remove actor cccc actor update ... # Update actor settings cccc actor secrets ... 
# Manage runtime-only secrets
+cccc actor update --profile-id shared-profile
+cccc actor update --profile-action convert_to_custom
+```
+
+### `cccc actor profile`
+
+Manage reusable actor runtime profiles.
+
+```bash
+cccc actor profile list
+cccc actor profile list --view my
+cccc actor profile get <profile-id>
+cccc actor profile upsert --name "Shared Codex" --runtime codex
+cccc actor profile upsert --id shared --command "codex --resume"
+cccc actor profile delete <profile-id>
+cccc actor profile secrets <profile-id> --keys
```
+
+Notes:
+- Reusable `profile` records can now be managed directly from the CLI instead
+  of only being mentioned as actor linkage metadata.
+- `list` supports `--view global|my|all`.
+- `get` and `delete` support `--scope global|user` and `--owner-id ...` so the
+  CLI can address explicit user-scoped profile refs.
+- `upsert` stores reusable launch intent and capability defaults; it does not
+  by itself prove any live runtime continuity.
+- `secrets` manages runtime-only secret keys for a reusable profile without
+  mixing those values into the profile document itself.
+
+Notes:
+- An actor's runtime defaults may come from direct actor config or a linked
+  reusable `profile`; that linkage is configuration intent, not live runtime
+  proof.
+- For `codex` PTY actors, `cccc actor restart <actor>` now attempts best-effort session recovery from prior PTY state and `~/.codex` metadata.
+- See `Codex Session Resume` for the exact recovery sources and boundaries.
+- In local glossary terms, `resume` is layered semantics: CCCC-owned recovery and
+  native runtime recovery are related but not identical.
+- Planned next layer: actor-configured native resume bindings should be able to
+  prefer a stored `session_id` on launch while still preserving current prompt
+  injection and CCCC-owned recovery behavior.
+- See `Actor Native Resume Bindings` for the agreed Web UI and actor-settings
+  design.
+- `cccc actor sessions <actor> --probe-status` is now a safety hint, not an automatic PTY injection.
+- If you need `/status`, first enter the actor's actual `Codex` terminal, then run `/status` manually inside that session. +- `/status` output is useful runtime evidence, but it is not by itself proof + that later native resume will succeed. + ## Message Commands ### `cccc send` @@ -224,6 +283,23 @@ Notes: - local `repo/space/` files -> provider sources, - provider source/artifact projection -> local `repo/space/` (`.sync/remote-sources` and `artifacts/`). +## Related Glossary + +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [workspace_mode](/reference/glossary/workspace_mode) +- [profile](/reference/glossary/profile) +- [resume](/reference/glossary/resume) +- [status](/reference/glossary/status) + +## Change Log + +- `2026-03-23`: Added `cccc actor profile ...` reference coverage so reusable + profile management is documented as a first-class CLI surface. +- `2026-03-23`: Added `profile` glossary alignment so CLI actor lifecycle notes distinguish reusable runtime config from live runtime evidence. + +- `2026-03-21`: Aligned CLI wording with the new local glossary so `attach`, `resume`, and `status` stop drifting between shorthand and canonical repo-local meaning. + ## Setup Commands ### `cccc setup` diff --git a/docs/reference/codex-session-resume.md b/docs/reference/codex-session-resume.md new file mode 100644 index 00000000..af705c10 --- /dev/null +++ b/docs/reference/codex-session-resume.md @@ -0,0 +1,474 @@ +# Codex Session Resume + +How CCCC restores a `codex` actor back into the right long-running session, and +how actor-bound resume intent interacts with runtime evidence and safe fallback. 
+
+## Terminology Alignment
+
+This document follows the local glossary:
+
+- `resume` is layered; it does not mean only native vendor session recovery
+- `status` means an evidence-bound observation surface
+- `profile` means reusable actor runtime configuration and launch intent
+- `attach` defines the `authoritative_workspace`
+- actor runtime `cwd` is better described as an `execution_workspace`
+
+If older wording in this document sounds narrower than the glossary, treat it
+as compatibility wording.
+
+## Current implementation status
+
+Today, the shipped recovery path combines actor-bound resume intent with
+PTY-state-driven runtime evidence.
+
+For PTY actors with runtime `codex`, CCCC persists and reuses:
+
+- `session_id`
+- `session_log_path`
+- `runtime`
+- `cwd`
+
+In glossary terms, the recovered `cwd` is runtime evidence about execution
+workspace. It should not be read as replacing the group's authoritative
+workspace.
+
+When a `codex` PTY actor starts or restarts, CCCC can rebuild a real
+`codex resume ...` command instead of always starting a brand-new empty session.
+
+The current Web UI also lets a user:
+
+- enable or disable native resume per actor
+- keep a stored `session_id` even while resume is disabled
+- read the actor's current live session ID from `/status` output that is already
+  visible in the PTY transcript
+- save that live session ID back into actor configuration
+
+That stored native-resume intent may live in direct actor configuration or, in
+future flows, in reusable actor-linked `profile` semantics. In either case, it
+remains launch intent rather than proof.
+
+Important: a live `/status` session ID is useful evidence, but it is **not** by
+itself proof that `codex resume <session_id>` will succeed later.
+
+This is an intentional local distinction between `status` and `resume`.
+ +This currently applies across the normal actor launch paths: + +- `cccc actor start` +- `cccc actor restart` +- `cccc group start` +- template/bootstrap flows that eventually start PTY actors + +## Safe fallback behavior + +CCCC now treats native resume as an optimization, not a hard requirement. + +If a `codex` actor has native resume enabled and a configured `session_id`, CCCC +tries to build a real `codex resume ...` command only when it also has concrete +resume artifacts such as a `session_log_path`. + +If those artifacts are incomplete, stale, or missing, CCCC now falls back +safely to a normal fresh `codex` launch instead of letting the actor crash on +startup. + +That means: + +- the actor stays launchable even when the saved resume target is no longer + present in Codex's own saved-session store +- the configured `session_id` is still preserved in actor config +- the running actor can continue from a fresh session, and the user can inspect + a new live session ID later if needed + +## Discovery order + +CCCC now resolves prior `codex` session metadata from multiple sources, in this order: + +1. persisted PTY state under the group runner state +2. `~/.codex/sessions/**/rollout-*.jsonl` +3. `~/.codex/active/as-*.json` +4. `~/.codex/state_*.sqlite` `threads` rows +5. `~/.codex/session_index.jsonl` + +The goal is to avoid depending on only one weak signal. + +## How actor matching works + +When CCCC needs to recover a `codex` session for a specific actor, it scores candidates using: + +- attached workspace path / `cwd` +- `group_id` +- actor `title` +- actor `id` +- recent `threads.updated_at` + +For `sqlite` thread recovery, CCCC currently matches against the `first_user_message` content that the `codex` CLI stores for each thread. 
In practice, CCCC benefits from prompts that contain lines like:
+
+- `group_id: ...`
+- `You are <actor_title>`
+- `You are <actor_id>`
+
+## Resume command shape
+
+When both `session_id` and `session_log_path` are known, CCCC upgrades the actor command to:
+
+```bash
+codex \
+  ...existing global flags... \
+  -C /absolute/workspace/path \
+  -c 'experimental_resume="/absolute/path/to/rollout-....jsonl"' \
+  resume <session_id>
+```
+
+This keeps the restored session aligned with the actor scope and gives `codex` both:
+
+- the logical session identifier
+- the rollout log needed by `experimental_resume`
+
+## Current user flow
+
+In the normal case, you do not need to manually paste the long shell wrapper.
+
+Recommended flow:
+
+```bash
+cccc attach .
+cccc actor add planner --runtime codex --title "需求规划专家"
+cccc actor sessions
+cccc actor sessions planner --probe-status
+cccc actor restart planner
+```
+
+If that actor had previous PTY state, or CCCC can find its prior `codex` thread
+from `~/.codex`, it will now try to resume automatically.
+
+You can also inspect what CCCC currently believes is recoverable:
+
+```bash
+cccc actor sessions --group <group_id>
+cccc actor sessions <actor> --group <group_id>
+cccc actor sessions <actor> --group <group_id> --probe-status
+```
+
+`--probe-status` is intentionally safety-first:
+
+- it does not inject `/status` into the live `codex` PTY
+- it reminds you that `/status` should be queried manually after you enter the actor's terminal
+- automatic PTY injection is disabled because it can disrupt or terminate the live session
+
+The matching Web UI flow is also safety-first:
+
+- CCCC reads the current session ID only from PTY output that already exists
+- it does not auto-send `/status` into the terminal for you
+- the captured live session ID can be saved even if it is not yet resumable by
+  Codex itself
+
+## Current Web UI flow
+
+The shipped Web UI behavior is:
+
+- templates stay portable and do not embed machine-local `session_id` values
+- "Create Group from Blueprint" accepts per-actor resume bindings
+- actor settings expose:
+  - an enable/disable toggle for native resume
+  - a stored `session_id`
+  - a `Use Current Session` action that reads session diagnostics
+- if native resume is disabled, launch always starts fresh but keeps the saved
+  `session_id`
+- if native resume is enabled, launch prefers native resume when recovery
+  evidence is strong enough, otherwise it falls back to a fresh launch
+
+## Launch prompt behavior
+
+When an actor already has `native_resume.enabled = true` and a saved
+`session_id`, Web launch paths now use an explicit three-way modal instead of a
+browser `OK / Cancel` confirm.
+
+The modal shows the saved session ID and offers three distinct actions:
+
+- `Resume saved session`
+- `Fresh start and clear saved session`
+- `Close`
+
+`Launch All Agents` now follows the same decision model.
Instead of silently +using saved native-resume state for every actor, the Web UI launches actors +sequentially and reuses the same resume prompt for each actor that already has a +saved `session_id`. Foreman is launched first, then peers. + +Semantics matter here: + +- `Resume saved session` launches with native resume intent +- resuming can continue an unfinished prior Codex task; if that old session was + still busy, the actor may not consume fresh CCCC messages immediately after + launch +- `Fresh start and clear saved session` starts a brand-new session and deletes + the saved `session_id` for that actor +- `Close` performs no launch, restart, or configuration change + +This change prevents the previous ambiguity where a plain `Cancel` action could +be misread as "close the dialog" even though it actually meant "discard the +saved session and launch fresh". + +That gives CCCC two separate layers of state: + +- actor configuration stores the user's resume intent +- PTY runner state stores the runtime-discovered recovery evidence + +This distinction is intentional. User intent should not be lost just because a +runtime process stopped, and runtime evidence should not overwrite the user's +configuration model. + +## Related Glossary + +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [resume](/reference/glossary/resume) +- [status](/reference/glossary/status) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [execution_workspace](/reference/glossary/execution_workspace) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so `resume`, `status`, and workspace-boundary wording stay consistent with repo-local semantics. +- `2026-03-23`: Added `profile` alignment so stored native-resume intent is kept distinct from live runtime evidence and actor identity. 
+ +## Resume into a busy session + +During March 16, 2026 validation on group `g_e87bca3c7a4d` +(`resume-fallback-flow-5`), actor `peer-impl` confirmed an important edge case: + +- `/status` still showed the expected saved session ID +- actor edit state and PTY runner state agreed on the same `session_id` +- but the resumed Codex terminal was still inside an older in-progress task + (`Summarize recent commits`) +- because that native session was still busy, the actor did not promptly read or + visibly reply to fresh CCCC chat instructions + +This is not the same failure mode as "resume lost the session". It means: + +- session continuity is intact +- Web/API persistence is intact +- the resumed runtime can still be unsuitable for immediate coordination if the + old Codex task is not idle yet + +That is why the launch prompt must clearly explain the consequence of choosing +resume, and why `Launch All Agents` should not silently auto-resume every saved +actor. + +## Launch persistence guard + +During March 2026 end-to-end validation, CCCC hit a daemon-side regression: + +- a PTY launch could correctly capture a new Codex `session_id` +- the daemon would persist it into actor config +- but a later stale in-memory `group.save()` during the same launch flow could + overwrite that actor update back to an empty `session_id` + +CCCC now avoids that overwrite by reloading the latest group document before it +persists `group.running = true` in both: + +- `actor.start` +- `group.start` + +This matters most for Codex session recovery because session capture and group +launch bookkeeping can happen within the same startup window. + +## Verified template-import resume flow + +The template-import path was re-validated on March 15, 2026 local time +(`2026-03-16T01:44Z`) with a real previously used Codex session. 
+ +Validated inputs: + +- attached project path: + `/Users/glennxu/workspace/minion/cccc-resume-e2e-round2` +- blueprint file: + `data/cccc-group-template-codex--checklist.yaml` +- actor bound during create: + `需求规划专家` +- saved session ID: + `019cf430-228b-7ea3-bb58-bf2653eea8c2` + +Validated outcomes: + +1. `POST /api/v1/groups/from_template` accepted the actor binding and returned + `resume_bindings_applied: ["需求规划专家"]`. +2. The created group persisted: + + ```yaml + native_resume: + enabled: true + session_id: 019cf430-228b-7ea3-bb58-bf2653eea8c2 + ``` + +3. Before launch, `Edit Agent: 需求规划专家` already showed: + - `Prefer native resume on launch` checked + - `Session ID` prefilled with the same saved value +4. After launch, manually entering `/status` in the actor terminal still showed + Codex session + `019cf430-228b-7ea3-bb58-bf2653eea8c2`. + +That sequence proves the session ID is not only saved in config; it also +survives the actual actor start and remains the runtime's active Codex session. + +## Executed test matrix + +The validation set now explicitly covers short, medium, and long chains instead +of relying on a single happy path. + +### Short chain + +Existing-group edit-only validation was run on group `g_414cfed1a68e` +(`cccc-resume-template-e2e-verified`): + +1. Stop actor `需求规划专家`. +2. Disable `Prefer native resume on launch`. +3. Save and reopen the actor edit dialog. +4. Verify the checkbox stays off while `Session ID` still remains + `019cf430-228b-7ea3-bb58-bf2653eea8c2`. +5. Re-enable resume and save again. + +Result: actor launch intent is reversible without losing the stored native +session target. + +### Medium chain + +Fresh-group pre-launch validation was run on group `g_4fa9ed662c8e` +(`cccc-resume-template-medium-fresh`) attached to: + +- `/Users/glennxu/workspace/minion/cccc-resume-e2e-medium-fresh` + +Executed path: + +1. Create a new group from + `data/cccc-group-template-codex--checklist.yaml`. +2. 
In `Resume Recovery`, bind actor `需求规划专家` to session + `019cf430-228b-7ea3-bb58-bf2653eea8c2`. +3. Create the group. +4. Open `Edit Agent: 需求规划专家` before first launch. +5. Verify: + - `Prefer native resume on launch` is checked + - `Session ID` is already prefilled with the same saved value + +Result: template-import binding survives persistence and actor settings preload +even before runtime launch. + +### Long chain + +Full create -> persist -> preload -> launch -> `/status` continuity was run on +group `g_414cfed1a68e` (`cccc-resume-template-e2e-verified`), and the actor +terminal still reported session +`019cf430-228b-7ea3-bb58-bf2653eea8c2` after launch. + +Result: runtime continuity matches the saved actor config, not just the form +state. + +A later rerun on fresh group `g_236e39802004` +(`cccc-resume-template-long-regression`) confirmed the same runtime continuity +via persisted PTY state even though the Web terminal did not reliably repaint +the `/status` session line for automation scraping. +The runner state file still stored the same resumed `session_id`, so the +resume behavior remained correct. + +## Extra control-flow branches observed + +Two non-happy-path branches were also observed during testing and should stay +in the regression set: + +1. Plain `Create Group` previously had its own duplicate-scope bug: it could + create an empty new group and then attach an already attached scope to it. + The current feature branch now blocks that attach in the daemon and cleans up + the just-created empty group in Web UI, so re-submitting the same attach path + reopens the existing group instead of leaving another orphan behind. +2. Nested paths under an already attached worktree are still normalized back to + the parent scope, but that branch now also reopens the existing group rather + than creating a second empty one. 
+ +## Operational pitfall: Web/UI current, daemon stale + +One important debugging lesson from the same validation: + +- a current Web UI alone is not enough +- if the daemon under `~/.cccc` is stale, resume behavior will look broken even + though the feature branch code is correct + +The concrete false-negative symptom was: + +- Web UI served the current feature worktree +- daemon reported `ccccd: running pid=... version=0.4.2` +- template-create requests still carried `resume_bindings_json` +- response body did not include `resume_bindings_applied` +- the created group did not persist `native_resume` + +After restarting the daemon from the feature worktree so it reported version +`0.4.4`, the same UI path succeeded immediately. + +So the first debugging step for any "template resume binding did not stick" +report should be: + +```bash +uv run cccc daemon status +``` + +If the daemon version does not match the worktree being tested, restart the +daemon first and only then trust the UI result. + +## When recovery works best + +Recovery is strongest when all of these are true: + +- the group is attached to the correct project path +- the actor keeps a stable `actor_id` +- the actor keeps a stable `title` +- the original `codex` session still exists under `~/.codex` +- the original thread prompt included the actor and group identity + +## Current boundaries + +The current implementation is intentionally conservative. 
+
+Implemented:
+
+- actor-config-level `resume_enabled` / `session_id` controls
+- Web UI group-creation bindings for actor-specific native resume
+- actor edit controls that preserve `session_id` while disabling resume
+- one-click UI actions for per-actor session diagnostics / current-session fill
+- persisted `session_log_path` in PTY runner state
+- multi-source `codex` session discovery
+- automatic `experimental_resume` injection
+- automatic `resume <session_id>` rebuild during actor launch paths
+- automatic fallback to a fresh launch when the configured resume target is not
+  actually recoverable
+
+Not yet implemented:
+
+- querying live `codex` sessions by sending `/status`
+- baking the external `agent-sessions-shim` heartbeat wrapper directly into CCCC
+- proving Codex resumability from a live session ID alone when Codex itself has
+  not persisted the corresponding saved-session artifacts
+
+## Prompt injection remains unchanged
+
+Native resume does not replace CCCC's own system prompt flow.
+
+The intended model is:
+
+- `codex` resumes its native session when enabled and recoverable
+- CCCC still injects its own preamble/help/system guidance through the existing
+  PTY startup and delivery pipeline
+
+In other words, native resume is an enhancement to the launch command, not a
+reason to skip CCCC-owned coordination and prompt delivery.
+
+## Why this is safer than only using `session_id`
+
+Using only `session_id` is not enough in real cases where:
+
+- session index is incomplete
+- multiple actors share the same workspace
+- the CLI history exists in `sqlite` or active presence but not in the index
+- `/status` reports a live session that Codex still does not list under
+  resumable saved sessions
+- `codex resume <session_id>` needs the rollout file to restore properly
+
+By keeping both `session_id` and `session_log_path`, CCCC can recover much closer to the hand-crafted command that has already been proven to work locally.
diff --git a/docs/reference/features.md b/docs/reference/features.md index 723c1a0e..13e79304 100644 --- a/docs/reference/features.md +++ b/docs/reference/features.md @@ -2,6 +2,19 @@ Detailed feature documentation for CCCC. +## Terminology Alignment + +This document follows the local glossary: + +- `group` is the main collaboration unit +- `attach` sets a group's `authoritative_workspace` +- `profile` means reusable actor runtime configuration, not the live actor itself +- `status` means an evidence-bound operator-facing state surface +- `resume` should be read by boundary: IM delivery resume is not the same thing + as runtime-native session resume +- `host_surface` refers to CCCC-owned readable truth surfaces, not downstream + interpretation layers + ## IM-Style Messaging ### Core Contracts @@ -128,7 +141,7 @@ cccc im logs -f ``` System Prompt (thin layer) ├── Who you are: Actor ID, role -├── Where you are: Working Group, Scope +├── Where you are: Group, attached workspace anchor └── What you can do: MCP tool list + key reminders (see cccc_help) MCP Tools (authoritative playbook + execution interface) @@ -231,6 +244,30 @@ cccc actor secrets --unset KEY cccc actor secrets --keys ``` +## Actor Profiles + +CCCC also supports reusable actor `profile` objects on the daemon / Web / MCP +side. + +- A profile stores reusable runtime configuration such as runtime kind, runner, + command, submit policy, capability defaults, and profile-owned secrets. +- An actor remains the live scheduled participant in a group. +- Linking an actor to a profile means the actor inherits reusable runtime + intent; it does not by itself prove current runtime liveness, execution + workspace, or native session continuity. +- Profile linkage is useful when repeated group setups or multiple actors should + share the same runtime defaults without copying all fields by hand. 
+ +CLI surface: + +```bash +cccc actor profile upsert --id shared-dev --name "Shared Dev" --runtime claude +cccc actor profile secrets shared-dev --keys +cccc actor add --profile-id shared-dev +cccc actor update --profile-id shared-dev +cccc actor update --profile-action convert_to_custom +``` + ## Blueprint / Group Template CCCC Web supports blueprint export/import for portable group setup. @@ -330,3 +367,18 @@ cccc setup --runtime custom cccc doctor # Environment check + runtime detection cccc runtime list # List available runtimes (JSON) ``` + +## Related Glossary + +- [group](/reference/glossary/group) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) +- [profile](/reference/glossary/profile) +- [resume](/reference/glossary/resume) +- [status](/reference/glossary/status) +- [host_surface](/reference/glossary/host_surface) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so feature-level docs stop mixing group semantics, attach authority, status surfaces, and different kinds of resume into one vague layer. +- `2026-03-23`: Added `profile` alignment and a dedicated actor-profile section so reusable runtime configuration stops being conflated with live actor identity. diff --git a/docs/reference/glossary/README.md b/docs/reference/glossary/README.md new file mode 100644 index 00000000..1d0ed52c --- /dev/null +++ b/docs/reference/glossary/README.md @@ -0,0 +1,18 @@ +# CCCC Local Glossary + +This compatibility page remains so repo-local readers who expect `README.md` +can still find the glossary root. + +这个兼容页保留下来,是为了让仍然按 `README.md` 习惯查找的人,能继续找到 glossary 根入口。 + +Use the canonical glossary index here: + +- [index.md](./index.md) + +请使用这里的正式 glossary 首页: + +- [index.md](./index.md) + +## Change Log + +- `2026-03-21`: Converted `README.md` into a compatibility entry and moved the canonical glossary root content to `index.md` for VitePress-friendly directory routing. 
diff --git a/docs/reference/glossary/actor.md b/docs/reference/glossary/actor.md new file mode 100644 index 00000000..1e58cb62 --- /dev/null +++ b/docs/reference/glossary/actor.md @@ -0,0 +1,56 @@ +# Term + +`actor` + +术语:`actor` + +## Definition + +An `actor` is a scheduled collaboration participant inside a `cccc` group. + +`actor` 是 `cccc` group 内部的一个可调度协作参与者。 + +## Why It Exists + +The term exists so runtime identity, collaboration identity, and operator-facing +coordination can be expressed with one stable unit. + +它的存在,是为了让 runtime 身份、协作身份和操作者协调对象能通过一个稳定单元表达出来。 + +## What It Is Not + +- It is not merely a terminal tab. +- It is not only a runtime process. +- It is not interchangeable with a user. + +## Canonical Scope + +Runtime domain, collaboration domain, CLI and Web operator surfaces. + +runtime 域、协作域,以及 CLI / Web 操作面。 + +## Related Terms + +- `group` +- `profile` +- `execution_workspace` +- `resume` +- `status` + +## Repo Usage Notes + +- `actor_id` is the stable identity key. +- Display title and runtime session identity may change, but the actor term refers to the scheduled participant itself. +- An actor may link to a reusable `profile`, but actor and profile are not the + same semantic object. + +## Status + +Active + +当前状态:有效 + +## Change Log + +- `2026-03-21`: Added the local card so actor identity can stop drifting between UI wording, runtime notes, and handoff prose. +- `2026-03-23`: Clarified that a live actor can link to a reusable profile without collapsing the two terms into one. diff --git a/docs/reference/glossary/attach.md b/docs/reference/glossary/attach.md new file mode 100644 index 00000000..b746e352 --- /dev/null +++ b/docs/reference/glossary/attach.md @@ -0,0 +1,53 @@ +# Term + +`attach` + +术语:`attach` + +## Definition + +In `cccc`, `attach` is the user-facing action that binds a project path to a +group as the group's authoritative workspace. 
+ +在 `cccc` 里,`attach` 是把某个项目路径绑定到 group 上、并把它设为该 group 的 authoritative workspace 的用户动作。 + +## Why It Exists + +`attach` gives operators one explicit action for saying which project root the +group is currently anchored to. + +它的存在,是为了给操作者一个明确动作,说明这个 group 当前到底锚定在哪个项目根路径上。 + +## What It Is Not + +- It is not the same thing as actor launch. +- It is not the same thing as per-actor execution isolation. +- It is not a promise that every actor must run in its own separate workspace. + +## Canonical Scope + +Product, CLI, runtime anchor semantics. + +产品、CLI、runtime 锚点语义。 + +## Related Terms + +- `authoritative_workspace` +- `execution_workspace` +- `workspace_mode` +- `group` + +## Repo Usage Notes + +- In current `cccc` product direction, `attach` remains the authoritative path-setting action. +- Legacy wording such as `attach scope` may remain in older docs for compatibility, but glossary meaning wins. + +## Status + +Active + +当前状态:有效 + +## Change Log + +- `2026-03-21`: Created the first local glossary card for `attach` and fixed its boundary relative to authoritative and execution workspace semantics. diff --git a/docs/reference/glossary/authoritative_workspace.md b/docs/reference/glossary/authoritative_workspace.md new file mode 100644 index 00000000..73e8d35e --- /dev/null +++ b/docs/reference/glossary/authoritative_workspace.md @@ -0,0 +1,52 @@ +# Term + +`authoritative_workspace` + +术语:`authoritative_workspace` + +## Definition + +The `authoritative_workspace` is the project path that `cccc` treats as the +group's official workspace anchor. + +`authoritative_workspace` 是 `cccc` 视为 group 官方工作区锚点的项目路径。 + +## Why It Exists + +This term exists to keep authority separate from per-actor execution details. + +这个术语存在的意义,是把 authority 和 actor 具体执行目录分开。 + +## What It Is Not + +- It is not automatically every actor's current `cwd`. +- It is not replaced just because an actor uses an isolated workspace. +- It is not a bookkeeping convenience; it is a semantic anchor. 
+ +## Canonical Scope + +Group-level product meaning and runtime truth. + +group 级产品语义与 runtime truth。 + +## Related Terms + +- `attach` +- `execution_workspace` +- `workspace_mode` +- `group` + +## Repo Usage Notes + +- Current product direction says `attach` defines the authoritative workspace. +- Older wording such as `scope`, `attached path`, or `project path` may still appear, but when they conflict, `authoritative_workspace` is the precise term. + +## Status + +Active + +当前状态:有效 + +## Change Log + +- `2026-03-21`: Added the local card so `cccc` docs can distinguish authoritative path meaning from actor execution `cwd`. diff --git a/docs/reference/glossary/execution_workspace.md b/docs/reference/glossary/execution_workspace.md new file mode 100644 index 00000000..344ca5d7 --- /dev/null +++ b/docs/reference/glossary/execution_workspace.md @@ -0,0 +1,54 @@ +# Term + +`execution_workspace` + +术语:`execution_workspace` + +## Definition + +The `execution_workspace` is the effective workspace path an actor runtime is +currently using for work execution. + +`execution_workspace` 是 actor runtime 当前实际用于执行工作的有效工作区路径。 + +## Why It Exists + +This term exists so that `cccc` can describe actor execution reality without +transferring authority away from the group anchor. + +这个术语存在,是为了让 `cccc` 能描述 actor 的执行现实,同时不把 authority 从 group 锚点上转移走。 + +## What It Is Not + +- It is not automatically the authoritative workspace. +- It is not proof that ownership moved to that path. +- It is not required to differ from the authoritative workspace. + +## Canonical Scope + +Actor runtime semantics and status interpretation. + +actor runtime 语义与状态解释。 + +## Related Terms + +- `authoritative_workspace` +- `workspace_mode` +- `shared` +- `isolated` +- `actor` + +## Repo Usage Notes + +- In `shared` mode, execution workspace usually equals authoritative workspace. +- In `isolated` mode, execution workspace may be a separate actor-local path, but that does not change the authoritative workspace. 
+ +## Status + +Active + +当前状态:有效 + +## Change Log + +- `2026-03-21`: Added the local card to stop `cwd`, `worktree path`, and authoritative project root from drifting into one ambiguous idea. diff --git a/docs/reference/glossary/group.md b/docs/reference/glossary/group.md new file mode 100644 index 00000000..6ad73e7e --- /dev/null +++ b/docs/reference/glossary/group.md @@ -0,0 +1,52 @@ +# Term + +`group` + +术语:`group` + +## Definition + +A `group` is the core collaboration unit in `cccc`. It owns collaboration state, +actor membership, runtime attachment context, and durable history. + +`group` 是 `cccc` 的核心协作单元。它承载协作状态、actor 成员、runtime 附着上下文和持久历史。 + +## Why It Exists + +The term exists so collaboration can be scoped to one durable operational unit. + +这个术语存在,是为了把协作限定到一个可持续、可运维的操作单元里。 + +## What It Is Not + +- It is not only a chat room. +- It is not only a runtime session. +- It is not just a folder on disk. + +## Canonical Scope + +Kernel domain model and operator-facing collaboration semantics. + +kernel 域模型与操作者协作语义。 + +## Related Terms + +- `actor` +- `authoritative_workspace` +- `registry` +- `host_surface` + +## Repo Usage Notes + +- In `cccc`, a group remains the main unit of lifecycle, ledger history, and runtime ownership. +- Older wording such as `working group` remains compatible, but `group` is the concise local term. + +## Status + +Active + +当前状态:有效 + +## Change Log + +- `2026-03-21`: Added the local card to anchor group meaning for product docs, CLI docs, and runtime notes. diff --git a/docs/reference/glossary/host_surface.md b/docs/reference/glossary/host_surface.md new file mode 100644 index 00000000..7599c301 --- /dev/null +++ b/docs/reference/glossary/host_surface.md @@ -0,0 +1,54 @@ +# Term + +`host_surface` + +术语:`host_surface` + +## Definition + +A `host_surface` is a CCCC-owned readable surface that exposes host/runtime +truth to downstream consumers. 
+ +`host_surface` 是由 CCCC 拥有的可读取表面,用来向下游消费者暴露 host/runtime truth。 + +## Why It Exists + +This term exists so downstream tools can consume runtime-owned facts without +confusing them with higher-level interpretation. + +它的存在,是为了让下游工具可以消费 runtime-owned facts,而不会把它们和更高层解释混在一起。 + +## What It Is Not + +- It is not a sidecar interpretation layer. +- It is not monitor projection logic. +- It is not business-specific workflow meaning. + +## Canonical Scope + +Machine-readable host/runtime observation surfaces. + +机器可读的 host/runtime 观测表面。 + +## Related Terms + +- `status` +- `resume` +- `registry` +- `group` +- `actor` + +## Repo Usage Notes + +- `cccc_runtime_capture_status` is one concrete host-surface example. +- Local docs should use `host_surface` when the point is readable host-owned truth, not downstream interpretation. + +## Status + +Active + +当前状态:有效 + +## Change Log + +- `2026-03-21`: Added the local card so host-owned readable surfaces can stop drifting into monitor or sidecar terminology. diff --git a/docs/reference/glossary/index.md b/docs/reference/glossary/index.md new file mode 100644 index 00000000..b16fbc73 --- /dev/null +++ b/docs/reference/glossary/index.md @@ -0,0 +1,59 @@ +# CCCC Local Glossary + +This directory is the repository-local glossary for `cccc`. + +这个目录是 `cccc` 仓库内部的本地术语表。 + +## Purpose + +The local glossary exists so that `cccc` product docs, CLI docs, runtime notes, +and handoff docs can share one repository-local semantic source for the terms +that materially affect operator understanding and product meaning. 
+本地 glossary 的作用,是给 `cccc` 的产品文档、CLI 文档、runtime 说明和交接文档提供一套仓库内部统一可执行的语义来源。 + +## Governance Boundary + +- The upstream cross-topic semantic sources remain: + - `/Users/glennxu/workspace/Twilight/V4/TwilightProject/docs/topics/glossary/README.md` + - `/Users/glennxu/workspace/Twilight/V4/TwilightProject/docs/topics/cccc_product_and_requirement_evolution/README.md` + - `/Users/glennxu/workspace/Twilight/V4/TwilightProject/docs/topics/cccc_product_and_requirement_evolution/current_product_direction.md` + - `/Users/glennxu/workspace/Twilight/V4/TwilightProject/docs/topics/cccc_product_and_requirement_evolution/workspace_mode_shared_vs_isolated_notes.md` +- This local glossary only keeps the subset of terms that materially affect `cccc`. +- Local cards should align with upstream direction, but should not blindly copy + all upstream glossary files. +- If a local glossary card conflicts with older `cccc` prose, the glossary card + wins. + +## Current Card Set + +- [attach](./attach.md) +- [authoritative_workspace](./authoritative_workspace.md) +- [execution_workspace](./execution_workspace.md) +- [workspace_mode](./workspace_mode.md) +- [shared](./shared.md) +- [isolated](./isolated.md) +- [group](./group.md) +- [actor](./actor.md) +- [profile](./profile.md) +- [resume](./resume.md) +- [status](./status.md) +- [registry](./registry.md) +- [host_surface](./host_surface.md) + +## Authoring Rules + +- File names must be English only. +- Content should be bilingual: English plus Chinese. +- Each card should define meaning and boundary, not become an implementation notebook. +- If old wording must remain for history, label it as `legacy` or `compatibility wording`. +- When a card changes, downstream `cccc` docs using that term should be reviewed.
+ +## Related Note + +- [local_glossary_maintenance.md](./local_glossary_maintenance.md) + +## Change Log + +- `2026-03-21`: Created the bounded repo-local glossary root for the first set of `cccc` terms that directly affect operator understanding, CLI semantics, runtime boundaries, and resume/status interpretation. +- `2026-03-23`: Added `profile` to the canonical local card set so reusable runtime identity and launch intent are explicit in repo-local semantics. diff --git a/docs/reference/glossary/isolated.md b/docs/reference/glossary/isolated.md new file mode 100644 index 00000000..5ffba728 --- /dev/null +++ b/docs/reference/glossary/isolated.md @@ -0,0 +1,53 @@ +# Term + +`isolated` + +术语:`isolated` + +## Definition + +`isolated` is the workspace mode where an actor's execution workspace is kept +separate from the group's authoritative workspace. + +`isolated` 是一种 workspace mode,表示 actor 的 execution workspace 与 group 的 authoritative workspace 保持分离。 + +## Why It Exists + +It exists for bounded cases where actor-local isolation is useful, such as +parallel code changes or experimental lanes. + +它的存在,是为了支持 actor 本地隔离确实有价值的受限场景,例如并发改代码或实验性工作区。 + +## What It Is Not + +- It is not the new source of truth. +- It is not required for every actor. +- It is not permission to silently reinterpret group authority. + +## Canonical Scope + +Optional execution policy and advanced operator capability. + +可选执行策略与高级操作者能力。 + +## Related Terms + +- `workspace_mode` +- `shared` +- `execution_workspace` +- `registry` + +## Repo Usage Notes + +- Current product direction treats `isolated` as an explicit optional capability, not the main path. +- If future worktree support arrives, the glossary meaning still stays semantic rather than implementation-specific. + +## Status + +Optional advanced direction + +当前状态:可选增强方向 + +## Change Log + +- `2026-03-21`: Added the local card to prevent isolated execution from being misread as authority transfer. 
diff --git a/docs/reference/glossary/local_glossary_maintenance.md b/docs/reference/glossary/local_glossary_maintenance.md new file mode 100644 index 00000000..734e10b5 --- /dev/null +++ b/docs/reference/glossary/local_glossary_maintenance.md @@ -0,0 +1,52 @@ +# Local Glossary Maintenance + +## Why CCCC Has A Local Glossary + +`cccc` needs a repository-local glossary because some terms directly affect: + +- CLI meaning +- runtime boundary interpretation +- status and resume interpretation +- operator understanding +- handoff consistency + +`cccc` 需要自己的本地 glossary,因为有些术语会直接影响 CLI 语义、runtime 边界、`status` / `resume` 的理解,以及操作者和交接文档的一致性。 + +## Relationship To TwilightProject Upstream Glossary + +- TwilightProject glossary remains the upstream cross-topic semantic source. +- `cccc` local glossary is the executable local subset used by this repository. +- Local wording should align with upstream direction, but local cards should only + be added for terms that materially affect `cccc`. + +## When To Add A New Local Card + +Add a local card when a term materially affects: + +- product meaning +- CLI meaning +- runtime truth or runtime boundary +- status / resume interpretation +- user or operator understanding +- repeated handoff or review language in this repo + +## When To Sync Wording Back Upstream + +Sync wording back upstream when: + +- the term is no longer `cccc`-specific +- the same meaning is now reused across multiple topics or repositories +- upstream wording would benefit future cross-topic alignment +- local clarification reveals a real shared semantic gap + +## Maintenance Rule + +Update order: + +1. update local glossary first +2. scan and align affected downstream `cccc` docs +3. update the changed docs' changelog entries + +## Change Log + +- `2026-03-21`: Added the first short maintenance note clarifying why `cccc` keeps a local glossary, how it relates to TwilightProject upstream glossary, and when local wording should be pushed back upstream. 
diff --git a/docs/reference/glossary/profile.md b/docs/reference/glossary/profile.md new file mode 100644 index 00000000..b555e51e --- /dev/null +++ b/docs/reference/glossary/profile.md @@ -0,0 +1,59 @@ +# Term + +`profile` + +术语:`profile` + +## Definition + +In `cccc`, `profile` means a reusable actor runtime profile: a stored runtime +configuration and secret-binding intent object that an actor can link to. + +在 `cccc` 里,`profile` 指的是可复用的 actor runtime profile:它是一个可被 actor 关联的、持久保存的 runtime 配置与 secret-binding intent 对象。 + +## Why It Exists + +This term exists so reusable runtime identity and launch intent do not have to +be collapsed into the live actor record itself. + +这个术语存在,是为了把“可复用的 runtime 身份与启动意图”从 live actor 记录本身里分离出来,不必全部挤进 actor 这一层。 + +## What It Is Not + +- It is not the live actor. +- It is not a guarantee that a runtime process is currently running. +- It is not the same thing as native session continuity proof. +- It is not a shell profile or browser profile unless the text says so explicitly. + +## Canonical Scope + +Reusable actor runtime configuration, launch intent, and profile-owned secrets. + +可复用的 actor runtime 配置、启动意图,以及 profile-owned secrets。 + +## Related Terms + +- `actor` +- `resume` +- `status` +- `execution_workspace` +- `registry` + +## Repo Usage Notes + +- Unqualified `profile` in `cccc` product/runtime docs should normally be read + as `actor profile`. +- An actor is the scheduled collaboration participant; a profile is the reusable + runtime configuration that actor may link to. +- `profile_id`, `profile_scope`, and `profile_owner` identify the linked + profile, but they do not by themselves prove current live runtime state. + +## Status + +Active + +当前状态:有效 + +## Change Log + +- `2026-03-23`: Added the local card so `profile` can become a first-class repo-local term instead of drifting between actor config, runtime identity, and unrelated shell/browser wording. 
diff --git a/docs/reference/glossary/registry.md b/docs/reference/glossary/registry.md new file mode 100644 index 00000000..2d18ee61 --- /dev/null +++ b/docs/reference/glossary/registry.md @@ -0,0 +1,53 @@ +# Term + +`registry` + +术语:`registry` + +## Definition + +In `cccc`, `registry` is a bookkeeping and lookup surface used to index durable +objects such as groups or defaults. + +在 `cccc` 里,`registry` 是一个用于索引 group、默认项等持久对象的记账与查询面。 + +## Why It Exists + +It exists so the system can find and explain known objects without rescanning +every other state file each time. + +它存在,是为了让系统能在不每次全量重扫其他状态文件的前提下,定位并解释已知对象。 + +## What It Is Not + +- It is not the new source of truth. +- It is not a second control plane. +- It is not authority transfer away from group state or runtime truth. + +## Canonical Scope + +Bookkeeping, lookup, diagnostics. + +记账、查询、诊断。 + +## Related Terms + +- `group` +- `host_surface` +- `workspace_mode` +- `isolated` + +## Repo Usage Notes + +- Current product direction is conservative: registry is useful, but should stay secondary to authoritative state and runtime truth. +- If future workspace registry exists, this glossary meaning still applies. + +## Status + +Active but secondary + +当前状态:有效,但属次级语义面 + +## Change Log + +- `2026-03-21`: Added the local card to keep registry discussion aligned with the product rule that registry must not become the new truth root. diff --git a/docs/reference/glossary/resume.md b/docs/reference/glossary/resume.md new file mode 100644 index 00000000..1b4db4f2 --- /dev/null +++ b/docs/reference/glossary/resume.md @@ -0,0 +1,53 @@ +# Term + +`resume` + +术语:`resume` + +## Definition + +In `cccc`, `resume` means recovering useful continuity for an actor or group. +That continuity may include CCCC-owned work-state recovery and, in some runtime +paths, native runtime session recovery. 
+ +在 `cccc` 里,`resume` 表示为 actor 或 group 恢复有用连续性。这种连续性既可能包含 CCCC 自己拥有的 work-state recovery,也可能在部分 runtime 路径里包含 native session recovery。 + +## Why It Exists + +The term exists because continuity is valuable, but different recovery layers +must stay explicit. + +这个术语存在,是因为连续性很有价值,但不同恢复层必须保持显式区分。 + +## What It Is Not + +- It is not always native runtime session recovery. +- It is not proof that the old session is safe to continue. +- It is not equivalent to a successful `/status` readout alone. + +## Canonical Scope + +Runtime recovery semantics, operator expectations, status interpretation. + +runtime 恢复语义、操作者预期、状态解释。 + +## Related Terms + +- `status` +- `actor` +- `host_surface` + +## Repo Usage Notes + +- Current `cccc` docs should distinguish CCCC-owned recovery from runtime-native resume. +- Legacy wording that treats `resume` as if it always means native vendor session continuity should be treated as compatibility wording only. + +## Status + +Active with layered semantics + +当前状态:有效,但带有分层语义 + +## Change Log + +- `2026-03-21`: Added the local card to keep `resume` from collapsing into “native session recovery only”. diff --git a/docs/reference/glossary/shared.md b/docs/reference/glossary/shared.md new file mode 100644 index 00000000..516e5da9 --- /dev/null +++ b/docs/reference/glossary/shared.md @@ -0,0 +1,52 @@ +# Term + +`shared` + +术语:`shared` + +## Definition + +`shared` is the workspace mode where an actor uses the group's authoritative +workspace as its execution workspace. + +`shared` 是一种 workspace mode,表示 actor 使用 group 的 authoritative workspace 作为自己的 execution workspace。 + +## Why It Exists + +This term exists to preserve the lightweight default path. + +这个术语存在,是为了保住轻量默认路径。 + +## What It Is Not + +- It is not a claim that no coordination risk exists. +- It is not equivalent to “all actors edit safely in parallel”. +- It is not a weaker form of authority; it is the default aligned path. 
+ +## Canonical Scope + +Default operator path and execution policy. + +默认操作者路径与执行策略。 + +## Related Terms + +- `workspace_mode` +- `authoritative_workspace` +- `execution_workspace` +- `isolated` + +## Repo Usage Notes + +- Current product direction treats `shared` as the default recommended mode. +- `shared` is especially appropriate for proving collaboration, status, resume, and low-conflict runs first. + +## Status + +Recommended default + +当前状态:推荐默认值 + +## Change Log + +- `2026-03-21`: Added the local card so `shared` can be discussed as a first-class default mode instead of an unstated assumption. diff --git a/docs/reference/glossary/status.md b/docs/reference/glossary/status.md new file mode 100644 index 00000000..19289c84 --- /dev/null +++ b/docs/reference/glossary/status.md @@ -0,0 +1,52 @@ +# Term + +`status` + +术语:`status` + +## Definition + +`status` is the operator-facing state surface that reports current runtime, +configuration, or lifecycle reality as honestly as the available evidence allows. + +`status` 是面向操作者的状态读取面,用来在现有证据允许的前提下尽量诚实地报告 runtime、配置或生命周期的当前现实。 + +## Why It Exists + +The term exists because operators need one explainable way to understand what +the system currently believes is true. + +它的存在,是因为操作者需要一个可解释的读取面,来理解系统当前认为哪些事实成立。 + +## What It Is Not + +- It is not automatically proof of every deeper capability. +- It is not the same as control-plane ownership. +- It is not a substitute for richer diagnostics when the system is ambiguous. + +## Canonical Scope + +Operator semantics, runtime observation, CLI and helper interpretation. + +操作者语义、runtime 观测、CLI 与 helper 解释。 + +## Related Terms + +- `resume` +- `host_surface` +- `registry` + +## Repo Usage Notes + +- Current `cccc` direction prefers status surfaces that are explicit about present, missing, partial, and inferred evidence. +- Older prose that treats a status line as full proof of session resumability should be treated as legacy shorthand. 
+ +## Status + +Active + +当前状态:有效 + +## Change Log + +- `2026-03-21`: Added the local card so `status` can consistently mean an evidence-bound observation surface rather than a vague success label. diff --git a/docs/reference/glossary/workspace_mode.md b/docs/reference/glossary/workspace_mode.md new file mode 100644 index 00000000..f1e4b26e --- /dev/null +++ b/docs/reference/glossary/workspace_mode.md @@ -0,0 +1,54 @@ +# Term + +`workspace_mode` + +术语:`workspace_mode` + +## Definition + +`workspace_mode` is the policy term that explains how `cccc` resolves an +actor's execution workspace relative to the authoritative workspace. + +`workspace_mode` 是一个策略术语,用来说明 `cccc` 应该如何把 actor 的 execution workspace 相对于 authoritative workspace 进行解析。 + +## Why It Exists + +It exists so optional workspace isolation can be explicit instead of silently +rewriting the default path. + +它的存在,是为了让可选的工作区隔离成为显式能力,而不是悄悄改写默认路径。 + +## What It Is Not + +- It is not a replacement for `attach`. +- It is not itself a registry. +- It is not proof that isolated workspaces should become the default path. + +## Canonical Scope + +Product policy, operator-facing configuration, status explanation. + +产品策略、操作者配置、状态解释。 + +## Related Terms + +- `attach` +- `authoritative_workspace` +- `execution_workspace` +- `shared` +- `isolated` + +## Repo Usage Notes + +- Current product direction recommends default `workspace_mode = shared`. +- `workspace_mode` should stay explicit and optional rather than becoming hidden automation. + +## Status + +Proposed-active direction + +当前状态:建议中的有效方向 + +## Change Log + +- `2026-03-21`: Added the first local definition so future docs can discuss shared vs isolated execution without weakening attach semantics. 
diff --git a/docs/reference/minion-handover-cherry-pick-assessment.md b/docs/reference/minion-handover-cherry-pick-assessment.md new file mode 100644 index 00000000..8b3a0df7 --- /dev/null +++ b/docs/reference/minion-handover-cherry-pick-assessment.md @@ -0,0 +1,264 @@ +# Minion Handover Cherry-pick Assessment + +Date: `2026-03-17` + +Status: `assessed, not yet merged` + +Current branch at assessment time: `feature/web-and-daemon-version-badges` + +## Purpose + +This note preserves the assessment of a proposed `minion handover / reasoning capture` import so that implementation can resume later without reconstructing the analysis from chat history. + +The goal of the assessment was to determine: + +1. Whether the requested commits can be `git cherry-pick`-ed onto the current codebase. +2. Which parts are structurally low-risk versus behaviorally high-risk. +3. What import order is safest for later implementation. + +## Assessed Commits + +The following commits were assessed in this exact order: + +1. `a312003` + `feat(contracts): add metadata payload structure to Event and ChatMessageData` +2. `61cc86b` + `feat(minion): add Dual-Pipeline JSONL collector and eval track` +3. `9692df1` + `docs(minion): add agent-sessions deep research report for thinking block extraction` +4. `fa9238f` + `feat(minion): add JSONL thinking block parser (ported from agent-sessions)` +5. `f59763b` + `feat(minion): add PTY ANSI stream cleaner for reasoning extraction` +6. `05621ee` + `feat(minion): refine ANSIStreamCleaner to handle Erase In Line and hook into pty` +7. `596886e` + `feat(minion-v2): Phase 0-5 complete implementation of PTY Interceptor & Watchdog` +8. `2752d35` + `test(minion): commit remaining test cases and validation fixes for Handover Orchestrator` + +## Verified Facts + +### Cherry-pick result + +- A real dry-run was performed in a temporary worktree created from the assessment branch tip. +- All `8` commits above were cherry-picked successfully. 
+- No textual merge conflicts occurred. +- The temporary worktree stayed clean after the cherry-pick sequence completed. + +### Syntax and import smoke checks + +The assessed stack passed the following lightweight validation in the temporary worktree: + +- Python `compileall` over the imported `minion` modules and related tests. +- Direct import and execution smoke checks for: + - `Event` envelope metadata + - `ChatMessageData.metadata` + - `ChatStreamData.metadata` + - `normalize_event_data(...)` metadata retention + - `ANSIStreamCleaner` + - `UnifiedEventMerger` + - `_format_facts_preamble(...)` + +### What could not be fully verified in the assessment environment + +- Full `pytest` execution was not completed because the available virtual environment did not provide a working `pytest` installation in that assessment context. +- Therefore, this assessment confirms: + - cherry-pick compatibility + - syntax/import viability + - targeted smoke behavior +- This assessment does **not** claim full runtime or regression safety. + +## Key Integration Surfaces + +Most files in this stack are additive. The main existing surfaces touched by the proposed import are: + +### Contracts + +- `src/cccc/contracts/v1/event.py` +- `src/cccc/contracts/v1/message.py` + +Notable effect: + +- Introduces metadata-bearing payload support for `Event`, `ChatMessageData`, and `ChatStreamData`. + +Assessment: + +- Low structural risk. +- Good candidate for early import. + +### PTY runner + +- `src/cccc/runners/pty.py` + +Notable effect: + +- Adds PTY output hook support via `on_output` / `set_output_hook`. + +Assessment: + +- Low-to-medium structural risk. +- Important foundation for later reasoning capture. +- Needs care because it introduces a new shared hook slot into the PTY supervision path. + +### Daemon startup path + +- `src/cccc/daemon/server.py` + +Notable effect: + +- `596886e` wires `logger`, `watchdog`, and `orchestrator` into daemon startup. 
+ +Assessment: + +- Highest behavior risk in the whole stack. +- This is the main reason the full import should not be treated as a zero-risk bulk cherry-pick, even though the patches apply cleanly. + +## Risk Summary + +### Low-risk group + +These are mostly additive and comparatively safe to import early: + +- `a312003` +- `9692df1` +- `61cc86b` +- `fa9238f` +- `f59763b` +- `05621ee` + +Why they are lower risk: + +- Mostly new files or local contract extensions. +- Minimal direct coupling to current daemon default behavior. +- The only shared-path change in this set is the PTY output hook addition in `src/cccc/runners/pty.py`. + +### Higher-risk group + +- `596886e` +- `2752d35` + +Why they are higher risk: + +- They move the work from “available capability” to “default daemon behavior”. +- They introduce watchdog/orchestrator side effects during ordinary daemon and actor lifecycle. + +## Behavior Risks To Re-check Before Real Merge + +These are the main risks that should be deliberately tested during implementation. + +### 1. Single PTY output hook slot + +The imported PTY changes add a shared output hook mechanism on the supervisor. + +Risk: + +- If later features also need PTY stream interception, there may be overwrite or composition issues. + +Implementation note: + +- Consider whether the final design should support hook fan-out instead of a single hook slot. + +### 2. Reasoning logger starts background JSONL watchers + +The imported logger stack sets up background watchers and currently hardcodes a probe under `~/.claude/projects`. + +Risk: + +- This is not runtime-neutral. +- It may be mismatched with Codex-focused workflows. +- It may create persistent background thread / resource behavior even when the feature is not actively needed. + +Implementation note: + +- Re-check whether the watcher should be runtime-aware and gated by actor runtime rather than always probing a Claude-specific path. + +### 3. 
Watchdog writes back into actor PTY + +The watchdog can inject dehydration messages into live actor PTY sessions when it detects flood / repeat / error patterns. + +Risk: + +- False positives may interrupt legitimate long-running agent output. +- This directly changes agent behavior, not just observability. + +Implementation note: + +- Treat watchdog thresholds and enablement as rollout-sensitive. + +### 4. Orchestrator is not purely passive + +Even with `auto_restart=False`, the orchestrator still enters handover logic when an actor exits. + +Risk: + +- If environment keys are available, it can perform LLM extraction and MCP synchronization paths. +- This means the feature is not merely “recording”; it can become part of the default lifecycle. + +Implementation note: + +- Prefer explicit gating for initial rollout. + +## Recommended Import Strategy + +The safest later implementation path is **not** to import everything in one step on a busy branch. + +### Recommended phase split + +Phase A: import the low-risk foundations first + +1. `a312003` +2. `9692df1` +3. `61cc86b` +4. `fa9238f` +5. `f59763b` +6. `05621ee` + +Goal: + +- Land contracts, parser, ANSI cleaner, PTY hook, and collector groundwork without enabling daemon-side lifecycle behavior by default. + +Phase B: import daemon behavior last + +7. `596886e` +8. `2752d35` + +Goal: + +- Review and possibly refactor daemon startup integration before enabling logger/watchdog/orchestrator by default. + +### Strong recommendation + +Before merging `596886e` into an actively used branch, consider adding an explicit feature gate such as an environment flag or settings switch so that: + +- code can land, +- tests can be written, +- and daemon startup behavior does not change for every user immediately. + +## Suggested Follow-up Validation + +When implementation resumes, the following validation should be done in addition to normal unit tests: + +1. 
Start daemon with feature disabled and confirm existing startup behavior is unchanged. +2. Start daemon with feature enabled and confirm: + - PTY output interception works + - transcript files are produced as expected + - watchdog does not trigger on normal agent activity + - orchestrator does not unexpectedly restart or mutate actors +3. Validate Codex-focused scenarios separately from Claude/JSONL scenarios. +4. Re-run resume/session workflows if the feature touches PTY lifecycle in the same branch. + +## Decision Snapshot + +Final assessment conclusion: + +- `Cherry-pick compatibility`: `high` +- `Text conflict risk`: `low` +- `Behavior change risk`: `medium to high` +- `Recommended execution style`: `phased import, not bulk merge` + +In short: + +- The stack can be imported. +- The stack should not be treated as operationally harmless. +- The daemon startup wiring should be reviewed and likely gated before real rollout. diff --git a/docs/reference/positioning.md b/docs/reference/positioning.md index 807f8828..c64bc365 100644 --- a/docs/reference/positioning.md +++ b/docs/reference/positioning.md @@ -10,7 +10,18 @@ Core value: - durable collaboration substrate (append-only ledger) - unified control plane across Web/CLI/MCP/IM - explicit message semantics for reliable coordination -- operationally manageable actor runtime model +- operationally manageable actor/profile runtime model + +Current profile-backed path: + +```bash +cccc actor profile upsert --id shared-dev --name "Shared Dev" --runtime claude +cccc actor add dev --profile-id shared-dev +``` + +That profile-backed setup path is part of the product model: reusable runtime +intent belongs to `profile`, while the live scheduled participant remains the +`actor`. ## What CCCC Is Not @@ -22,6 +33,9 @@ CCCC is not: CCCC should own collaboration state and control-plane semantics; other systems can own compute DAGs and business workflows. 
+For recurring local terms such as `group`, `actor`, `profile`, `attach`, and +`host_surface`, prefer the local glossary over older prose shortcuts. + ## Ideal Adoption Scenarios Use CCCC when you need: @@ -56,3 +70,17 @@ Recommended layering: - IM gateway: remote ops and lightweight interventions This separation keeps CCCC focused, composable, and maintainable. + +## Related Glossary + +- [group](/reference/glossary/group) +- [actor](/reference/glossary/actor) +- [profile](/reference/glossary/profile) +- [attach](/reference/glossary/attach) +- [host_surface](/reference/glossary/host_surface) + +## Change Log + +- `2026-03-23`: Added a profile-backed setup path so positioning language points to the current actor/profile product model, not only abstract semantics. +- `2026-03-21`: Added local glossary alignment guidance so product-positioning prose does not drift away from repo-local semantic definitions. +- `2026-03-23`: Updated positioning language so reusable actor profiles are treated as part of the runtime model instead of disappearing behind actor-only wording. diff --git a/docs/reference/runtime-session-recovery-strategy.md b/docs/reference/runtime-session-recovery-strategy.md new file mode 100644 index 00000000..6d9f0d44 --- /dev/null +++ b/docs/reference/runtime-session-recovery-strategy.md @@ -0,0 +1,428 @@ +# Runtime Session Recovery Strategy + +How CCCC should think about recovery across multiple agent CLIs such as `codex`, `claude`, and future runtimes like `rovodev`. + +## Terminology Alignment + +This document follows the local glossary: + +- `resume` is layered and not limited to native runtime session reuse +- `status` is an evidence-bound observation surface +- `host_surface` means a CCCC-owned readable surface exposing host/runtime truth +- `profile` means reusable actor runtime configuration and launch intent +- `attach` and `authoritative_workspace` remain distinct from actor execution details + +This document is intentionally exploratory. 
It is not a promise that every runtime can recover its own native session. It defines the recovery layers that CCCC should preserve, the boundaries that must stay explicit, and the adapter shape future runtimes should follow.
+
+## Why this matters
+
+Long-running coding agents often accumulate useful working context inside their native CLI session:
+
+- local reasoning already performed
+- files already inspected
+- constraints already internalized
+- short-horizon implementation intent
+
+Losing that context can slow work down.
+
+But blindly restoring old native sessions also creates a different risk:
+
+- the old session may still be anchored to an outdated requirement
+- a new request may actually require a full solution replacement, not a gradual migration
+- stale native context can silently bias the runtime toward compatibility patches when the user wants a clean redesign
+
+So recovery is not a simple "resume whenever possible" feature. CCCC needs a recovery model that helps preserve useful context without forcing old solution assumptions into new work.
+
+## Two different kinds of recovery
+
+CCCC should explicitly distinguish these two layers:
+
+### 1. Native runtime session recovery
+
+This means recovering the runtime's own session, if the runtime supports it.
+
+Examples:
+
+- `codex resume <session_id>`
+- a future `rovodev` resume command, if one exists
+- any runtime-specific mechanism that re-enters the same native conversation/thread/session
+
+This is vendor-specific, fragile, and often tied to undocumented local state.
+
+### 2. CCCC work-state recovery
+
+This means recovering the collaboration state that CCCC owns itself.
+
+Examples:
+
+- `cccc_bootstrap`
+- `cccc_help`
+- `cccc_project_info`
+- `cccc_context_get`
+- `cccc_task`
+- `cccc_coordination`
+- `cccc_agent_state`
+- `cccc_memory`
+- inbox / unread message continuity
+
+This layer is runtime-agnostic and should remain the primary recovery guarantee.
+ +## Core principle + +CCCC should treat native session recovery as an enhancement, not the foundation. + +The foundation should be: + +1. restore CCCC-controlled work state +2. decide whether native session continuity is desirable +3. only then attempt runtime-specific native resume + +This keeps CCCC compatible with multiple runtimes and avoids overfitting the architecture to one CLI's private internals. + +## Current state in CCCC + +Today, CCCC already has a strong generic work-state recovery path: + +- `cccc_bootstrap` returns a compact cold-start / resume packet +- `cccc_help` returns role-aware operating guidance +- `cccc_context_get`, `cccc_task`, and `cccc_coordination` restore shared planning state +- `cccc_agent_state` restores actor-owned working state +- `cccc_memory` and `memory_recall_gate` restore durable context + +By contrast, native session recovery is currently specialized for `codex` PTY actors. + +The next planned layer is actor-scoped native resume intent: + +- user config says whether an actor should prefer native resume +- runtime recovery code decides whether that request can be satisfied safely + +That separation matters. "User wants this actor to try resume" is a different +fact from "CCCC discovered enough runtime evidence to perform resume now". +That actor-scoped preference may live in direct actor settings or in a linked +reusable `profile`, but either way it remains intent rather than proof. + +For the concrete `codex`-first product shape, see +`Actor Native Resume Bindings`. + +That specialization is useful, but it should be understood as one runtime adapter, not the universal model. + +## The real risk: stale-context contamination + +The main failure mode is not only "session failed to resume". 
+ +Another important failure mode is: + +- requirement `A` existed +- the runtime built up a lot of local context around `A` +- later the user introduces requirement `B` +- `B` is not an incremental extension of `A` +- but the resumed native session keeps trying to gradually migrate `A` toward `B` +- the result becomes a compromise implementation the user never wanted + +This is especially dangerous when: + +- the core architecture is being replaced +- the data model is changing +- a previous implementation path should be abandoned completely +- the user explicitly does not want compatibility baggage + +So a strong recovery design must also include a strong **non-resume** path. + +## Recovery should be policy-driven + +CCCC should not have only one recovery mode. + +It should think in terms of recovery policies: + +### Policy A: `native_resume_preferred` + +Use when: + +- the same requirement is continuing +- the runtime session is likely still healthy +- local native context is valuable and not misleading + +Behavior: + +- restore CCCC work state +- try native runtime resume +- if that fails, fall back to fresh session + CCCC recovery + +### Policy B: `fresh_native_session_with_cccc_recovery` + +Use when: + +- the runtime session is unavailable or unreliable +- the runtime has no stable native resume mechanism +- CCCC context is more important than native continuity + +Behavior: + +- do not attempt native resume +- start a fresh runtime session +- inject CCCC recovery context through `cccc_bootstrap`, `cccc_help`, project info, tasks, memory, and inbox + +This should be the default cross-runtime fallback. 
+ +### Policy C: `strict_replan` + +Use when: + +- requirement drift is high +- the old implementation path should be discarded +- the user wants a new architecture or a clean replacement + +Behavior: + +- do not resume native session +- restore only the minimum shared context needed to understand goals and constraints +- explicitly tell the actor to treat prior implementation direction as non-binding +- require a fresh plan / design checkpoint before implementation + +This policy is important for avoiding the "A slowly migrates into B" contamination pattern. + +## What should count as the source of truth + +CCCC should treat these sources differently: + +### First-hand session truth + +The strongest source is the runtime reporting its own current session identity from inside the live session. + +For example, if a runtime supports an in-session command such as `/status`, that output is the strongest direct evidence of: + +- current session identifier +- current working directory +- current runtime mode + +This is useful because it reflects the session the runtime itself believes is active. + +### CCCC-owned recovery truth + +The strongest cross-runtime recovery truth is the state CCCC already owns: + +- group / actor identity +- scope attachment +- coordination +- tasks +- inbox +- memory +- actor state + +This is the system-level continuity CCCC can reliably preserve across runtimes. + +This class of readable CCCC-owned truth is also what the local glossary calls a +`host_surface`. + +### Actor-configured recovery intent + +There is also a third useful class of truth: explicit actor launch preference. + +Examples: + +- resume enabled for this actor +- stored `session_id` for this actor +- runtime-specific resume preference retained even while disabled + +This is not the same as a runtime artifact and not the same as PTY runner +evidence. It is product configuration supplied by the user. 
+ +### Local artifact discovery + +Searching local runtime files is useful, but should be treated as best-effort only. + +Examples: +- runtime-specific session files +- active-presence files +- sqlite state stores +- session indexes + +These may change at any time if the vendor CLI changes its private format or layout. + +## Related Glossary + +- [profile](/reference/glossary/profile) +- [resume](/reference/glossary/resume) +- [status](/reference/glossary/status) +- [host_surface](/reference/glossary/host_surface) +- [attach](/reference/glossary/attach) +- [authoritative_workspace](/reference/glossary/authoritative_workspace) + +## Change Log + +- `2026-03-21`: Added local glossary alignment so recovery strategy prose uses repo-local meanings for `resume`, `status`, `host_surface`, and workspace authority. +- `2026-03-23`: Added `profile` alignment so actor-configured recovery intent is kept separate from live runtime evidence. + +So local artifact discovery should help recovery, but should not define the architecture. + +## Design rule for future runtimes like `rovodev` + +When adding a new runtime, CCCC should avoid cloning the `codex` recovery mechanism directly. + +Instead, each runtime should answer these questions explicitly: + +1. Does the runtime expose a stable native session identifier? +2. Can that identifier be queried safely from inside the session? +3. Does the runtime provide an official resume command? +4. Is there a stable local state file or API, or only private internals? +5. Can CCCC start a fresh session and still recover enough work context through MCP? + +If the answer to `4` is "private internals only", CCCC should label that path as best-effort and optional. 
+ +## Proposed runtime recovery adapter contract + +Future runtimes should conceptually implement an adapter with fields like: + +- `runtime_id` +- `supports_native_resume` +- `supports_in_session_identity_probe` +- `identity_probe_mode` +- `identity_probe_command` +- `parse_identity_output(...)` +- `build_resume_command(...)` +- `discover_local_resume_artifacts(...)` +- `recommended_default_policy` +- `supports_strict_replan_hint` + +This keeps the common architecture stable while allowing runtime-specific behavior to vary. + +## Why actor-configured intent should survive disabled state + +If a user temporarily disables native resume for an actor, CCCC should not have +to forget the session identifier they entered. + +Otherwise the product forces users into an awkward cycle: + +1. disable resume to test a clean launch +2. later re-enable resume +3. manually re-enter the same session identifier again + +That is avoidable friction. A better model is: + +- `enabled` controls launch behavior now +- `session_id` remains stored until the user clears it explicitly + +This is especially useful for long-running projects where the same native +session may be valuable over multiple days of work. + +## Safe use of manual session reporting + +If a runtime supports a safe in-session status command, CCCC should prefer this flow: + +1. `foreman` asks peers to enter their own runtime terminal +2. each peer manually runs the runtime's self-status command +3. each peer reports back through `cccc_message_reply` +4. CCCC records or summarizes the reported session identity + +Important: + +- do not inject the command automatically into a live PTY unless the runtime explicitly guarantees that doing so is safe +- terminal output is not delivery; the peer must still report through CCCC messaging + +This pattern is more robust than trying to infer live session identity only from local files. 
+ +## Do not overfit recovery to native sessions + +A future runtime may have: + +- no official resume command +- no stable session file +- unstable or changing hook behavior +- no queryable local session database + +CCCC should still work well in that case. + +The minimum supported path should always remain: + +- start a fresh runtime process +- run `cccc_bootstrap` +- restore project and coordination state +- recover memory selectively +- continue from CCCC-owned context + +That gives CCCC a viable multi-runtime story even when native vendor session recovery is weak. + +## Recommended implementation order + +### Phase 1: make CCCC recovery first-class + +Prioritize: + +- clearer resume guidance around `cccc_bootstrap` +- better summary of recovered tasks / blockers / focus +- better role-note and help refresh flow +- better memory recall gating +- better review of unread obligations after restart + +This benefits every runtime. + +### Phase 2: make session inventory explicit + +CCCC should surface a structured inventory that distinguishes: + +- `runtime_reported_session_id` +- `cccc_persisted_session_id` +- `local_artifact_session_id` +- `native_resume_available` +- `recommended_recovery_policy` + +This avoids pretending all evidence sources have the same trust level. 
+ +### Phase 3: add runtime-specific adapters + +For each runtime such as `codex`, `claude`, or `rovodev`: + +- implement only the adapter pieces that the runtime can support safely +- do not force unsupported runtimes into fake parity +- prefer honest degradation over brittle heuristics + +## Recommended stance for `codex` today + +For `codex`, the practical stance should be: + +- first-hand in-session identity is valuable +- `session_id` is worth capturing +- local file discovery can help +- `session_log_path` discovery is best-effort, not a guaranteed interface +- `CCCC` recovery must still work when native resume artifacts are incomplete + +And for the next product step: + +- actor-bound `session_id` input is a reasonable user-facing primitive +- group-template flows should accept actor resume bindings outside the template + file itself +- actor settings should allow disabling native resume without deleting the saved + `session_id` + +So `codex` should be treated as: + +- **good candidate for native resume** +- **not the template that every future runtime must imitate** + +## Recommended stance for future runtimes + +For future runtimes such as `rovodev`, CCCC should assume: + +- native session recovery may be unavailable, incomplete, or unstable at first +- work-state recovery must still be complete enough to continue useful work +- strict replan must be easy when the user wants a true replacement instead of an incremental migration + +This gives CCCC a path that is both practical and honest. 
+ +## Working conclusion + +CCCC should not define recovery as: + +> "restore the exact native vendor session for every runtime" + +CCCC should define recovery as: + +> "restore collaboration continuity reliably, then use native runtime resume only when it is safe, valuable, and supported" + +That definition is much more compatible with: + +- multiple runtimes +- evolving vendor CLIs +- future runtimes like `rovodev` +- users who sometimes want continuity +- users who sometimes want a clean break from stale implementation context diff --git a/src/cccc/cli/actor_cmds.py b/src/cccc/cli/actor_cmds.py index 5a6041b2..d14807f5 100644 --- a/src/cccc/cli/actor_cmds.py +++ b/src/cccc/cli/actor_cmds.py @@ -13,9 +13,274 @@ "cmd_actor_restart", "cmd_actor_update", "cmd_actor_secrets", + "cmd_actor_profile_list", + "cmd_actor_profile_get", + "cmd_actor_profile_upsert", + "cmd_actor_profile_delete", + "cmd_actor_profile_secrets", "cmd_runtime_list", ] + +def _actor_profile_ref_request_args(args: argparse.Namespace) -> dict[str, str]: + scope = str(getattr(args, "scope", "") or "global").strip().lower() or "global" + if scope not in {"global", "user"}: + raise ValueError("invalid profile scope") + owner_id = str(getattr(args, "owner_id", "") or "").strip() + if scope == "global": + owner_id = "" + if scope == "user" and not owner_id: + raise ValueError("user scope profile requires owner_id") + return { + "profile_scope": scope, + "profile_owner": owner_id, + } + + +def _parse_profile_command_arg(raw: Any) -> list[str]: + text = str(raw or "").strip() + if not text: + return [] + try: + return shlex.split(text, posix=(os.name != "nt")) + except Exception: + return [text] + + +def _actor_profile_link_cli_args(args: argparse.Namespace) -> tuple[str, str, str]: + profile_id = str(getattr(args, "profile_id", "") or "").strip() + profile_scope = str(getattr(args, "profile_scope", "") or "global").strip().lower() or "global" + if profile_scope not in {"global", "user"}: + raise 
ValueError("invalid profile scope") + profile_owner = str(getattr(args, "profile_owner_id", "") or "").strip() + if profile_scope == "global": + profile_owner = "" + if profile_id and profile_scope == "user" and not profile_owner: + raise ValueError("user scope profile requires owner_id") + return profile_id, profile_scope, profile_owner + + +def cmd_actor_profile_list(args: argparse.Namespace) -> int: + by = str(getattr(args, "by", "user") or "user").strip() or "user" + view = str(getattr(args, "view", "global") or "global").strip().lower() or "global" + if view not in {"global", "my", "all"}: + _print_json({"ok": False, "error": {"code": "invalid_request", "message": "invalid view"}}) + return 2 + if not _ensure_daemon_running(): + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable"}}) + return 2 + resp = call_daemon({"op": "actor_profile_list", "args": {"by": by, "view": view}}) + _print_json(resp) + return 0 if resp.get("ok") else 2 + + +def cmd_actor_profile_get(args: argparse.Namespace) -> int: + profile_id = str(getattr(args, "profile_id", "") or "").strip() + by = str(getattr(args, "by", "user") or "user").strip() or "user" + if not profile_id: + _print_json({"ok": False, "error": {"code": "missing_profile_id", "message": "missing profile_id"}}) + return 2 + try: + ref_args = _actor_profile_ref_request_args(args) + except ValueError as e: + _print_json({"ok": False, "error": {"code": "invalid_request", "message": str(e)}}) + return 2 + if not _ensure_daemon_running(): + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable"}}) + return 2 + req_args = { + "profile_id": profile_id, + "by": by, + **ref_args, + } + resp = call_daemon({"op": "actor_profile_get", "args": req_args}) + _print_json(resp) + return 0 if resp.get("ok") else 2 + + +def cmd_actor_profile_upsert(args: argparse.Namespace) -> int: + by = str(getattr(args, "by", "user") or "user").strip() or "user" + 
profile: dict[str, Any] = {} + + profile_id = str(getattr(args, "profile_id", "") or "").strip() + if profile_id: + profile["id"] = profile_id + + if getattr(args, "name", None) is not None: + profile["name"] = str(getattr(args, "name", "") or "") + if getattr(args, "runtime", None) is not None: + profile["runtime"] = str(getattr(args, "runtime", "") or "").strip() + if getattr(args, "runner", None) is not None: + profile["runner"] = str(getattr(args, "runner", "") or "").strip() + if getattr(args, "command", None) is not None: + profile["command"] = _parse_profile_command_arg(getattr(args, "command", None)) + if getattr(args, "submit", None) is not None: + profile["submit"] = str(getattr(args, "submit", "") or "").strip() + + scope = getattr(args, "scope", None) + owner_id = getattr(args, "owner_id", None) + if scope is not None: + normalized_scope = str(scope or "global").strip().lower() or "global" + if normalized_scope not in {"global", "user"}: + _print_json({"ok": False, "error": {"code": "invalid_request", "message": "invalid profile scope"}}) + return 2 + profile["scope"] = normalized_scope + if owner_id is not None: + profile["owner_id"] = str(owner_id or "").strip() + if str(profile.get("scope") or "global") == "global": + profile["owner_id"] = "" + if str(profile.get("scope") or "global") == "user" and not str(profile.get("owner_id") or "").strip(): + _print_json({"ok": False, "error": {"code": "invalid_request", "message": "user scope profile requires owner_id"}}) + return 2 + + capability_defaults_raw = getattr(args, "capability_defaults", "") + if str(capability_defaults_raw or "").strip(): + try: + profile["capability_defaults"] = _parse_json_object_arg( + capability_defaults_raw, + field="capability_defaults", + ) + except ValueError as e: + _print_json({"ok": False, "error": {"code": "invalid_capability_defaults", "message": str(e)}}) + return 2 + + if not profile: + _print_json({"ok": False, "error": {"code": "empty_profile", "message": "nothing 
to upsert"}}) + return 2 + + expected_revision = getattr(args, "expected_revision", None) + normalized_expected_revision: int | None = None + if expected_revision is not None: + try: + normalized_expected_revision = int(expected_revision) + except Exception: + _print_json({"ok": False, "error": {"code": "invalid_request", "message": "expected_revision must be an integer"}}) + return 2 + + if not _ensure_daemon_running(): + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable"}}) + return 2 + + req_args: dict[str, Any] = { + "by": by, + "profile": profile, + } + if normalized_expected_revision is not None: + req_args["expected_revision"] = normalized_expected_revision + + resp = call_daemon({"op": "actor_profile_upsert", "args": req_args}) + _print_json(resp) + return 0 if resp.get("ok") else 2 + + +def cmd_actor_profile_delete(args: argparse.Namespace) -> int: + profile_id = str(getattr(args, "profile_id", "") or "").strip() + by = str(getattr(args, "by", "user") or "user").strip() or "user" + if not profile_id: + _print_json({"ok": False, "error": {"code": "missing_profile_id", "message": "missing profile_id"}}) + return 2 + try: + ref_args = _actor_profile_ref_request_args(args) + except ValueError as e: + _print_json({"ok": False, "error": {"code": "invalid_request", "message": str(e)}}) + return 2 + if not _ensure_daemon_running(): + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable"}}) + return 2 + req_args: dict[str, Any] = { + "profile_id": profile_id, + "by": by, + "force_detach": bool(getattr(args, "force_detach", False)), + **ref_args, + } + resp = call_daemon({"op": "actor_profile_delete", "args": req_args}) + _print_json(resp) + return 0 if resp.get("ok") else 2 + + +def cmd_actor_profile_secrets(args: argparse.Namespace) -> int: + profile_id = str(getattr(args, "profile_id", "") or "").strip() + by = str(getattr(args, "by", "user") or "user").strip() or "user" + 
if not profile_id: + _print_json({"ok": False, "error": {"code": "missing_profile_id", "message": "missing profile_id"}}) + return 2 + try: + ref_args = _actor_profile_ref_request_args(args) + except ValueError as e: + _print_json({"ok": False, "error": {"code": "invalid_request", "message": str(e)}}) + return 2 + if bool(getattr(args, "keys", False)) and ( + bool(getattr(args, "set", []) or []) + or bool(getattr(args, "unset", []) or []) + or bool(getattr(args, "clear", False)) + ): + _print_json( + { + "ok": False, + "error": { + "code": "invalid_request", + "message": "--keys cannot be combined with --set/--unset/--clear", + }, + } + ) + return 2 + if bool(getattr(args, "keys", False)): + if not _ensure_daemon_running(): + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable"}}) + return 2 + req_args = { + "profile_id": profile_id, + "by": by, + **ref_args, + } + resp = call_daemon({"op": "actor_profile_secret_keys", "args": req_args}) + _print_json(resp) + return 0 if resp.get("ok") else 2 + + set_vars: dict[str, str] = {} + for item in (getattr(args, "set", []) or []): + if not isinstance(item, str) or "=" not in item: + continue + k, v = item.split("=", 1) + k = k.strip() + if not k: + continue + set_vars[k] = v + + unset_keys: list[str] = [] + for item in (getattr(args, "unset", []) or []): + k = str(item or "").strip() + if k: + unset_keys.append(k) + + clear = bool(getattr(args, "clear", False)) + if not set_vars and not unset_keys and not clear: + _print_json({"ok": False, "error": {"code": "empty_secret_update", "message": "nothing to update"}}) + return 2 + if not _ensure_daemon_running(): + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable"}}) + return 2 + + req_args = { + "profile_id": profile_id, + "by": by, + **ref_args, + } + + resp = call_daemon( + { + "op": "actor_profile_secret_update", + "args": { + **req_args, + "set": set_vars, + "unset": unset_keys, + 
"clear": clear, + }, + } + ) + _print_json(resp) + return 0 if resp.get("ok") else 2 + def cmd_actor_list(args: argparse.Namespace) -> int: group_id = _resolve_group_id(getattr(args, "group", "")) if not group_id: @@ -24,9 +289,8 @@ def cmd_actor_list(args: argparse.Namespace) -> int: if _ensure_daemon_running(): resp = call_daemon({"op": "actor_list", "args": {"group_id": group_id}}) - if resp.get("ok"): - _print_json(resp) - return 0 + _print_json(resp) + return 0 if resp.get("ok") else 2 group = load_group(group_id) if group is None: @@ -51,6 +315,11 @@ def cmd_actor_add(args: argparse.Namespace) -> int: submit = str(args.submit or "enter").strip() or "enter" runner = str(getattr(args, "runner", "") or "pty").strip() or "pty" runtime = str(getattr(args, "runtime", "") or "codex").strip() or "codex" + try: + profile_id, profile_scope, profile_owner = _actor_profile_link_cli_args(args) + except ValueError as e: + _print_json({"ok": False, "error": {"code": "invalid_request", "message": str(e)}}) + return 2 command: list[str] = [] if args.command: try: @@ -103,12 +372,18 @@ def cmd_actor_add(args: argparse.Namespace) -> int: "command": command, "env": env, "default_scope_key": default_scope_key, + "profile_id": profile_id, + "profile_scope": profile_scope, + "profile_owner": profile_owner, }, } ) - if resp.get("ok"): - _print_json(resp) - return 0 + _print_json(resp) + return 0 if resp.get("ok") else 2 + + if profile_id: + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable for profile-linked actor add"}}) + return 2 try: require_actor_permission(group, by=by, action="actor.add") @@ -147,9 +422,8 @@ def cmd_actor_remove(args: argparse.Namespace) -> int: if _ensure_daemon_running(): resp = call_daemon({"op": "actor_remove", "args": {"group_id": group_id, "actor_id": actor_id, "by": by}}) - if resp.get("ok"): - _print_json(resp) - return 0 + _print_json(resp) + return 0 if resp.get("ok") else 2 group = load_group(group_id) 
if group is None: @@ -175,9 +449,8 @@ def cmd_actor_start(args: argparse.Namespace) -> int: if _ensure_daemon_running(): resp = call_daemon({"op": "actor_start", "args": {"group_id": group_id, "actor_id": actor_id, "by": by}}) - if resp.get("ok"): - _print_json(resp) - return 0 + _print_json(resp) + return 0 if resp.get("ok") else 2 group = load_group(group_id) if group is None: @@ -203,9 +476,8 @@ def cmd_actor_stop(args: argparse.Namespace) -> int: if _ensure_daemon_running(): resp = call_daemon({"op": "actor_stop", "args": {"group_id": group_id, "actor_id": actor_id, "by": by}}) - if resp.get("ok"): - _print_json(resp) - return 0 + _print_json(resp) + return 0 if resp.get("ok") else 2 group = load_group(group_id) if group is None: @@ -231,9 +503,8 @@ def cmd_actor_restart(args: argparse.Namespace) -> int: if _ensure_daemon_running(): resp = call_daemon({"op": "actor_restart", "args": {"group_id": group_id, "actor_id": actor_id, "by": by}}) - if resp.get("ok"): - _print_json(resp) - return 0 + _print_json(resp) + return 0 if resp.get("ok") else 2 group = load_group(group_id) if group is None: @@ -257,6 +528,12 @@ def cmd_actor_update(args: argparse.Namespace) -> int: actor_id = str(args.actor_id or "").strip() by = str(args.by or "user").strip() + try: + profile_id, profile_scope, profile_owner = _actor_profile_link_cli_args(args) + except ValueError as e: + _print_json({"ok": False, "error": {"code": "invalid_request", "message": str(e)}}) + return 2 + profile_action = str(getattr(args, "profile_action", "") or "").strip() group = load_group(group_id) if group is None: @@ -305,15 +582,26 @@ def cmd_actor_update(args: argparse.Namespace) -> int: if args.enabled is not None: patch["enabled"] = bool(args.enabled) - if not patch: + if not patch and not profile_id and not profile_action: _print_json({"ok": False, "error": {"code": "empty_patch", "message": "nothing to update"}}) return 2 if _ensure_daemon_running(): - resp = call_daemon({"op": "actor_update", "args": 
{"group_id": group_id, "actor_id": actor_id, "patch": patch, "by": by}}) + req_args: dict[str, Any] = {"group_id": group_id, "actor_id": actor_id, "patch": patch, "by": by} + if profile_id: + req_args["profile_id"] = profile_id + req_args["profile_scope"] = profile_scope + req_args["profile_owner"] = profile_owner + if profile_action: + req_args["profile_action"] = profile_action + resp = call_daemon({"op": "actor_update", "args": req_args}) _print_json(resp) return 0 if resp.get("ok") else 2 + if profile_id or profile_action: + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable for actor profile linkage update"}}) + return 2 + try: require_actor_permission(group, by=by, action="actor.update", target_actor_id=actor_id) actor = update_actor(group, actor_id, patch) @@ -333,16 +621,23 @@ def cmd_actor_secrets(args: argparse.Namespace) -> int: actor_id = str(args.actor_id or "").strip() by = str(args.by or "user").strip() or "user" - - if not _ensure_daemon_running(): - _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable"}}) + if bool(getattr(args, "keys", False)) and ( + bool(getattr(args, "set", []) or []) + or bool(getattr(args, "unset", []) or []) + or bool(getattr(args, "clear", False)) + or bool(getattr(args, "restart", False)) + ): + _print_json( + { + "ok": False, + "error": { + "code": "invalid_request", + "message": "--keys cannot be combined with --set/--unset/--clear/--restart", + }, + } + ) return 2 - if getattr(args, "keys", False): - resp = call_daemon({"op": "actor_env_private_keys", "args": {"group_id": group_id, "actor_id": actor_id, "by": by}}) - _print_json(resp) - return 0 if resp.get("ok") else 2 - set_vars: dict[str, str] = {} for item in (args.set or []): if not isinstance(item, str) or "=" not in item: @@ -361,6 +656,18 @@ def cmd_actor_secrets(args: argparse.Namespace) -> int: clear = bool(getattr(args, "clear", False)) restart = bool(getattr(args, 
"restart", False)) + if not getattr(args, "keys", False) and not set_vars and not unset_keys and not clear: + _print_json({"ok": False, "error": {"code": "empty_secret_update", "message": "nothing to update"}}) + return 2 + + if not _ensure_daemon_running(): + _print_json({"ok": False, "error": {"code": "daemon_unavailable", "message": "daemon unavailable"}}) + return 2 + + if getattr(args, "keys", False): + resp = call_daemon({"op": "actor_env_private_keys", "args": {"group_id": group_id, "actor_id": actor_id, "by": by}}) + _print_json(resp) + return 0 if resp.get("ok") else 2 resp = call_daemon( { diff --git a/src/cccc/cli/main.py b/src/cccc/cli/main.py index a061d406..1ba54621 100644 --- a/src/cccc/cli/main.py +++ b/src/cccc/cli/main.py @@ -102,6 +102,9 @@ def build_parser() -> argparse.ArgumentParser: p_actor_add.add_argument("--command", default="", help="Command to run (shell-like string; optional, auto-set by --runtime)") p_actor_add.add_argument("--env", action="append", default=[], help="Environment var (KEY=VAL), repeatable") p_actor_add.add_argument("--scope", default="", help="Default scope path for this actor (optional; must be attached)") + p_actor_add.add_argument("--profile-id", default="", help="Link this actor to a reusable profile") + p_actor_add.add_argument("--profile-scope", choices=["global", "user"], default="global", help="Profile scope for --profile-id (default: global)") + p_actor_add.add_argument("--profile-owner-id", default="", help="Profile owner id for user-scoped profiles") p_actor_add.add_argument("--submit", choices=["enter", "newline", "none"], default="enter", help="Submit key (default: enter)") p_actor_add.add_argument("--by", default="user", help="Requester (default: user)") p_actor_add.add_argument("--group", default="", help="Target group_id (default: active group)") @@ -138,6 +141,10 @@ def build_parser() -> argparse.ArgumentParser: p_actor_update.add_argument("--command", default=None, help="Replace command (shell-like 
string); use empty to clear") p_actor_update.add_argument("--env", action="append", default=[], help="Replace env with these KEY=VAL entries (repeatable)") p_actor_update.add_argument("--scope", default="", help="Set default scope path (must be attached)") + p_actor_update.add_argument("--profile-id", default="", help="Attach this actor to a reusable profile") + p_actor_update.add_argument("--profile-scope", choices=["global", "user"], default="global", help="Profile scope for --profile-id (default: global)") + p_actor_update.add_argument("--profile-owner-id", default="", help="Profile owner id for user-scoped profiles") + p_actor_update.add_argument("--profile-action", choices=["convert_to_custom"], default=None, help="Profile linkage action") p_actor_update.add_argument("--submit", choices=["enter", "newline", "none"], default=None, help="Submit key") p_actor_update.add_argument("--enabled", type=int, choices=[0, 1], default=None, help="Set enabled (1) or disabled (0)") p_actor_update.add_argument("--by", default="user", help="Requester (default: user)") @@ -155,6 +162,59 @@ def build_parser() -> argparse.ArgumentParser: p_actor_secrets.add_argument("--group", default="", help="Target group_id (default: active group)") p_actor_secrets.set_defaults(func=cmd_actor_secrets) + p_actor_profile = actor_sub.add_parser("profile", help="Manage reusable actor runtime profiles") + actor_profile_sub = p_actor_profile.add_subparsers(dest="profile_action", required=True) + + p_actor_profile_list = actor_profile_sub.add_parser("list", help="List reusable actor profiles") + p_actor_profile_list.add_argument("--view", choices=["global", "my", "all"], default="global", help="Profile view (default: global)") + p_actor_profile_list.add_argument("--by", default="user", help="Requester (default: user)") + p_actor_profile_list.set_defaults(func=cmd_actor_profile_list) + + p_actor_profile_get = actor_profile_sub.add_parser("get", help="Show one reusable actor profile") + 
p_actor_profile_get.add_argument("profile_id", help="Profile id") + p_actor_profile_get.add_argument("--scope", choices=["global", "user"], default="global", help="Profile scope (default: global)") + p_actor_profile_get.add_argument("--owner-id", default="", help="Profile owner id for user-scoped profiles") + p_actor_profile_get.add_argument("--by", default="user", help="Requester (default: user)") + p_actor_profile_get.set_defaults(func=cmd_actor_profile_get) + + p_actor_profile_upsert = actor_profile_sub.add_parser("upsert", help="Create or update a reusable actor profile") + p_actor_profile_upsert.add_argument("--id", dest="profile_id", default="", help="Profile id (optional for create)") + p_actor_profile_upsert.add_argument("--name", default=None, help="Profile display name") + p_actor_profile_upsert.add_argument( + "--runtime", + choices=["claude", "codex", "droid", "amp", "auggie", "neovate", "gemini", "kimi", "custom"], + default=None, + help="Profile runtime", + ) + p_actor_profile_upsert.add_argument("--runner", choices=["pty", "headless"], default=None, help="Profile runner") + p_actor_profile_upsert.add_argument("--command", default=None, help="Command to store for this profile") + p_actor_profile_upsert.add_argument("--submit", choices=["enter", "newline", "none"], default=None, help="Submit key") + p_actor_profile_upsert.add_argument("--scope", choices=["global", "user"], default=None, help="Profile scope") + p_actor_profile_upsert.add_argument("--owner-id", default=None, help="Profile owner id for user-scoped profiles") + p_actor_profile_upsert.add_argument("--expected-revision", type=int, default=None, help="Expected profile revision for optimistic writes") + p_actor_profile_upsert.add_argument("--capability-defaults", default="", help="JSON object for capability defaults") + p_actor_profile_upsert.add_argument("--by", default="user", help="Requester (default: user)") + p_actor_profile_upsert.set_defaults(func=cmd_actor_profile_upsert) + + 
p_actor_profile_delete = actor_profile_sub.add_parser("delete", help="Delete a reusable actor profile") + p_actor_profile_delete.add_argument("profile_id", help="Profile id") + p_actor_profile_delete.add_argument("--scope", choices=["global", "user"], default="global", help="Profile scope (default: global)") + p_actor_profile_delete.add_argument("--owner-id", default="", help="Profile owner id for user-scoped profiles") + p_actor_profile_delete.add_argument("--force-detach", action="store_true", help="Detach linked actors before delete when supported") + p_actor_profile_delete.add_argument("--by", default="user", help="Requester (default: user)") + p_actor_profile_delete.set_defaults(func=cmd_actor_profile_delete) + + p_actor_profile_secrets = actor_profile_sub.add_parser("secrets", help="Manage runtime-only secrets for a reusable actor profile") + p_actor_profile_secrets.add_argument("profile_id", help="Profile id") + p_actor_profile_secrets.add_argument("--scope", choices=["global", "user"], default="global", help="Profile scope (default: global)") + p_actor_profile_secrets.add_argument("--owner-id", default="", help="Profile owner id for user-scoped profiles") + p_actor_profile_secrets.add_argument("--set", action="append", default=[], help="Set secret env (KEY=VALUE), repeatable") + p_actor_profile_secrets.add_argument("--unset", action="append", default=[], help="Unset secret key (KEY), repeatable") + p_actor_profile_secrets.add_argument("--clear", action="store_true", help="Clear all secrets for this profile") + p_actor_profile_secrets.add_argument("--keys", action="store_true", help="List configured keys (no values)") + p_actor_profile_secrets.add_argument("--by", default="user", help="Requester (default: user)") + p_actor_profile_secrets.set_defaults(func=cmd_actor_profile_secrets) + p_inbox = sub.add_parser("inbox", help="List unread messages for an actor (chat messages + system notifications)") p_inbox.add_argument("--actor-id", required=True, 
help="Target actor id") p_inbox.add_argument("--by", default="user", help="Requester (default: user)") diff --git a/tests/test_cli_actor_daemon_error_preservation.py b/tests/test_cli_actor_daemon_error_preservation.py new file mode 100644 index 00000000..6d4fdf9f --- /dev/null +++ b/tests/test_cli_actor_daemon_error_preservation.py @@ -0,0 +1,100 @@ +import unittest +from argparse import Namespace +from types import SimpleNamespace +from unittest.mock import patch + + +class TestCliActorDaemonErrorPreservation(unittest.TestCase): + def test_actor_list_preserves_daemon_error(self) -> None: + from cccc import cli + + daemon_resp = { + "ok": False, + "error": {"code": "permission_denied", "message": "not allowed"}, + } + with patch.object(cli, "_resolve_group_id", return_value="g_test"), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "list_actors", side_effect=AssertionError("local fallback must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_list(Namespace(group="g_test")) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "permission_denied") + + def test_actor_remove_preserves_daemon_error(self) -> None: + from cccc import cli + + daemon_resp = { + "ok": False, + "error": {"code": "permission_denied", "message": "not allowed"}, + } + with patch.object(cli, "load_group", return_value=SimpleNamespace(group_id="g_test", doc={"scopes": []})), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "remove_actor", side_effect=AssertionError("local fallback must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_remove(Namespace(group="g_test", actor_id="peer-a", by="user")) + 
+ self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "permission_denied") + + def test_actor_start_preserves_daemon_error(self) -> None: + from cccc import cli + + daemon_resp = { + "ok": False, + "error": {"code": "permission_denied", "message": "not allowed"}, + } + with patch.object(cli, "load_group", return_value=SimpleNamespace(group_id="g_test", doc={"scopes": []})), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "update_actor", side_effect=AssertionError("local fallback must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_start(Namespace(group="g_test", actor_id="peer-a", by="user")) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "permission_denied") + + def test_actor_stop_preserves_daemon_error(self) -> None: + from cccc import cli + + daemon_resp = { + "ok": False, + "error": {"code": "permission_denied", "message": "not allowed"}, + } + with patch.object(cli, "load_group", return_value=SimpleNamespace(group_id="g_test", doc={"scopes": []})), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "update_actor", side_effect=AssertionError("local fallback must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_stop(Namespace(group="g_test", actor_id="peer-a", by="user")) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "permission_denied") + + def test_actor_restart_preserves_daemon_error(self) -> None: + from cccc 
import cli + + daemon_resp = { + "ok": False, + "error": {"code": "permission_denied", "message": "not allowed"}, + } + with patch.object(cli, "load_group", return_value=SimpleNamespace(group_id="g_test", doc={"scopes": []})), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "update_actor", side_effect=AssertionError("local fallback must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_restart(Namespace(group="g_test", actor_id="peer-a", by="user")) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "permission_denied") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_cli_actor_profile_commands.py b/tests/test_cli_actor_profile_commands.py new file mode 100644 index 00000000..4c50e1c3 --- /dev/null +++ b/tests/test_cli_actor_profile_commands.py @@ -0,0 +1,542 @@ +import unittest +from argparse import Namespace +from unittest.mock import patch + + +class TestCliActorProfileCommands(unittest.TestCase): + def test_parser_accepts_actor_profile_commands(self) -> None: + from cccc import cli + + parser = cli.build_parser() + + args = parser.parse_args(["actor", "profile", "list", "--view", "my"]) + self.assertEqual(args.cmd, "actor") + self.assertEqual(args.action, "profile") + self.assertEqual(args.profile_action, "list") + self.assertEqual(args.view, "my") + + args = parser.parse_args(["actor", "profile", "get", "shared", "--scope", "user", "--owner-id", "member-user"]) + self.assertEqual(args.profile_action, "get") + self.assertEqual(args.profile_id, "shared") + self.assertEqual(args.scope, "user") + self.assertEqual(args.owner_id, "member-user") + + args = parser.parse_args( + [ + "actor", + "profile", + "upsert", + "--id", + "shared", + "--name", + "Shared Profile", + "--runtime", 
+ "claude", + "--command", + "claude --resume", + "--capability-defaults", + '{"autoload_capabilities":["pack:space"]}', + ] + ) + self.assertEqual(args.profile_action, "upsert") + self.assertEqual(args.profile_id, "shared") + self.assertEqual(args.name, "Shared Profile") + self.assertEqual(args.runtime, "claude") + self.assertEqual(args.command, "claude --resume") + + args = parser.parse_args(["actor", "profile", "delete", "shared", "--force-detach"]) + self.assertEqual(args.profile_action, "delete") + self.assertEqual(args.profile_id, "shared") + self.assertTrue(args.force_detach) + + args = parser.parse_args(["actor", "profile", "secrets", "shared", "--keys"]) + self.assertEqual(args.profile_action, "secrets") + self.assertEqual(args.profile_id, "shared") + self.assertTrue(args.keys) + + def test_actor_profile_list_routes_to_daemon(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"profiles": []}} + + args = Namespace(view="all", by="user") + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_list(args) + + self.assertEqual(code, 0) + self.assertEqual(len(calls), 1) + req = calls[0] + self.assertEqual(req.get("op"), "actor_profile_list") + self.assertEqual(req.get("args", {}).get("view"), "all") + self.assertEqual(req.get("args", {}).get("by"), "user") + + def test_actor_profile_list_rejects_invalid_view_before_daemon(self) -> None: + from cccc import cli + + args = Namespace(view="team", by="user") + with patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_list(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = 
mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_profile_get_routes_scope_and_owner(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"profile": {"id": "shared"}}} + + args = Namespace(profile_id="shared", scope="user", owner_id="member-user", by="user") + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_get(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("op"), "actor_profile_get") + self.assertEqual(req.get("args", {}).get("profile_id"), "shared") + self.assertEqual(req.get("args", {}).get("profile_scope"), "user") + self.assertEqual(req.get("args", {}).get("profile_owner"), "member-user") + + def test_actor_profile_get_strips_owner_for_global_scope(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"profile": {"id": "shared"}}} + + args = Namespace(profile_id="shared", scope="global", owner_id="member-user", by="user") + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_get(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("args", {}).get("profile_scope"), "global") + self.assertEqual(req.get("args", {}).get("profile_owner"), "") + + def test_actor_profile_get_rejects_invalid_scope_before_daemon(self) -> None: + from cccc import cli + + args = Namespace(profile_id="shared", scope="team", owner_id="", by="user") + with patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, 
"call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_get(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_profile_get_rejects_user_scope_without_owner_before_daemon(self) -> None: + from cccc import cli + + args = Namespace(profile_id="shared", scope="user", owner_id="", by="user") + with patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_get(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_profile_upsert_builds_profile_payload(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"profile": {"id": "shared"}}} + + args = Namespace( + profile_id="shared", + name="Shared Profile", + runtime="claude", + runner="pty", + command="claude --resume", + submit="newline", + scope="user", + owner_id="member-user", + expected_revision=3, + capability_defaults='{"autoload_capabilities":["pack:space"],"default_scope":"session"}', + by="user", + ) + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_upsert(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("op"), "actor_profile_upsert") + payload = req.get("args", {}).get("profile") or {} + 
self.assertEqual(payload.get("id"), "shared") + self.assertEqual(payload.get("name"), "Shared Profile") + self.assertEqual(payload.get("runtime"), "claude") + self.assertEqual(payload.get("runner"), "pty") + self.assertEqual(payload.get("command"), ["claude", "--resume"]) + self.assertEqual(payload.get("submit"), "newline") + self.assertEqual(payload.get("scope"), "user") + self.assertEqual(payload.get("owner_id"), "member-user") + self.assertEqual(payload.get("capability_defaults"), {"autoload_capabilities": ["pack:space"], "default_scope": "session"}) + self.assertEqual(req.get("args", {}).get("expected_revision"), 3) + + def test_actor_profile_upsert_strips_owner_for_global_scope(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"profile": {"id": "shared"}}} + + args = Namespace( + profile_id="shared", + name="Shared Profile", + runtime="codex", + runner="pty", + command="", + submit="enter", + scope="global", + owner_id="member-user", + expected_revision=None, + capability_defaults="", + by="user", + ) + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_upsert(args) + + self.assertEqual(code, 0) + req = calls[0] + payload = req.get("args", {}).get("profile") or {} + self.assertEqual(payload.get("scope"), "global") + self.assertEqual(payload.get("owner_id"), "") + + def test_actor_profile_delete_routes_force_detach(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"deleted": True}} + + args = Namespace(profile_id="shared", scope="global", owner_id="", force_detach=True, by="user") + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + 
patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_delete(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("op"), "actor_profile_delete") + self.assertEqual(req.get("args", {}).get("profile_id"), "shared") + self.assertEqual(req.get("args", {}).get("profile_scope"), "global") + self.assertEqual(req.get("args", {}).get("profile_owner"), "") + self.assertTrue(req.get("args", {}).get("force_detach")) + + def test_actor_profile_delete_strips_owner_for_global_scope(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"deleted": True}} + + args = Namespace(profile_id="shared", scope="global", owner_id="member-user", force_detach=False, by="user") + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_delete(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("args", {}).get("profile_scope"), "global") + self.assertEqual(req.get("args", {}).get("profile_owner"), "") + + def test_actor_profile_upsert_rejects_invalid_capability_defaults_json(self) -> None: + from cccc import cli + + args = Namespace( + profile_id="shared", + name="Shared Profile", + runtime="codex", + runner="pty", + command="", + submit="enter", + scope="global", + owner_id="", + expected_revision=None, + capability_defaults="{bad json", + by="user", + ) + with patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_upsert(args) + + self.assertEqual(code, 2) + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_capability_defaults") + + def 
test_actor_profile_upsert_rejects_invalid_expected_revision_before_daemon(self) -> None: + from cccc import cli + + args = Namespace( + profile_id="shared", + name="Shared Profile", + runtime="codex", + runner="pty", + command="", + submit="enter", + scope="global", + owner_id="", + expected_revision="abc", + capability_defaults="", + by="user", + ) + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_upsert(args) + + self.assertEqual(code, 2) + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_profile_upsert_rejects_invalid_expected_revision_without_daemon_check(self) -> None: + from cccc import cli + + args = Namespace( + profile_id="shared", + name="Shared Profile", + runtime="codex", + runner="pty", + command="", + submit="enter", + scope="global", + owner_id="", + expected_revision="abc", + capability_defaults="", + by="user", + ) + with patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_upsert(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_profile_upsert_rejects_user_scope_without_owner_before_daemon(self) -> None: + from cccc import cli + + args = Namespace( + profile_id="shared", + name="Shared Profile", + runtime="codex", + runner="pty", + command="", + submit="enter", + scope="user", + owner_id="", + expected_revision=None, + capability_defaults="", + by="user", + ) + with 
patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_upsert(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_profile_upsert_rejects_invalid_scope_before_daemon(self) -> None: + from cccc import cli + + args = Namespace( + profile_id="shared", + name="Shared Profile", + runtime="codex", + runner="pty", + command="", + submit="enter", + scope="team", + owner_id="", + expected_revision=None, + capability_defaults="", + by="user", + ) + with patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_upsert(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_profile_secrets_keys_routes_to_daemon(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"keys": ["OPENAI_API_KEY"]}} + + args = Namespace(profile_id="shared", scope="user", owner_id="member-user", keys=True, set=[], unset=[], clear=False, by="user") + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_secrets(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("op"), "actor_profile_secret_keys") + 
self.assertEqual(req.get("args", {}).get("profile_id"), "shared") + self.assertEqual(req.get("args", {}).get("profile_scope"), "user") + self.assertEqual(req.get("args", {}).get("profile_owner"), "member-user") + + def test_actor_profile_secrets_rejects_keys_mode_with_update_flags(self) -> None: + from cccc import cli + + args = Namespace( + profile_id="shared", + scope="user", + owner_id="member-user", + keys=True, + set=["OPENAI_API_KEY=secret"], + unset=[], + clear=False, + by="user", + ) + with patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_secrets(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_profile_secrets_update_routes_set_unset_clear(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"updated": True}} + + args = Namespace( + profile_id="shared", + scope="global", + owner_id="", + keys=False, + set=["OPENAI_API_KEY=secret", "MODEL=gpt-5"], + unset=["OLD_KEY"], + clear=True, + by="user", + ) + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_profile_secrets(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("op"), "actor_profile_secret_update") + self.assertEqual(req.get("args", {}).get("set"), {"OPENAI_API_KEY": "secret", "MODEL": "gpt-5"}) + self.assertEqual(req.get("args", {}).get("unset"), ["OLD_KEY"]) + self.assertTrue(req.get("args", {}).get("clear")) + + def 
test_actor_profile_secrets_update_rejects_empty_operation(self) -> None: + from cccc import cli + + args = Namespace( + profile_id="shared", + scope="global", + owner_id="", + keys=False, + set=[], + unset=[], + clear=False, + by="user", + ) + with patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_secrets(args) + + self.assertEqual(code, 2) + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "empty_secret_update") + + def test_actor_profile_secrets_update_rejects_empty_operation_before_daemon_check(self) -> None: + from cccc import cli + + args = Namespace( + profile_id="shared", + scope="global", + owner_id="", + keys=False, + set=["MALFORMED"], + unset=[], + clear=False, + by="user", + ) + with patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_profile_secrets(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "empty_secret_update") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_cli_actor_profile_linkage.py b/tests/test_cli_actor_profile_linkage.py new file mode 100644 index 00000000..9dba43bf --- /dev/null +++ b/tests/test_cli_actor_profile_linkage.py @@ -0,0 +1,522 @@ +import unittest +from argparse import Namespace +from types import SimpleNamespace +from unittest.mock import patch + + +class TestCliActorProfileLinkage(unittest.TestCase): + def test_parser_accepts_actor_add_profile_link_args(self) -> None: + from cccc import cli + + parser = 
cli.build_parser() + args = parser.parse_args( + [ + "actor", + "add", + "peer-a", + "--profile-id", + "shared-profile", + "--profile-scope", + "user", + "--profile-owner-id", + "member-user", + ] + ) + + self.assertEqual(args.actor_id, "peer-a") + self.assertEqual(args.profile_id, "shared-profile") + self.assertEqual(args.profile_scope, "user") + self.assertEqual(args.profile_owner_id, "member-user") + + def test_parser_accepts_actor_update_profile_link_args(self) -> None: + from cccc import cli + + parser = cli.build_parser() + args = parser.parse_args( + [ + "actor", + "update", + "peer-a", + "--profile-id", + "shared-profile", + "--profile-scope", + "user", + "--profile-owner-id", + "member-user", + ] + ) + + self.assertEqual(args.actor_id, "peer-a") + self.assertEqual(args.profile_id, "shared-profile") + self.assertEqual(args.profile_scope, "user") + self.assertEqual(args.profile_owner_id, "member-user") + + def test_parser_accepts_actor_update_profile_action(self) -> None: + from cccc import cli + + parser = cli.build_parser() + args = parser.parse_args( + [ + "actor", + "update", + "peer-a", + "--profile-action", + "convert_to_custom", + ] + ) + + self.assertEqual(args.actor_id, "peer-a") + self.assertEqual(args.profile_action, "convert_to_custom") + + def test_actor_add_routes_profile_linkage_to_daemon(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"actor": {"id": "peer-a"}}} + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title="", + runtime="codex", + command="", + env=[], + scope="", + submit="enter", + by="user", + profile_id="shared-profile", + profile_scope="user", + profile_owner_id="member-user", + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + 
patch.object(cli, "_print_json"): + code = cli.cmd_actor_add(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("op"), "actor_add") + daemon_args = req.get("args", {}) + self.assertEqual(daemon_args.get("profile_id"), "shared-profile") + self.assertEqual(daemon_args.get("profile_scope"), "user") + self.assertEqual(daemon_args.get("profile_owner"), "member-user") + + def test_actor_add_strips_profile_owner_for_global_scope(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"actor": {"id": "peer-a"}}} + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title="", + runtime="codex", + command="", + env=[], + scope="", + submit="enter", + by="user", + profile_id="shared-profile", + profile_scope="global", + profile_owner_id="member-user", + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_add(args) + + self.assertEqual(code, 0) + daemon_args = calls[0].get("args", {}) + self.assertEqual(daemon_args.get("profile_scope"), "global") + self.assertEqual(daemon_args.get("profile_owner"), "") + + def test_actor_add_profile_linkage_without_daemon_returns_explicit_error(self) -> None: + from cccc import cli + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title="", + runtime="codex", + command="", + env=[], + scope="", + submit="enter", + by="user", + profile_id="shared-profile", + profile_scope="user", + profile_owner_id="member-user", + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=False), \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, 
"_print_json") as mock_print: + code = cli.cmd_actor_add(args) + + self.assertEqual(code, 2) + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "daemon_unavailable") + + def test_actor_add_rejects_user_scope_without_profile_owner_before_daemon(self) -> None: + from cccc import cli + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title="", + runtime="codex", + command="", + env=[], + scope="", + submit="enter", + by="user", + profile_id="shared-profile", + profile_scope="user", + profile_owner_id="", + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_add(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_add_profile_linkage_preserves_daemon_error(self) -> None: + from cccc import cli + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title="", + runtime="codex", + command="", + env=[], + scope="", + submit="enter", + by="user", + profile_id="missing-profile", + profile_scope="user", + profile_owner_id="member-user", + ) + daemon_resp = { + "ok": False, + "error": {"code": "profile_not_found", "message": "profile not found: missing-profile"}, + } + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "_print_json") as mock_print: + code = 
cli.cmd_actor_add(args) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "profile_not_found") + + def test_actor_add_without_profile_preserves_daemon_error(self) -> None: + from cccc import cli + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title="", + runtime="codex", + command="", + env=[], + scope="", + submit="enter", + by="user", + profile_id="", + profile_scope="global", + profile_owner_id="", + ) + daemon_resp = { + "ok": False, + "error": {"code": "permission_denied", "message": "not allowed"}, + } + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "add_actor", side_effect=AssertionError("local fallback must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_add(args) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "permission_denied") + + def test_actor_update_routes_profile_linkage_without_patch(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"actor": {"id": "peer-a"}}} + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title=None, + runtime=None, + command=None, + env=[], + scope="", + submit=None, + enabled=None, + by="user", + profile_id="shared-profile", + profile_scope="user", + profile_owner_id="member-user", + profile_action=None, + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", 
side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_update(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("op"), "actor_update") + daemon_args = req.get("args", {}) + self.assertEqual(daemon_args.get("profile_id"), "shared-profile") + self.assertEqual(daemon_args.get("profile_scope"), "user") + self.assertEqual(daemon_args.get("profile_owner"), "member-user") + self.assertEqual(daemon_args.get("patch"), {}) + + def test_actor_update_strips_profile_owner_for_global_scope(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"actor": {"id": "peer-a"}}} + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title=None, + runtime=None, + command=None, + env=[], + scope="", + submit=None, + enabled=None, + by="user", + profile_id="shared-profile", + profile_scope="global", + profile_owner_id="member-user", + profile_action=None, + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_update(args) + + self.assertEqual(code, 0) + daemon_args = calls[0].get("args", {}) + self.assertEqual(daemon_args.get("profile_scope"), "global") + self.assertEqual(daemon_args.get("profile_owner"), "") + + def test_actor_update_routes_profile_action_without_patch(self) -> None: + from cccc import cli + + calls = [] + + def _fake_call_daemon(req): + calls.append(req) + return {"ok": True, "result": {"actor": {"id": "peer-a"}}} + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title=None, + runtime=None, + command=None, + env=[], + scope="", + submit=None, + enabled=None, + by="user", + profile_id="", + 
profile_scope="global", + profile_owner_id="", + profile_action="convert_to_custom", + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", side_effect=_fake_call_daemon), \ + patch.object(cli, "_print_json"): + code = cli.cmd_actor_update(args) + + self.assertEqual(code, 0) + req = calls[0] + self.assertEqual(req.get("op"), "actor_update") + daemon_args = req.get("args", {}) + self.assertEqual(daemon_args.get("profile_action"), "convert_to_custom") + self.assertEqual(daemon_args.get("patch"), {}) + + def test_actor_update_profile_linkage_without_daemon_returns_explicit_error(self) -> None: + from cccc import cli + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title=None, + runtime=None, + command=None, + env=[], + scope="", + submit=None, + enabled=None, + by="user", + profile_id="shared-profile", + profile_scope="user", + profile_owner_id="member-user", + profile_action=None, + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=False), \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_update(args) + + self.assertEqual(code, 2) + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "daemon_unavailable") + + def test_actor_update_rejects_user_scope_without_profile_owner_before_daemon(self) -> None: + from cccc import cli + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title=None, + runtime=None, + command=None, + env=[], + scope="", + submit=None, + enabled=None, + by="user", + profile_id="shared-profile", + profile_scope="user", + profile_owner_id="", + 
profile_action=None, + ) + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running") as mock_daemon, \ + patch.object(cli, "call_daemon") as mock_call, \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_update(args) + + self.assertEqual(code, 2) + mock_daemon.assert_not_called() + mock_call.assert_not_called() + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_update_profile_linkage_preserves_daemon_invalid_request(self) -> None: + from cccc import cli + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title=None, + runtime=None, + command=None, + env=[], + scope="", + submit=None, + enabled=None, + by="user", + profile_id="shared-profile", + profile_scope="user", + profile_owner_id="member-user", + profile_action="convert_to_custom", + ) + daemon_resp = { + "ok": False, + "error": { + "code": "invalid_request", + "message": "profile_action and profile_id are mutually exclusive", + }, + } + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_update(args) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_update_without_profile_preserves_daemon_error(self) -> None: + from cccc import cli + + group = SimpleNamespace(doc={"scopes": []}) + args = Namespace( + group="g_test", + actor_id="peer-a", + title="peer-a", + runtime=None, + command=None, + env=[], + scope="", + submit=None, + enabled=None, + by="user", + profile_id="", + 
profile_scope="global", + profile_owner_id="", + profile_action=None, + ) + daemon_resp = { + "ok": False, + "error": {"code": "permission_denied", "message": "not allowed"}, + } + with patch.object(cli, "load_group", return_value=group), \ + patch.object(cli, "_ensure_daemon_running", return_value=True), \ + patch.object(cli, "call_daemon", return_value=daemon_resp), \ + patch.object(cli, "update_actor", side_effect=AssertionError("local fallback must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_update(args) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "permission_denied") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_cli_actor_secrets_commands.py b/tests/test_cli_actor_secrets_commands.py new file mode 100644 index 00000000..d64a5893 --- /dev/null +++ b/tests/test_cli_actor_secrets_commands.py @@ -0,0 +1,81 @@ +import unittest +from argparse import Namespace +from unittest.mock import patch + + +class TestCliActorSecretsCommands(unittest.TestCase): + def test_actor_secrets_rejects_keys_with_updates_before_daemon(self) -> None: + from cccc import cli + + with patch.object(cli, "_resolve_group_id", return_value="g_test"), \ + patch.object(cli, "_ensure_daemon_running", side_effect=AssertionError("daemon check must not run")), \ + patch.object(cli, "call_daemon", side_effect=AssertionError("daemon call must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_secrets( + Namespace( + group="g_test", + actor_id="peer-a", + by="user", + keys=True, + set=["OPENAI_API_KEY=secret"], + unset=[], + clear=False, + restart=False, + ) + ) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def 
test_actor_secrets_rejects_keys_with_restart_before_daemon(self) -> None: + from cccc import cli + + with patch.object(cli, "_resolve_group_id", return_value="g_test"), \ + patch.object(cli, "_ensure_daemon_running", side_effect=AssertionError("daemon check must not run")), \ + patch.object(cli, "call_daemon", side_effect=AssertionError("daemon call must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_secrets( + Namespace( + group="g_test", + actor_id="peer-a", + by="user", + keys=True, + set=[], + unset=[], + clear=False, + restart=True, + ) + ) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "invalid_request") + + def test_actor_secrets_rejects_empty_update_before_daemon(self) -> None: + from cccc import cli + + with patch.object(cli, "_resolve_group_id", return_value="g_test"), \ + patch.object(cli, "_ensure_daemon_running", side_effect=AssertionError("daemon check must not run")), \ + patch.object(cli, "call_daemon", side_effect=AssertionError("daemon call must not run")), \ + patch.object(cli, "_print_json") as mock_print: + code = cli.cmd_actor_secrets( + Namespace( + group="g_test", + actor_id="peer-a", + by="user", + keys=False, + set=[], + unset=[], + clear=False, + restart=False, + ) + ) + + self.assertEqual(code, 2) + printed = mock_print.call_args[0][0] if mock_print.call_args else {} + self.assertEqual(str((printed.get("error") or {}).get("code") or ""), "empty_secret_update") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_cli_reference_parity.py b/tests/test_cli_reference_parity.py index 2638ab88..3e945ff9 100644 --- a/tests/test_cli_reference_parity.py +++ b/tests/test_cli_reference_parity.py @@ -19,6 +19,18 @@ def test_cli_reference_includes_group_set_state_with_stopped(self) -> None: self.assertIn("cccc group set-state idle", text) 
self.assertIn("active/idle/paused/stopped", text) + def test_cli_reference_includes_actor_profile_commands(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + cli_doc = repo_root / "docs" / "reference" / "cli.md" + text = cli_doc.read_text(encoding="utf-8") + + self.assertIn("cccc actor profile list", text) + self.assertIn("cccc actor profile upsert", text) + self.assertIn("cccc actor profile secrets", text) + self.assertIn("Reusable `profile` records can now be managed directly", text) + self.assertIn("--profile-id", text) + self.assertIn("convert_to_custom", text) + if __name__ == "__main__": unittest.main() diff --git a/tests/test_docs_profile_cli_guides.py b/tests/test_docs_profile_cli_guides.py new file mode 100644 index 00000000..b992a487 --- /dev/null +++ b/tests/test_docs_profile_cli_guides.py @@ -0,0 +1,96 @@ +from pathlib import Path +import unittest + + +class TestDocsProfileCliGuides(unittest.TestCase): + def test_getting_started_cli_shows_profile_creation_and_binding(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "guide" / "getting-started" / "cli.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("cccc actor profile upsert", text) + self.assertIn("cccc actor add assistant --profile-id", text) + + def test_workflows_mentions_profile_backed_actor_setup(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "guide" / "workflows.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("cccc actor profile upsert", text) + self.assertIn("--profile-id", text) + + def test_use_cases_shows_profile_backed_actor_setup(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "guide" / "use-cases.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("cccc actor profile upsert", text) + self.assertIn("--profile-id", text) + + def test_best_practices_mentions_profile_backed_runtime_setup(self) -> None: + 
repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "guide" / "best-practices.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("cccc actor profile upsert", text) + self.assertIn("--profile-id", text) + + def test_faq_mentions_profile_management_path(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "guide" / "faq.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("cccc actor profile upsert", text) + self.assertIn("--profile-id", text) + + def test_getting_started_index_mentions_profile_backed_setup(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "guide" / "getting-started" / "index.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("profile-backed", text) + self.assertIn("cccc actor profile upsert", text) + + def test_getting_started_web_mentions_profile_backed_setup(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "guide" / "getting-started" / "web.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("profile-backed", text) + self.assertIn("cccc actor profile upsert", text) + + def test_guide_index_points_to_profile_backed_start_path(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "guide" / "index.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("profile-backed", text) + self.assertIn("cccc actor profile upsert", text) + + def test_features_mentions_cli_profile_surface(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "reference" / "features.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("cccc actor profile upsert", text) + self.assertIn("cccc actor add --profile-id", text) + + def test_positioning_mentions_profile_backed_runtime_model(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "reference" / 
"positioning.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("profile-backed", text) + self.assertIn("cccc actor profile upsert", text) + + def test_architecture_mentions_profile_backed_path(self) -> None: + repo_root = Path(__file__).resolve().parents[1] + doc = repo_root / "docs" / "reference" / "architecture.md" + text = doc.read_text(encoding="utf-8") + + self.assertIn("profile-backed", text) + self.assertIn("cccc actor add --profile-id", text) + + +if __name__ == "__main__": + unittest.main() diff --git a/web/src/App.tsx b/web/src/App.tsx index dfa903bf..81547e3b 100644 --- a/web/src/App.tsx +++ b/web/src/App.tsx @@ -130,6 +130,7 @@ export default function App() { const [mentionSelectedIndex, setMentionSelectedIndex] = React.useState(0); const [mountedActorIds, setMountedActorIds] = React.useState([]); const [ccccHome, setCcccHome] = React.useState(""); + const [daemonVersion, setDaemonVersion] = React.useState(""); const [canAccessGlobalSettings, setCanAccessGlobalSettings] = React.useState(null); // Custom hooks @@ -389,6 +390,7 @@ export default function App() { void api.fetchPing().then((resp) => { if (resp.ok) { setWebReadOnly(Boolean(resp.result?.web?.read_only)); + setDaemonVersion(String(resp.result?.daemon?.version || "").trim()); } }).catch(() => { /* ignore */ @@ -528,11 +530,13 @@ export default function App() { theme={theme} onThemeChange={setTheme} webReadOnly={webReadOnly} + webVersion={__WEB_VERSION__} selectedGroupId={selectedGroupId} groupDoc={groupDoc} selectedGroupRunning={selectedGroupRunning} actors={actors} sseStatus={sseStatus} + daemonVersion={daemonVersion} busy={busy} onOpenSidebar={() => setSidebarOpen(true)} onOpenGroupEdit={canManageGroups ? 
() => { diff --git a/web/src/components/layout/AppHeader.tsx b/web/src/components/layout/AppHeader.tsx index 9def2a75..b467ebe7 100644 --- a/web/src/components/layout/AppHeader.tsx +++ b/web/src/components/layout/AppHeader.tsx @@ -22,6 +22,8 @@ export interface AppHeaderProps { theme: Theme; onThemeChange: (theme: Theme) => void; webReadOnly?: boolean; + webVersion?: string; + daemonVersion?: string; selectedGroupId: string; groupDoc: GroupDoc | null; selectedGroupRunning: boolean; @@ -44,6 +46,8 @@ export function AppHeader({ theme, onThemeChange, webReadOnly, + webVersion, + daemonVersion, selectedGroupId, groupDoc, selectedGroupRunning, @@ -110,6 +114,26 @@ export function AppHeader({ ); })()} + {(webVersion || daemonVersion) && ( +
+ {webVersion && ( + + {t("connectedWebVersion", { version: webVersion })} + + )} + {daemonVersion && ( + + {t("connectedDaemonVersion", { version: daemonVersion })} + + )} +
+ )} {selectedGroupId && !webReadOnly && onOpenGroupEdit && ( diff --git a/web/src/env.d.ts b/web/src/env.d.ts new file mode 100644 index 00000000..827e42be --- /dev/null +++ b/web/src/env.d.ts @@ -0,0 +1 @@ +declare const __WEB_VERSION__: string; diff --git a/web/src/i18n/locales/en/layout.json b/web/src/i18n/locales/en/layout.json index 84249451..38490e6d 100644 --- a/web/src/i18n/locales/en/layout.json +++ b/web/src/i18n/locales/en/layout.json @@ -15,6 +15,10 @@ "menu": "Menu", "reconnecting": "Reconnecting…", "disconnected": "Disconnected", + "connectedWebVersion": "Web v{{version}}", + "connectedWebVersionTitle": "Current frontend web version: {{version}}", + "connectedDaemonVersion": "Daemon v{{version}}", + "connectedDaemonVersionTitle": "Connected backend daemon version: {{version}}", "dismissError": "Dismiss error", "createNewGroup": "Create new working group", "newGroup": "+ New", diff --git a/web/src/i18n/locales/ja/layout.json b/web/src/i18n/locales/ja/layout.json index e9e7a845..cf9a3b63 100644 --- a/web/src/i18n/locales/ja/layout.json +++ b/web/src/i18n/locales/ja/layout.json @@ -15,6 +15,10 @@ "menu": "メニュー", "reconnecting": "再接続中…", "disconnected": "接続なし", + "connectedWebVersion": "Web v{{version}}", + "connectedWebVersionTitle": "現在のフロントエンド Web バージョン: {{version}}", + "connectedDaemonVersion": "Daemon v{{version}}", + "connectedDaemonVersionTitle": "接続中のバックエンドデーモンのバージョン: {{version}}", "dismissError": "エラーを閉じる", "createNewGroup": "新しい作業グループを作成", "newGroup": "+ 新規", diff --git a/web/src/i18n/locales/zh/layout.json b/web/src/i18n/locales/zh/layout.json index be5c5e82..0103c0c0 100644 --- a/web/src/i18n/locales/zh/layout.json +++ b/web/src/i18n/locales/zh/layout.json @@ -15,6 +15,10 @@ "menu": "菜单", "reconnecting": "重连中…", "disconnected": "已断开", + "connectedWebVersion": "前端 v{{version}}", + "connectedWebVersionTitle": "当前前端 Web 版本:{{version}}", + "connectedDaemonVersion": "守护进程 v{{version}}", + "connectedDaemonVersionTitle": "当前连接的后端守护进程版本:{{version}}", 
"dismissError": "关闭错误提示", "createNewGroup": "创建新工作组", "newGroup": "+ 新建", diff --git a/web/src/services/api.ts b/web/src/services/api.ts index ede7de69..696bb03f 100644 --- a/web/src/services/api.ts +++ b/web/src/services/api.ts @@ -442,7 +442,18 @@ export async function fetchGroups() { export async function fetchPing(options?: { includeHome?: boolean }) { const includeHome = Boolean(options?.includeHome); const suffix = includeHome ? "?include_home=1" : ""; - return apiJson<{ home?: string; daemon: unknown; version: string; web?: { mode?: string; read_only?: boolean } }>( + return apiJson<{ + home?: string; + daemon?: { + version?: string; + pid?: number; + ts?: string; + ipc_v?: number; + capabilities?: Record; + }; + version: string; + web?: { mode?: string; read_only?: boolean }; + }>( `/api/v1/ping${suffix}` ); } diff --git a/web/vite.config.ts b/web/vite.config.ts index bc369b01..0e8e977e 100644 --- a/web/vite.config.ts +++ b/web/vite.config.ts @@ -1,9 +1,18 @@ +import { readFileSync } from "node:fs"; +import { fileURLToPath, URL } from "node:url"; import { defineConfig } from "vite"; import react from "@vitejs/plugin-react"; +const packageJson = JSON.parse( + readFileSync(fileURLToPath(new URL("./package.json", import.meta.url)), "utf8") +) as { version?: string }; + export default defineConfig({ plugins: [react()], base: "/ui/", + define: { + __WEB_VERSION__: JSON.stringify(String(packageJson.version || "").trim()), + }, resolve: { // Prefer the CJS build for xterm to avoid a minification bug that can break // the ESM build's `requestMode` handler (seen as `ReferenceError: i is not defined`).