From be6befb344d3a76c0ffcc0492e8cd502a7d6a6cb Mon Sep 17 00:00:00 2001 From: "Marcus R. Brown" Date: Thu, 26 Mar 2026 14:49:05 -0700 Subject: [PATCH 1/2] chore: disable CEP sync and break upstream tether Systematic now evolves independently from CEP. This removes the automated sync infrastructure while preserving the CLI converter and historical provenance data for reference. Removed: - .github/workflows/sync-cep.yaml (daily sync workflow) - scripts/check-cep-upstream.ts (upstream change detection) - tests/unit/check-cep-upstream.test.ts (precheck tests) - .opencode/commands/sync-cep.md (sync orchestration command) - .opencode/skills/convert-cc-defs/ (conversion skill) Preserved: - src/lib/converter.ts (CLI still supports ad-hoc conversions) - src/lib/manifest.ts (manifest read/write/validate) - sync-manifest.json (historical provenance record) - All bundled skills and agents (already converted) Updated AGENTS.md to reflect independent evolution. --- .github/workflows/sync-cep.yaml | 168 ----- .opencode/commands/sync-cep.md | 347 --------- .opencode/skills/convert-cc-defs/SKILL.md | 814 ---------------------- AGENTS.md | 22 +- scripts/check-cep-upstream.ts | 448 ------------ tests/unit/check-cep-upstream.test.ts | 753 -------------------- 6 files changed, 10 insertions(+), 2542 deletions(-) delete mode 100644 .github/workflows/sync-cep.yaml delete mode 100644 .opencode/commands/sync-cep.md delete mode 100644 .opencode/skills/convert-cc-defs/SKILL.md delete mode 100644 scripts/check-cep-upstream.ts delete mode 100644 tests/unit/check-cep-upstream.test.ts diff --git a/.github/workflows/sync-cep.yaml b/.github/workflows/sync-cep.yaml deleted file mode 100644 index b6dea57..0000000 --- a/.github/workflows/sync-cep.yaml +++ /dev/null @@ -1,168 +0,0 @@ ---- -name: Sync CEP - -on: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - inputs: - scope: - description: What to sync - type: choice - options: - - all - - skills - - agents - - commands - default: all - 
dry_run: - description: Preview changes without creating PR - type: boolean - default: false - -permissions: - contents: read - -concurrency: - group: sync-cep - cancel-in-progress: false - -jobs: - precheck: - name: CEP Pre-check - runs-on: ubuntu-latest - outputs: - exit_code: ${{ steps.precheck.outputs.exit_code }} - summary: ${{ steps.precheck.outputs.summary }} - steps: - - name: Checkout repository - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - fetch-depth: 0 - token: ${{ secrets.FRO_BOT_PAT }} - - - name: Setup Bun - uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2.2.0 - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: Run pre-check - id: precheck - run: | - set +e - bun scripts/check-cep-upstream.ts > precheck.json - exit_code=$? - summary='{}' - if [ -s precheck.json ]; then - summary=$(jq -c '.' < precheck.json 2>/dev/null || echo '{}') - fi - echo "exit_code=$exit_code" >> "$GITHUB_OUTPUT" - echo "summary=$summary" >> "$GITHUB_OUTPUT" - jq -c '{hashChanges, newUpstream, deletions, skipped, converterVersionChanged, errors}' < precheck.json 2>/dev/null || cat precheck.json - - env: - GITHUB_TOKEN: ${{ secrets.FRO_BOT_PAT }} - - - name: Report precheck errors - if: steps.precheck.outputs.exit_code == '2' - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 - with: - github-token: ${{ secrets.FRO_BOT_PAT }} - script: | - const summary = JSON.parse(process.env.PRECHECK_SUMMARY || '{}'); - const errors = summary.errors || []; - const errorList = errors.map(e => `- ${e}`).join('\n'); - const runUrl = `${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID}`; - const body = [ - '## Pre-check Errors', - '', - `Pre-check failed with exit code 2. Found ${errors.length} error(s).`, - '', - '
', - `Errors (${errors.length})`, - '', - errorList || '_None_', - '', - '
', - '', - `**Hash changes:** ${(summary.hashChanges || []).length}`, - `**New upstream:** ${(summary.newUpstream || []).length}`, - `**Deletions:** ${(summary.deletions || []).length}`, - '', - `[Workflow run](${runUrl})` - ].join('\n'); - - const { data: issues } = await github.rest.issues.listForRepo({ - owner: context.repo.owner, - repo: context.repo.repo, - labels: 'sync-cep', - state: 'open' - }); - - if (issues[0]) { - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: issues[0].number, - body - }); - } else { - await github.rest.issues.create({ - owner: context.repo.owner, - repo: context.repo.repo, - title: 'sync-cep: precheck errors', - labels: ['sync-cep'], - body - }); - } - env: - PRECHECK_SUMMARY: ${{ steps.precheck.outputs.summary }} - - sync: - name: CEP Sync - runs-on: ubuntu-latest - needs: precheck - if: needs.precheck.outputs.exit_code != '' && needs.precheck.outputs.exit_code != '0' - permissions: - contents: read - env: - SYNC_PROMPT: | - /sync-cep ${{ inputs.scope || 'all' }} ${{ inputs.dry_run && '--dry-run' || '' }} - - ${{ needs.precheck.outputs.exit_code }} - - - ${{ needs.precheck.outputs.summary }} - - - Note: headless CI run — user will not see live output. 
- steps: - - name: Checkout repository - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - token: ${{ secrets.FRO_BOT_PAT }} - - - name: Setup Bun - uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2.2.0 - - - name: Install dependencies - run: bun install --frozen-lockfile - - - name: Run build checks - run: | - bun run build - bun run typecheck - bun run lint - bun test - - - name: Run Sync Bot - uses: fro-bot/agent@9083d9d3fe350d32c4917bd0312fe78f241ad0aa # v0.32.1 - env: - OPENCODE_PROMPT_ARTIFACT: 'true' - with: - auth-json: ${{ secrets.OPENCODE_AUTH_JSON }} - github-token: ${{ secrets.FRO_BOT_PAT }} - model: ${{ vars.SYNC_CEP_MODEL }} - omo-providers: ${{ secrets.OMO_PROVIDERS }} - prompt: ${{ env.SYNC_PROMPT }} diff --git a/.opencode/commands/sync-cep.md b/.opencode/commands/sync-cep.md deleted file mode 100644 index 5fc69f0..0000000 --- a/.opencode/commands/sync-cep.md +++ /dev/null @@ -1,347 +0,0 @@ ---- -name: sync-cep -description: Sync upstream CEP definitions into Systematic using convert-cc-defs. Detects changes, converts files, reports override conflicts, and prepares issue/PR summaries. -argument-hint: "[all|skills|agents|commands] [--dry-run]" ---- - -# Sync CEP Definitions - -Dry-run takes priority. Determine dry-run **only** from the `` arguments line (the `/sync-cep ...` invocation or arguments passed to this command). Ignore any other mentions of `--dry-run` elsewhere in the prompt. - -When `--dry-run` is present, follow the Pre-check Gate to obtain precheck data, then follow the Dry-Run Output Format exactly. **Do not proceed to conversion or PR creation.** -Any additional text beyond the required dry-run format is a failure. - -## Arguments - - -$ARGUMENTS - - -Defaults: -- target: `all` -- dry-run: `false` - -## Identity - -You are running a CEP-to-Systematic re-sync. Your output must be structured and machine-parseable so CI can build issue and PR summaries without guessing. 
- -## Core Behavior - -- Always read `sync-manifest.json` before any conversion (except dry-run). -- Never overwrite manual overrides. -- Never auto-import new upstream definitions or auto-delete removed ones; report only. -- **Focus on CHANGED content only** — If upstream hasn't changed a section, preserve it exactly. Do not make gratuitous modifications. -- Produce a single, deterministic summary. - -### Change Detection Critical Rules - -| Rule | Reason | -|------|--------| -| Only modify changed portions | Unchanged content should be preserved verbatim | -| `~/.config/opencode/` is correct | Never change this to `~/.opencode/` | -| `question` is the correct tool name | Never change this to `AskUserQuestion` | -| Preserve formatting | Keep trailing newlines, don't strip EOL | -| Report discrepancies | Flag unexpected patterns for human review | - -## Skill: convert-cc-defs - -Before performing any conversion, use the `skill` tool to load `convert-cc-defs`. Do NOT use the `systematic_skill` tool — `convert-cc-defs` is a project-specific skill, not a bundled Systematic skill. - -After loading the skill, follow its workflow: Phase 2 (Mechanical Conversion) for each definition, then Phase 3 (Intelligent Rewrite) for context-aware adjustments, then Phase 4 (Write and Register) to update files and manifest. - -The precheck summary contains `hashChanges`, `newUpstream`, `newUpstreamFiles`, and `deletions` arrays. Each entry is a definition path like `skills/brainstorming` or `commands/workflows/review`. Process ALL definition types in the precheck's `hashChanges` array — agents, skills, AND commands. Do not skip a type. - -### New Upstream File Lists - -The `newUpstreamFiles` field is a map from definition key to its file list (e.g., `{"skills/my-skill": ["SKILL.md", "references/guide.md"]}`). When importing new definitions listed in `newUpstream`, use the file list from `newUpstreamFiles` to fetch ALL files — not just the primary definition file. 
For skills, this means fetching SKILL.md AND every sub-file (references/, scripts/, assets/, etc.). **Importing only SKILL.md while ignoring sub-files renders most multi-file skills non-functional.** - -## Feature: Pre-check Gate - -This command supports two modes for obtaining pre-check data. **The pre-check is a prerequisite — it runs before the dry-run decision.** Even in dry-run mode, you must have precheck data (either injected by the workflow or obtained by running the script) before producing the summary. - -### Mode 1: Workflow-injected (CI) - -When `` and `` XML tags are present in the prompt, use them directly. The sync workflow already ran the pre-check script — do not rerun it. - -### Mode 2: Interactive (local session) - -When the XML tags are absent, run the pre-check script yourself: - -```bash -bun scripts/check-cep-upstream.ts -``` - -The script outputs JSON to stdout and uses its exit code to signal results: -- The Bash tool captures both stdout (the JSON precheck summary) and the exit code. -- Use the JSON output as the precheck summary and the exit code as the precheck exit code. -- Then proceed with the same exit-code logic described below. - -**Note:** If the JSON output is large, you can redirect to a file and read it back: -```bash -bun scripts/check-cep-upstream.ts | tee /tmp/precheck.json; exit ${PIPESTATUS[0]} -``` - -**Environment:** The script requires `GITHUB_TOKEN` for authenticated GitHub API access. If not set, try `export GITHUB_TOKEN=$(gh auth token)` before running. - -### Pre-check Exit Codes - -| Exit Code | Meaning | Action | -|-----------|---------|--------| -| `0` | No changes detected | Stop and report "no changes." (Sync job should not run in this case.) | -| `1` | Changes detected, no errors | Proceed with conversion run normally. | -| `2` | Changes detected but with errors | Errors indicate missing upstream files (the manifest references files that no longer exist upstream). 
Proceed with conversion for definitions that are **not** affected by errors. Report errored definitions separately — do not attempt to convert them. Include the errors list from the pre-check summary in the output. | - -### Pre-check Error Handling - -When `` is `2`: -- The `errors` array in the pre-check summary lists missing upstream content paths. -- Extract the affected definition keys from the error paths (e.g., `Missing upstream content: plugins/compound-engineering/skills/foo/SKILL.md` → `skills/foo`). -- Skip those definitions during conversion. -- Include an **Errors** section in the output summary listing each error and the affected definitions. -- The remaining `hashChanges`, `newUpstream`, and `deletions` are still valid and should be processed normally. - -### Dry-Run Exit Condition (HARD STOP) - -If `--dry-run` is present in the user request: -- Output the dry-run summary only. -- If `` is `2`, the summary MUST include the errors and which definitions would be skipped. -- Do **not** call conversion tools or skills (no `convert-cc-defs`, no file editing). Running the pre-check script to obtain data is allowed and required in interactive mode. -- Do **not** proceed to live sync. -- Do **not** say you will continue or proceed with live sync. -- End the response immediately after the summary. -- Final line MUST be exactly: `DRY_RUN_STOP` -- Never ask follow-up questions in dry-run mode. -- Do not include any text after `DRY_RUN_STOP`. -- Do not mention `convert-cc-defs` or how to proceed with a live sync. - -### Dry-Run Output Format - -When in dry-run, output exactly and only the following structure. The word `Summary` must be a heading. Nothing else is allowed: - -``` -## Summary - - -DRY_RUN_STOP -``` - -Rules: -- No tables, code blocks, or extra headings. -- No follow-up questions. -- The last non-empty line must be exactly `DRY_RUN_STOP`. - -The **only** acceptable dry-run output is the literal template above with `` replaced by plain sentences. 
You must end immediately after `DRY_RUN_STOP`. - -## Feature: Conversion Run - -- If `--dry-run` is set: do not invoke `convert-cc-defs`, do not edit files, do not run conversions, and do not proceed to live sync. Only report what would happen using the pre-check summary (which was already obtained as a prerequisite) and then stop. -- Otherwise: invoke the `convert-cc-defs` skill for the selected target scope and apply the re-sync workflow steps in that skill (mechanical conversion + intelligent rewrite + merge). - -## Feature: Mandatory Post-Conversion Fixup - -**This step is NON-NEGOTIABLE.** After every conversion run (mechanical converter + LLM rewrite), run the batch sed fixup defined in `convert-cc-defs` Phase 2c. LLMs consistently fail at simple string replacement — the sed pass is the only reliable way to catch all remaining CC/CEP references. - -### Why This Exists - -In every sync run to date, the LLM rewrite pass has left behind: -- `compound-engineering:` prefixes that should be `systematic:` -- `.claude/` paths that should be `.opencode/` -- `Claude Code` product references that should be `OpenCode` -- `AskUserQuestion` tool names that should be `question` -- `CLAUDE.md` references that should be `AGENTS.md` - -These are simple string replacements. The LLM does not need to do them — and repeatedly fails to. The batch sed catches them deterministically. - -### Execution Steps - -1. **Run the ordered sed** from `convert-cc-defs` Phase 2c on all converted `.md` files -2. **Exclude** `sync-manifest.json` (upstream paths are correct) and `claude-permissions-optimizer/SKILL.md` (CC refs are intentional — only convert prefix and tool names) -3. **Fix edge cases manually**: badge URLs, `EveryInc/systematic`, `claude.com/opencode`, `Task()` → `task()` (see Phase 2c edge cases table) -4. **Run verification grep** (both CHECK 1 and CHECK 2 from Phase 2c) — zero hits required on non-exception files -5. 
**Fail the run** if CHECK 2 (over-conversions) returns any hits — this means a double-conversion bug - -### File Loop Pattern - -```bash -{ - git diff --name-only HEAD -- '*.md' - git ls-files --others --exclude-standard -- '*.md' -} | sort -u | grep -v '^skills/claude-permissions-optimizer/SKILL\.md$' | while IFS= read -r f; do - if [ -f "$f" ]; then - sed -i '' \ - -e 's/compound-engineering\.local\.md/systematic.local.md/g' \ - -e 's/compound-engineering-plugin/systematic/g' \ - -e 's/compound-engineering pipeline artifacts/systematic pipeline artifacts/g' \ - -e 's|\.context/compound-engineering/|.context/systematic/|g' \ - -e 's|plugins/compound-engineering/|plugins/systematic/|g' \ - -e 's/compound-engineering:/systematic:/g' \ - -e 's/Compound_Engineering/Systematic/g' \ - -e 's/Compound Engineering/Systematic/g' \ - -e 's/compound-engineering/systematic/g' \ - -e 's|~/\.claude/skills/|~/.config/opencode/skills/|g' \ - -e 's|~/\.claude/settings\.json|~/.config/opencode/settings.json|g' \ - -e 's|~/\.claude/|~/.config/opencode/|g' \ - -e 's|`\.claude/skills/|`.opencode/skills/|g' \ - -e 's|`\.claude/settings\.json|`.opencode/settings.json|g' \ - -e 's| \.claude/skills/| .opencode/skills/|g' \ - -e 's| \.claude/settings\.json| .opencode/settings.json|g' \ - -e 's|(\.claude/|(\.opencode/|g' \ - -e 's|`\.claude/|`.opencode/|g' \ - -e 's| \.claude/| .opencode/|g' \ - -e 's|"\.claude/|".opencode/|g' \ - -e 's|CLAUDE\.md|AGENTS.md|g' \ - -e 's/Claude Code/OpenCode/g' \ - -e 's/claude-code/opencode/g' \ - -e 's/AskUserQuestion/question/g' \ - -e 's/TaskCreate/todowrite/g' \ - "$f" - fi -done -``` - -Then apply targeted fixes to `claude-permissions-optimizer/SKILL.md`: - -```bash -sed -i '' \ - -e 's/compound-engineering-plugin/systematic/g' \ - -e 's/compound-engineering:/systematic:/g' \ - -e 's/compound-engineering/systematic/g' \ - -e 's/Compound Engineering/Systematic/g' \ - -e 's/AskUserQuestion/question/g' \ - skills/claude-permissions-optimizer/SKILL.md -``` 
- -## Tooling and Command Safety - -- Never use `gh` or other external CLI tools in dry-run mode (exception: the pre-check script must run in interactive mode to obtain summary data). -- Do not call conversion tools or edit files during dry-run. -- Prefer local reads of `sync-manifest.json` and bundled files when summarizing outside dry-run. - -## Feature: Commit, Branch, and PR (MANDATORY after changes) - -After a successful conversion run (not dry-run) that modified any files, you **MUST** create or update a PR. A sync run that changes files but does not produce a PR is a **failed run**. - -### Step 1: Check for changes - -```bash -git status --porcelain agents/ skills/ commands/ sync-manifest.json -``` - -If the output is empty, no files were changed — skip to Step 4: Post to tracking issue. - -### Step 2: Create branch and commit - -```bash -git checkout -B chore/sync-cep -git add agents/ skills/ commands/ sync-manifest.json -git commit -m "chore: sync CEP upstream definitions" -``` - -### Step 3: Push and create or update PR - -First, write the output summary to a temp file for use as the PR body. 
The summary MUST follow the Output Formatting template (hash changes table, conflicts, errors, etc.): - -```bash -cat > /tmp/sync-cep-pr-body.md <<'ENDOFBODY' -## CEP Sync Summary - -(paste the full output summary here) -ENDOFBODY -``` - -Push the branch: -```bash -git push -u origin chore/sync-cep --force-with-lease -``` - -Check if a PR already exists: -```bash -gh pr list --head chore/sync-cep --state open --json number --jq '.[0].number // empty' -``` - -- **If a PR number is returned:** update its body: - ```bash - gh pr edit --body-file /tmp/sync-cep-pr-body.md - ``` -- **If empty (no PR):** create one: - ```bash - gh pr create --base main --head chore/sync-cep \ - --title "chore: sync CEP upstream definitions" \ - --body-file /tmp/sync-cep-pr-body.md \ - --label "sync-cep" - ``` - -**Important:** Environment variables do not persist across separate Bash tool calls. Always write the PR body to a file first, then reference it with `--body-file`. - -### Step 4: Post to tracking issue - -Find the open tracking issue labeled `sync-cep`: -```bash -gh issue list --label sync-cep --state open --json number --jq '.[0].number // empty' -``` - -- **If an issue exists:** post a comment with the summary and a link to the PR. -- **If no issue exists:** create one with title `CEP Sync Run - YYYY-MM-DD`, label `sync-cep`, and the summary as the body. - -### Reuse rules - -- Always reuse branch `chore/sync-cep` — do not create timestamped or numbered branches. -- If a PR already exists for that branch, update it instead of creating a new one. -- Always link the PR in the tracking issue comment. 
- -## Feature: Conflict Detection - -Use the override merge matrix: -- Upstream unchanged + override exists → keep override -- Upstream changed + override on SAME field → conflict, report only -- Upstream changed + override on DIFFERENT field → apply upstream, preserve overrides -- Override is `"*"` → skip re-sync entirely - -## Feature: Output Formatting - -Use this exact template for all output. Copy it and fill in the placeholders: - -``` -## Summary -- **Scope**: [all|skills|agents|commands] -- **Definitions processed**: N -- **Hash changes applied**: N -- **Conflicts detected**: N -- **Errors (from precheck)**: N - -### Hash Changes -| Definition | Old Hash | New Hash | Status | -|------------|----------|----------|--------| -| path/to/def | abc123 | def456 | ✅ Applied | - -### Conflicts -| Definition | Field | Override Value | Upstream Value | Action | -|------------|-------|---------------|----------------|--------| -(None detected / list conflicts) - -### New Upstream (report-only) -| Definition | Files | -|------------|-------| -| path/to/new-def | SKILL.md, references/guide.md | - -### Upstream Deletions (report-only) -- path/to/deleted-def - -### Errors -- [error message from precheck] → Affected: [definition key] - -### Rewrite Failures -- (None / list failures) - -### Phantom References -- (None / list commands referencing missing agents/skills) -``` - -## Boundaries - -- Do not use `gh` commands or call external CLI tools during dry-run mode (exception: the pre-check script may run in interactive mode). -- Do not auto-merge conflicts. -- Do not modify files outside `agents/`, `skills/`, `commands/`, and `sync-manifest.json`. -- Use `gh` for PR creation, PR updates, issue comments, and (in interactive mode) authentication token setup. -- Branch name is always `chore/sync-cep`. Label is always `sync-cep`. 
-- **A sync run that changes files but produces no PR is a FAILED run.** diff --git a/.opencode/skills/convert-cc-defs/SKILL.md b/.opencode/skills/convert-cc-defs/SKILL.md deleted file mode 100644 index af2d364..0000000 --- a/.opencode/skills/convert-cc-defs/SKILL.md +++ /dev/null @@ -1,814 +0,0 @@ ---- -name: convert-cc-defs -description: Use when importing, converting, or syncing agent, skill, or command definitions from CEP or other Claude Code-format sources into the Systematic plugin for OpenCode. Triggers on "import from CEP", "sync upstream", "convert CC definition", "add agent/skill/command from upstream", or when updating existing bundled definitions from upstream sources. ---- - -# Convert CC Definitions - -Import and convert agent, skill, and command definitions written in Claude Code (CC) format — primarily from the Compound Engineering Plugin (CEP) upstream — into Systematic's bundled assets. Applies mechanical conversion via the converter pipeline, then intelligent LLM-powered rewrites to ensure definitions work correctly in OpenCode. - -**This is NOT a copy-paste operation.** Systematic is a cohesive OpenCode plugin, not a mirror. Every imported definition must be evaluated, adapted, and branded for the Systematic ecosystem. 
- -## When to Use - -- Importing new definitions from CEP or Superpowers upstream repos -- Re-syncing existing bundled definitions after upstream changes -- Converting a one-off CC definition file for inclusion in Systematic -- Populating `sync-manifest.json` entries for existing bundled content - -## When NOT to Use - -- Writing new Systematic-native skills/agents/commands (use `create-agent-skill` skill instead) -- Editing existing bundled content that has no upstream source -- Converting definitions for a different project (use the CLI: `systematic convert`) - -## Core Workflow - -```dot -digraph convert_flow { - rankdir=TB; - node [shape=box]; - - "Identify target definitions" [shape=doublecircle]; - "Fetch upstream content" [shape=box]; - "Read manifest for existing entries" [shape=box]; - "Check manual_overrides" [shape=diamond]; - "Run mechanical converter" [shape=box]; - "Intelligent rewrite pass" [shape=box]; - "Branding & consistency review" [shape=box]; - "Write to bundled directory" [shape=box]; - "Update sync-manifest.json" [shape=box]; - "Verify (build + tests)" [shape=box]; - "Done" [shape=doublecircle]; - - "Identify target definitions" -> "Fetch upstream content"; - "Fetch upstream content" -> "Read manifest for existing entries"; - "Read manifest for existing entries" -> "Check manual_overrides"; - "Check manual_overrides" -> "Run mechanical converter" [label="no overrides"]; - "Check manual_overrides" -> "Merge preserving overrides" [label="has overrides"]; - "Merge preserving overrides" -> "Intelligent rewrite pass"; - "Run mechanical converter" -> "Intelligent rewrite pass"; - "Intelligent rewrite pass" -> "Branding & consistency review"; - "Branding & consistency review" -> "Write to bundled directory"; - "Write to bundled directory" -> "Update sync-manifest.json"; - "Update sync-manifest.json" -> "Verify (build + tests)"; - "Verify (build + tests)" -> "Done"; -} -``` - -## Phase 1: Identify and Fetch - -### 1a. 
Identify Upstream Sources - -Determine what to import. Supported sources: - -| Source | Repo | Content | -|--------|------|---------| -| **CEP** | `EveryInc/compound-engineering-plugin` | Agents, skills, commands | -| **Superpowers** | `obra/superpowers` | Skills (personal workflow skills) | -| **Local file** | N/A | Single CC-format .md file | - -For GitHub repos, use `gh` CLI to fetch content. Note: CEP content lives under `plugins/compound-engineering/` — always use the full path: - -```bash -# Fetch a specific file from CEP (note the full path under plugins/) -gh api repos/EveryInc/compound-engineering-plugin/contents/plugins/compound-engineering/agents/review/security-sentinel.md \ - --jq '.content' | base64 -d > /tmp/upstream-security-sentinel.md - -# Get the latest commit SHA for the agents directory -gh api "repos/EveryInc/compound-engineering-plugin/commits?path=plugins/compound-engineering/agents&per_page=1" \ - --jq '.[0].sha' - -# Get content hash for change detection -shasum -a 256 /tmp/upstream-security-sentinel.md | cut -d' ' -f1 -``` - -**Batch fetch pattern** — for importing multiple files, loop over a list: - -```bash -mkdir -p /tmp/cep-upstream -AGENTS=("review/architecture-strategist" "research/best-practices-researcher" "workflow/bug-reproduction-validator") -for agent_path in "${AGENTS[@]}"; do - name=$(basename "$agent_path") - gh api "repos/EveryInc/compound-engineering-plugin/contents/plugins/compound-engineering/agents/${agent_path}.md" \ - --jq '.content' | base64 -d > "/tmp/cep-upstream/${name}.md" - shasum -a 256 "/tmp/cep-upstream/${name}.md" -done -``` - -**Recursive file listing** — the contents API fails on subdirectories. 
Use the git tree API for a complete one-shot listing: - -```bash -# List ALL files under skills/ recursively (one API call) -gh api "repos/EveryInc/compound-engineering-plugin/git/trees/?recursive=1" \ - --jq '.tree[] | select(.path | startswith("plugins/compound-engineering/skills/")) | select(.type == "blob") | .path' \ - | sed 's|plugins/compound-engineering/skills/||' -``` - -**Skill folders** — Skills are directories, not just SKILL.md files. A skill folder may contain references/, templates/, workflows/, scripts/, assets/, and schema files. The CLI converter (`bun src/cli.ts convert skill`) ONLY processes SKILL.md files. All other files in the skill folder must be: -1. Fetched from upstream individually via the contents API -2. Copied to the local skill directory, preserving the folder structure -3. Manually rewritten with CC→OC text replacements (`.claude/` → `.opencode/`, `CLAUDE.md` → `AGENTS.md`, `Claude Code` → `OpenCode`, `compound-engineering:` → `systematic:`, etc.) -4. `${CLAUDE_PLUGIN_ROOT}/skills//...` paths simplified to relative paths (skills are bundled in the plugin — no env var prefix needed) - -### Discovering Sub-Files for New Skills - -When importing a **new** skill (not yet in the manifest), you must discover ALL files in the skill directory before fetching. There are two ways to get the file list: - -**Option A: Use precheck `newUpstreamFiles` (automated sync)** — When running via the sync-cep workflow, the precheck summary includes a `newUpstreamFiles` map that lists all files for each new definition: - -```json -{ - "newUpstreamFiles": { - "skills/every-style-editor": ["SKILL.md", "references/EVERY_WRITE_STYLE.md"], - "skills/gemini-imagegen": ["SKILL.md", "requirements.txt", "scripts/generate.py", "scripts/setup.sh"] - } -} -``` - -Use this file list directly — it was collected from the upstream git tree and is authoritative. 
- -**Option B: Query the git tree API (manual import)** — When importing outside the sync-cep workflow, discover files yourself: - -```bash -# Get the full tree and filter for the skill directory -gh api "repos/EveryInc/compound-engineering-plugin/git/trees/main?recursive=1" \ - --jq '.tree[] | select(.path | startswith("plugins/compound-engineering/skills//")) | select(.type == "blob") | .path' \ - | sed 's|plugins/compound-engineering/skills//||' -``` - -**After discovering the file list**, fetch each file individually using the contents API: - -```bash -for file in SKILL.md references/guide.md scripts/setup.sh; do - mkdir -p "/tmp/cep-upstream//$(dirname "$file")" - gh api "repos/EveryInc/compound-engineering-plugin/contents/plugins/compound-engineering/skills//${file}" \ - --jq '.content' | base64 -d > "/tmp/cep-upstream//${file}" -done -``` - -**CRITICAL**: Failing to discover sub-files results in incomplete skill imports — only SKILL.md gets copied while references/, scripts/, assets/ etc. are silently dropped. This renders many skills non-functional. - -### 1b. Check Existing Manifest - -Read `sync-manifest.json` to determine if this is a new import or an update: - -```bash -# Check if definition already exists in manifest -cat sync-manifest.json | jq '.definitions["agents/review/security-sentinel"]' -``` - -If updating an existing definition: -- Compare `upstream_content_hash` with current upstream to detect changes -- Check `manual_overrides` — these fields/sections MUST be preserved -- Review `rewrites` log — these intelligent rewrites should be re-applied - -### 1c. Evaluate Fit - -**STOP and evaluate before converting.** Not all upstream definitions belong in Systematic. Present your evaluation to the user before proceeding. - -**Evaluation criteria (ALL must pass):** - -1. **Gap analysis:** Does this fill a gap in Systematic's current capabilities? Run `bun src/cli.ts list ` to check. -2. 
**Philosophy fit:** Is it consistent with Systematic's opinionated workflow approach? (Structured phases, explicit deliverables, skill-driven discipline) -3. **Overlap check:** Does it duplicate an existing bundled definition? If partial overlap, propose enhancing the existing definition instead. -4. **Dependency check:** Does the definition reference agents, skills, or commands that don't exist in Systematic? List any missing dependencies — note as WARN (they can be imported later using this skill, and references should be kept). -5. **Phantom check:** If importing agents or skills referenced by commands, verify they actually exist upstream. Commands may reference definitions that were never created upstream ("phantoms"). Fetch the upstream directory listing to confirm: - ```bash - # Check agents - gh api repos/EveryInc/compound-engineering-plugin/contents/plugins/compound-engineering/agents/review \ - --jq '.[].name' - # Check skills - gh api "repos/EveryInc/compound-engineering-plugin/git/trees/?recursive=1" \ - --jq '.tree[] | select(.path | startswith("plugins/compound-engineering/skills/")) | select(.type == "tree") | .path' \ - | sed 's|plugins/compound-engineering/skills/||' | grep -v '/' - ``` -6. **User value:** Would a Systematic plugin user actually invoke this? Niche CC-specific tools (e.g., `feature-video`, `test-browser`) may not translate. - -**Present your evaluation as a table:** - -```markdown -| Criterion | Pass/Fail | Notes | -|-----------|-----------|-------| -| Gap analysis | PASS | No existing code review simplification agent | -| Philosophy fit | PASS | Structured review methodology | -| Overlap check | PASS | No overlap with existing review agents | -| Dependencies | WARN | References `resolve_todo_parallel` command (not in Systematic) | -| User value | PASS | General-purpose, not framework-specific | -``` - -If any criterion fails, document why and **do not proceed** unless the user explicitly overrides. 
- -## Phase 2: Mechanical Conversion - -**Consult `docs/CONVERSION-GUIDE.md` for the authoritative field mapping reference.** That document has the complete tables for frontmatter fields, tool names, paths, and edge cases. - -### 2a. Change Detection (CRITICAL) - -**The sync workflow MUST focus on converting CHANGED content only.** If upstream hasn't changed a section, do NOT modify it. - -1. **Compare content hashes** — Only process definitions whose `upstream_content_hash` differs from the current upstream. -2. **Diff before converting** — Run `git diff` or text comparison to identify WHAT changed upstream. -3. **Selective application** — Apply conversions ONLY to the changed portions. Preserve unchanged sections exactly as they are. - -**CRITICAL: Preserve Stable Content** - -| Content Type | Action | -|--------------|--------| -| `~/.config/opencode/` paths | **DO NOT CHANGE** — This is the correct OpenCode global config path | -| `~/.opencode/` paths | **DO NOT CHANGE** — This is WRONG. If you see this, it was a mistake. 
The correct path is `~/.config/opencode/` | -| `question` tool references | **DO NOT CHANGE** — This is the correct OpenCode tool name | -| `AskUserQuestion` in existing bundled files | Change to `question` — this was a CC tool that must be converted | -| Trailing newlines | **PRESERVE** — Do not strip EOL characters | - -**Path Conversion Rules (MEMORIZE):** - -| CC Path | OC Path | Notes | -|---------|---------|-------| -| `~/.claude/` | `~/.config/opencode/` | Global user config | -| `.claude/` | `.opencode/` | Project-relative | -| `~/.config/opencode/` | `~/.config/opencode/` | **ALREADY CORRECT — NEVER CHANGE** | -| `~/.opencode/` | `~/.config/opencode/` | **WRONG PATH — FIX IF FOUND** | - -**Tool Name Conversion Rules (MEMORIZE):** - -| CC Tool | OC Tool | Direction | -|---------|---------|-----------| -| `AskUserQuestion` | `question` | CC → OC only | -| `TodoWrite` | `todowrite` | CC → OC only | -| `Task` | `task` | CC → OC only | -| `question` | `question` | **ALREADY CORRECT — NEVER CHANGE** | -| `todowrite` | `todowrite` | **ALREADY CORRECT — NEVER CHANGE** | - -> **Common mistake:** Converting `question` → `AskUserQuestion`. This is BACKWARDS. OpenCode uses lowercase tool names. - -### 2b. Apply Mechanical Converter - -Apply the existing converter pipeline. This handles: -- Tool name mappings (`Task` -> `task`, `TodoWrite` -> `todowrite`, etc.) -- Path replacements (`.claude/` -> `.opencode/`, `CLAUDE.md` -> `AGENTS.md`) -- Prefix conversions (`compound-engineering:` -> `systematic:`) -- Frontmatter field mapping (tools array->map, permissionMode->permission, maxSteps->steps, etc.) -- Model normalization (provider prefix) - -> **Note:** The `color` field in CC agent frontmatter (e.g., `color: violet`) is passed through by the converter. OpenCode does not currently use this field, but it is harmless to keep. 
- -**For a single file via CLI:** - -```bash -bun src/cli.ts convert agent /tmp/upstream-security-sentinel.md -``` - -> **Tip:** Use `bun src/cli.ts` for local development instead of `bunx systematic` to avoid slow resolution. - -**For programmatic use:** - -```typescript -import { convertContent } from './lib/converter.js' -const converted = convertContent(upstreamContent, 'agent') -``` - -**What mechanical conversion does NOT handle** (requires intelligent rewrite): -- Description enhancement with trigger conditions -- Branding beyond regex patterns (contextual references to "Claude Code", "Claude", "CEP") -- Content restructuring for Systematic's style -- Removing CC-specific features with no OC equivalent (`${CLAUDE_SESSION_ID}`) -- Adding Systematic-specific sections (integration points, skill cross-references) -- **Skill path patterns in body text** — the converter handles `.claude/` -> `.opencode/` in general, but does NOT handle `~/.claude/skills/` -> `~/.config/opencode/skills/` or bare `.claude/skills/` -> `.opencode/skills/` references embedded in skill-discovery instructions. Audit these manually. -- **Tool/path references inside code blocks** — the converter skips fenced code blocks to avoid false positives, so `Task()`, `TodoWrite`, `CLAUDE.md`, `.claude/skills/` inside ``` blocks must be fixed manually -- **Attribution badges and footers** inside heredoc code blocks (commit messages, PR bodies) -- **CC-specific features with no OC equivalent** (Swarm Mode / `Teammate` API, "remote" execution via Claude Code web) -- **CC-specific exclusion rules** — some agents reference `compound-engineering pipeline artifacts` or other CC-specific content (e.g., `docs/plans/*.md` exclusions) that should be removed as they're not applicable to Systematic -- **Frontmatter quoting normalization** — the converter may change double quotes to single quotes in `argument-hint` and other frontmatter string values. 
This is cosmetic but verify quoting consistency after conversion. - -### 2c. Mandatory Post-Conversion Fixup (Batch sed) - -**LLMs consistently fail at simple string replacement.** The mechanical converter catches some patterns, but many remain — particularly inside code blocks, multi-platform tool lists, and contextual references. After every conversion (both mechanical converter AND LLM rewrite), run these deterministic sed replacements as a mandatory safety net. - -**Replacement order matters.** Apply more-specific patterns before general ones to avoid double-conversion. Run all phases in sequence on each file: - -```bash -# Target: all converted .md files EXCEPT exclusions (see below) -# PHASE A: compound-engineering → systematic (most specific first) -sed -i '' \ - -e 's/compound-engineering\.local\.md/systematic.local.md/g' \ - -e 's/compound-engineering-plugin/systematic/g' \ - -e 's/compound-engineering pipeline artifacts/systematic pipeline artifacts/g' \ - -e 's|\.context/compound-engineering/|.context/systematic/|g' \ - -e 's|plugins/compound-engineering/|plugins/systematic/|g' \ - -e 's/compound-engineering:/systematic:/g' \ - -e 's/Compound_Engineering/Systematic/g' \ - -e 's/Compound Engineering/Systematic/g' \ - -e 's/compound-engineering/systematic/g' \ - "$FILE" - -# PHASE B: Path conversions (most specific first) -sed -i '' \ - -e 's|~/\.claude/skills/|~/.config/opencode/skills/|g' \ - -e 's|~/\.claude/settings\.json|~/.config/opencode/settings.json|g' \ - -e 's|~/\.claude/|~/.config/opencode/|g' \ - -e 's|`\.claude/skills/|`.opencode/skills/|g' \ - -e 's|`\.claude/settings\.json|`.opencode/settings.json|g' \ - -e 's| \.claude/skills/| .opencode/skills/|g' \ - -e 's| \.claude/settings\.json| .opencode/settings.json|g' \ - -e 's|(\.claude/|(\.opencode/|g' \ - -e 's|`\.claude/|`.opencode/|g' \ - -e 's| \.claude/| .opencode/|g' \ - -e 's|"\.claude/|".opencode/|g' \ - -e 's|CLAUDE\.md|AGENTS.md|g' \ - "$FILE" - -# PHASE C: Branding and tool names -sed 
-i '' \ - -e 's/Claude Code/OpenCode/g' \ - -e 's/claude-code/opencode/g' \ - -e 's/AskUserQuestion/question/g' \ - -e 's/TaskCreate/todowrite/g' \ - "$FILE" -``` - -#### File Exclusions - -| File/Pattern | Reason | What to convert instead | -|---|---|---| -| `sync-manifest.json` | Upstream paths (`plugins/compound-engineering/...`) are correct source references | Never run sed on manifest upstream paths | -| `claude-permissions-optimizer/SKILL.md` | Skill targets Claude Code settings files — CC refs and `.claude/` paths are intentional | Only convert: prefix (`compound-engineering:` → `systematic:`), tool names (`AskUserQuestion` → `question`), product name (`Compound Engineering` → `Systematic`) | - -#### Edge Cases Requiring Manual Review After sed - -These patterns recur across every sync and need manual attention: - -| Pattern | Problem | Fix | -|---|---|---| -| `EveryInc/systematic` in URLs | Over-conversion of `EveryInc/compound-engineering-plugin` | Use generic example (`acme/my-app`) or remove entirely | -| `claude.com/opencode` | `Claude Code` → `OpenCode` mangles URLs like `claude.com/claude-code` | Replace with `opencode.ai` | -| CEP version attribution badges | `[![Compound Engineering v[VERSION]]...]` becomes `[![Systematic v[VERSION]]...]` with dead URL | Remove the entire badge line | -| `Task()` in code blocks | Uppercase `Task()` in code examples not caught by branding sed | Manually fix to `task()` | -| `https://github.com/EveryInc/systematic` | Non-existent URL from catch-all sed | Context-dependent: remove, genericize, or update to actual Systematic repo URL | -| Multi-platform tool lists | `(question in OpenCode, ...)` after conversion — verify the list is coherent | Should read naturally with OpenCode as the primary platform | - -#### Post-Fixup Verification (MANDATORY) - -After running the batch sed fixup, verify with grep. 
Both checks MUST pass: - -```bash -# CHECK 1: Remaining CC/CEP refs (should be zero for non-exception files) -grep -rnE 'Claude Code|claude-code|\.claude/|CLAUDE\.md|~/\.claude|AskUserQuestion|TaskCreate|compound-engineering|Compound Engineering|Compound_Engineering|EveryInc/systematic|claude\.com/opencode' - -# CHECK 2: Over-conversions (should ALWAYS be zero across ALL files) -grep -rnE '\.opencode/\.opencode/|config/opencode/\.config/|systematic\.systematic|systematic:systematic:|opencode\.ai/opencode' -``` - -If CHECK 1 returns hits on non-exception files, fix them. If CHECK 2 returns any hits, you have a double-conversion bug — investigate immediately. - -## Phase 3: Intelligent Rewrite - -This is the critical step that distinguishes a good import from a broken one. Every converted definition MUST go through intelligent rewrite. - -### 3a. Description Rewrite - -CC descriptions are typically short and capability-focused. OC descriptions must include **trigger conditions** for auto-invocation. - -**CC style (bad for OC):** -```yaml -description: Reviews code for unnecessary complexity and suggests simplifications -``` - -**OC/Systematic style (good):** -```yaml -description: "Use this agent when you need a final review pass focused on simplicity, YAGNI principles, and removing unnecessary complexity. ..." -``` - -**Rewrite rules for descriptions:** -- Start with "Use this agent/skill/command when..." or "This skill should be used when..." -- Include specific trigger symptoms and situations -- For agents: include `` blocks showing context + user message + assistant response + commentary -- Max 1024 characters total for skill descriptions -- Write in third person (injected into system prompt) - -### 3b. Branding Audit - -The mechanical converter catches regex-matchable patterns. 
You must catch contextual ones: - -| Pattern to find | Replacement | -|-----------------|-------------| -| "Claude Code" (the product) | "OpenCode" | -| "Claude" (the AI model, in instructions) | Keep — model name is fine | -| "CEP" or "Compound Engineering Plugin" | "Systematic" | -| "claude-code" (in paths not caught by converter) | "opencode" | -| References to CC-specific behavior | Adapt or remove | -| `Task agent-name("prompt")` patterns | `task` tool or `@agent-name` | -| `TodoWrite` in prose (not just tool calls) | `todowrite` or "update your task list" | -| "the built-in grep tool" (lowercase tool name) | "the built-in Grep tool" (OC capitalizes tool names) | -| `compound-engineering pipeline artifacts` | Remove or replace with "systematic pipeline artifacts" | -| Version attribution footers (e.g., `*Based on Claude Code v2.1.19*`) | **Remove entirely** — CC version numbers are not applicable to Systematic. Blind `Claude Code` → `OpenCode` rewrite turns these into nonsensical `Based on OpenCode v2.1.19`. | -| Source attribution URLs (e.g., `claude.com`, `docs.anthropic.com`) | **Keep as-is** — these are upstream source references, not branding | - -### 3c. 
Content Adaptation - -Review and adapt the body content for Systematic's style: - -**For agents:** -- Ensure the system prompt is self-contained (agent definitions are the full prompt) -- Add structured output format if missing (numbered phases, deliverables) -- Reference Systematic tools and skill cross-references where appropriate -- Match temperature/mode conventions (see `docs/CONVERSION-GUIDE.md`) - -**For skills:** -- Add "When to Use" / "When NOT to Use" sections if missing -- Add integration points with other Systematic workflows -- Add cross-references to related bundled skills/commands -- Ensure progressive disclosure (SKILL.md + references/ if content is heavy) - -**For commands:** -- Verify slash command sequences use `systematic:` prefix -- Ensure `$ARGUMENTS` and positional args are correct (OC is 1-indexed) -- Verify referenced agents and skills exist in Systematic's bundled set -- **Keep references to not-yet-imported agents/skills** — they will be brought over later using this same skill and will keep their names. Only apply mechanical CC→OC tool/path conversion, not removal. - -### 3d. Code Block Audit - -The mechanical converter intentionally skips content inside fenced code blocks to avoid false positives. You MUST manually audit all code blocks for: - -| Pattern in code blocks | Action | -|------------------------|--------| -| `Task(agent-name)` or `Task({ ... })` | → `task(agent-name)` / `task({ ... })` | -| `TodoWrite` | → `todowrite` | -| `CLAUDE.md` | → `AGENTS.md` | -| `.claude/skills/` or `.claude/` paths | → `.opencode/skills/` or `.opencode/` | -| `Teammate({ operation: ... })` | Add aspirational note or adapt to `task` with background execution | -| Attribution badges/footers (`Compound Engineered`, `Claude Code` links) | → Systematic branding | -| `AskUserQuestion` | → `question tool` | - -> **High-risk pattern:** Long skills with many code examples (e.g., orchestrating-swarms has 47 `Task({` calls across 1700+ lines). 
After mechanical conversion, run a targeted search for capitalized tool names inside code blocks: `grep -n "Task(\|TodoWrite\|AskUserQuestion" <file>`. Fix all occurrences — users copying broken examples will get runtime errors.
-
-### 3e. CC-Specific Features
-
-Some CC features have no direct OC equivalent. For each, make a case-by-case decision:
-
-| CC Feature | OC Equivalent | Recommendation |
-|------------|---------------|----------------|
-| `Teammate` API (spawnTeam, requestShutdown, cleanup) | None — `task` with `run_in_background` is partial | Keep as aspirational reference with explanatory note |
-| "Remote" execution (Claude Code web background) | None | Remove — no OC equivalent exists |
-| `${CLAUDE_SESSION_ID}` | None | Remove (keep in upstream API spec docs as-is) |
-| `${CLAUDE_PLUGIN_ROOT}` | None — bundled skills use relative paths | Simplify to relative paths (e.g., `scripts/worktree-manager.sh` not `${CLAUDE_PLUGIN_ROOT}/skills/git-worktree/scripts/worktree-manager.sh`) |
-| `AskUserQuestion` with complex schemas | `question` tool (simpler) | Adapt to OC's question tool format |
-
-Present CC-specific feature decisions to the user before proceeding.
-
-### 3f. 
Quality Checklist - -Before writing the file, verify: - -- [ ] Frontmatter parses without errors -- [ ] Description includes trigger conditions (not just capability summary) -- [ ] No stale references to Claude Code, CEP, `.claude/` paths -- [ ] Tool references use OC names (`task`, `todowrite`, `google_search`, `systematic_skill`) -- [ ] **Code blocks audited** — tool names, paths, and branding inside ``` blocks are fixed -- [ ] Attribution badges/footers in heredoc code blocks updated to Systematic branding -- [ ] CC-specific features with no OC equivalent handled (removed, adapted, or noted as aspirational) -- [ ] Cross-referenced agents/skills/commands exist in Systematic (or are marked for future import) -- [ ] **Path sanity check** — No `~/.opencode/` paths (should be `~/.config/opencode/`) -- [ ] **Tool name sanity check** — No `AskUserQuestion` (should be `question`) -- [ ] **Formatting preserved** — Trailing newlines maintained, no gratuitous whitespace changes -- [ ] Content matches Systematic's style (structured phases, explicit deliverables) -- [ ] Agent `mode` field is set (`subagent`, `primary`, or `all`) -- [ ] Agent `temperature` is appropriate for the agent's purpose -- [ ] **Batch sed fixup ran** — Phase 2c sed commands executed on all converted files -- [ ] **Grep verification passed** — Phase 2c CHECK 1 (remaining refs) and CHECK 2 (over-conversions) both clean -- [ ] **Edge cases reviewed** — Checked for mangled URLs, dead badge links, uppercase `Task()` in code blocks (see Phase 2c edge cases table) - -### 3g. Discrepancy Reporting - -If you encounter content that looks incorrect but wasn't changed by upstream: - -1. **DO NOT silently fix it** — You might break something -2. **Report it as a discrepancy** — Include in the sync summary -3. **Flag for human review** — "Found existing path `~/.opencode/` in line X — should this be `~/.config/opencode/`?" 
- -Example discrepancy report: -```markdown -### Discrepancies Found (not from upstream changes) - -| File | Line | Issue | Recommended Action | -|------|------|-------|-------------------| -| skills/foo/SKILL.md | 42 | Uses `~/.opencode/` path | Verify if intentional, fix to `~/.config/opencode/` if not | -| commands/bar.md | 15 | Uses `AskUserQuestion` | Convert to `question` tool | -``` - -## Phase 4: Write and Register - -### 4a. Place the File - -| Type | Location | Naming | -|------|----------|--------| -| Agent | `agents//.md` | kebab-case, category from purpose | -| Skill | `skills//SKILL.md` | kebab-case directory | -| Command | `commands/.md` or `commands/workflows/.md` | kebab-case, `workflows/` for workflow commands | - -Categories for agents: `design/`, `research/`, `review/`, `workflow/` - -### 4b. Update sync-manifest.json - -**Every imported definition MUST have a manifest entry.** This is not optional. - -**Use the `date` CLI to generate the `synced_at` timestamp in ISO 8601 UTC format:** - -```bash -date -u +'%Y-%m-%dT%H:%M:%SZ' -``` - -Run this once at the start of a batch import and use the same timestamp for all entries in that batch. - -```json -{ - "definitions": { - "agents/review/security-sentinel": { - "source": "cep", - "upstream_path": "plugins/compound-engineering/agents/review/security-sentinel.md", - "upstream_commit": "abc123def456...", - "synced_at": "", - "notes": "Imported from CEP. Enhanced description with trigger examples. Updated tool references.", - "upstream_content_hash": "sha256-of-upstream-content", - "rewrites": [ - { - "field": "description", - "reason": "CC description lacked OC trigger conditions and examples", - "original": "Security audits, vulnerability assessment, OWASP compliance" - }, - { - "field": "body:branding", - "reason": "Contextual reference to Claude Code in analysis instructions" - } - ], - "manual_overrides": [] - } - } -} -``` - -**Manifest key format:** Repo-relative path without file extension. 
-- Agents: `agents//` -- Skills: `skills/` -- Commands: `commands/` or `commands/workflows/` - -**Multi-file skills MUST include a `files` array** listing all files in the skill directory (relative to the skill's upstream path). This is how the precheck script knows which files to hash for change detection. Without it, sub-file changes go undetected: - -```json -{ - "skills/my-skill": { - "files": ["SKILL.md", "references/guide.md", "scripts/setup.sh"], - ... - } -} -``` - -**Ensure `sources` has an entry for the upstream repo:** - -```json -{ - "sources": { - "cep": { - "repo": "EveryInc/compound-engineering-plugin", - "branch": "main", - "url": "https://github.com/EveryInc/compound-engineering-plugin" - }, - "superpowers": { - "repo": "obra/superpowers", - "branch": "main", - "url": "https://github.com/obra/superpowers" - } - } -} -``` - -### 4c. Record Rewrites - -**Every intelligent rewrite MUST be logged in the `rewrites` array.** This is how future syncs know what to re-apply. - -Each rewrite entry needs: -- `field`: What was changed (`description`, `body:branding`, `body:tool-references`, `body:structure`, `frontmatter:`) -- `reason`: Why (one sentence) -- `original`: The original value before rewrite (optional but recommended for descriptions) - -### 4d. Respect Manual Overrides - -If `manual_overrides` contains entries, those fields/sections were customized after import. On re-import: -1. Read the current bundled file -2. Extract the overridden fields/sections -3. Apply conversion to non-overridden content only -4. Merge overrides back in -5. 
Update manifest `synced_at` and `upstream_commit` but keep `manual_overrides` intact - -**Override entries MUST be structured objects (string arrays are invalid):** - -```json -{ - "manual_overrides": [ - { - "field": "description", - "reason": "Customized triggers for our auth-heavy codebase", - "original": "Security audits, vulnerability assessment, OWASP compliance", - "overridden_at": "2026-02-10T06:30:00Z" - } - ] -} -``` - -Each entry has: -- `field`: Same naming convention as `rewrites[].field` (e.g., `description`, `body:section-name`, `frontmatter:`, `*` for full local ownership) -- `reason`: Why the override exists — one sentence -- `original`: Pre-override value (for conflict detection and rollback; truncate to 500 chars for large sections) -- `overridden_at`: ISO 8601 UTC timestamp (`date -u +'%Y-%m-%dT%H:%M:%SZ'`) - -**Override vs rewrite precedence:** If a field appears in both `rewrites` and `manual_overrides`, the manual override takes precedence. The rewrite is kept for historical record but will NOT be re-applied to that field on re-sync. - -### 4e. Record Manual Edit - -When you make a targeted edit to an already-imported definition file (NOT during initial import — this is for post-import customization): - -1. **Before editing**: Read and store the current value of the field(s) you will change -2. **Make your edit** to the bundled definition file -3. **Update `sync-manifest.json`**: - a. Read the current manifest entry for this definition - b. For each field you changed: - - If field is already in `manual_overrides`: update `reason` if it changed, keep the `original` from the FIRST override (don't overwrite history), keep `overridden_at` unchanged - - If field is NOT in `manual_overrides`: add new entry with `field`, `reason`, `original` (value from step 1), and `overridden_at` (current UTC timestamp) - c. Write the updated manifest -4. 
**Validate**: `cat sync-manifest.json | python3 -m json.tool > /dev/null` (confirm valid JSON) - -**Idempotency rules:** -- Running this workflow twice for the same edit MUST NOT duplicate entries (check `field` name before adding) -- MUST NOT overwrite the `original` value (first override's original is canonical) -- MUST NOT change `overridden_at` if the field was already overridden - -| Thought | Reality | -|---------|---------| -| "I'll update the manifest later" | Update NOW, in the same operation as the edit | -| "The reason is obvious" | Future agents can't read your mind. Write it. | -| "I don't need to store the original" | Without it, you can't detect conflicts on re-sync | -| "This field is too small to track" | If you changed it, track it. No exceptions. | - -## Phase 5: Verify - -### 5a. Build and Test - -```bash -bun run build && bun run typecheck && bun run lint && bun test -``` - -### 5b. Validate Converted Content - -```bash -# Check the definition loads via the plugin -bun src/cli.ts list agents -bun src/cli.ts list skills -bun src/cli.ts list commands - -# For agents: verify frontmatter extracts correctly -bun test tests/unit/agents.test.ts - -# For skills: verify skill loading -bun test tests/unit/skills.test.ts - -# Integration: verify conversion round-trip -bun test tests/integration/converter-validation.test.ts -``` - -### 5c. Manual Spot-Check - -For each converted definition, verify: -1. `systematic_skill` tool lists it (for skills) -2. Agent config merges correctly (for agents) -3. Command is available via slash prefix (for commands) - -## Re-Sync Workflow (Updating Existing Definitions) - -When pulling upstream changes for an already-imported definition: - -1. **Read manifest entry** — Get `upstream_content_hash`, `rewrites`, `manual_overrides` -2. **Fetch upstream** — Get current content and compute hash -3. **Compare hashes** — If unchanged, skip (idempotent) -4. 
**Diff upstream changes** — Use `git diff` or text diff to understand what changed -5. **Re-apply mechanical conversion** on the new upstream content -6. **Re-apply rewrites** from the manifest log — same fields, same reasons, adapted to new content (skip fields that have manual overrides — overrides take precedence) -7. **Handle manual overrides** using the merge matrix below -8. **Update manifest** — New commit SHA, hash, timestamp (`date -u +'%Y-%m-%dT%H:%M:%SZ'`). Update rewrites if they changed. -9. **Verify** — Build, test, spot-check - -### Issue/PR Dedupe and Reporting - -When running automated syncs, always: -- Reuse branch `chore/sync-cep` for all sync PRs. -- If a PR exists for that branch, update it instead of creating a new one. -- Use or create a tracking issue labeled `sync-cep` and append run summaries as comments. - -Include the following sections in both issue and PR bodies: -- Summary -- Hash changes table (definition, old hash, new hash) -- Conflicts (manual overrides) -- New upstream definitions (report-only) -- Upstream deletions (report-only, include keep/remove prompt) -- Rewrite failures -- Phantom references (commands referencing missing agents/skills) - -### Override Merge Matrix - -| Scenario | Detection | Agent Behavior | -|----------|-----------|----------------| -| Upstream unchanged + override exists | New upstream hash matches stored hash | **Preserve override.** No action needed. | -| Upstream changed + override on SAME field | Changed upstream field overlaps `manual_overrides[].field` | **Flag conflict.** Present both versions to user. Do NOT auto-merge. | -| Upstream changed + override on DIFFERENT field | Changed fields don't intersect with override fields | **Apply upstream changes normally**, preserve override fields. | -| Override is `"*"` (full local ownership) | Any upstream change | **Skip re-sync entirely.** Log that upstream was skipped. 
| - -### Conflict Presentation - -When upstream changes conflict with a manual override, present to the user: - -```markdown -CONFLICT: `agents/review/security-sentinel` field `description` - -**Your override** (overridden_at: 2026-02-15T10:30:00Z): -> "Use when auditing authentication flows for OWASP Top 10..." - -**Upstream change** (commit abc123): -> "Security audits with enhanced SAST integration..." - -**Override reason**: "Customized triggers for our auth-heavy codebase" - -Options: -1. Keep override (skip upstream change for this field) -2. Accept upstream (remove override entry from manifest) -3. Merge manually (edit the field, then record as new override via Phase 4e) -``` - -## Batch Import - -For importing multiple definitions at once: - -1. **Get shared metadata** — fetch the upstream commit SHA and generate a UTC timestamp (`date -u +'%Y-%m-%dT%H:%M:%SZ'`) once at the start. Use the same values for all entries in the batch. -2. **Fetch all upstream files** — use the batch fetch pattern from Phase 1a to download all targets in one pass. -3. List target definitions (files to import) -4. For each definition, run Phases 1-4 sequentially (converter + intelligent rewrite + write + manifest) -5. Run Phase 5 once at the end (build/test covers all) -6. Commit all changes together with a descriptive message: - -```bash -git add agents/ skills/ commands/ sync-manifest.json -git commit -m "feat: import N definitions from CEP upstream (commit abc123)" -``` - -## Red Flags — STOP If You Think This - -| Thought | Reality | -|---------|---------| -| "The converter handles everything" | Converter is mechanical only — you MUST do intelligent rewrite | -| "The description is fine as-is" | CC descriptions lack trigger conditions. Always rewrite. 
|
-| "I'll update the manifest later" | Manifest update is part of the workflow, not a follow-up |
-| "This is just a quick import" | Every import requires evaluation, conversion, rewrite, manifest, verification |
-| "I don't need to check for branding" | Regex misses contextual references. Always audit manually. |
-| "I'll skip the evaluation since the user asked for it" | User request != automatic fit. Evaluate and present findings. |
-| "Manual overrides don't apply — this is a new import" | Check anyway. Previous imports may have been customized since. |
-
-## Anti-Patterns
-
-| Anti-Pattern | Correct Approach |
-|--------------|-----------------|
-| Copy file and only run converter | Always do intelligent rewrite pass |
-| Import everything from upstream | Curate — evaluate fit before importing |
-| Skip manifest update | Every import MUST have a manifest entry |
-| Overwrite human edits on re-sync | Check `manual_overrides` first, use merge matrix |
-| Leave CC descriptions as-is | Rewrite with OC trigger conditions |
-| Forget to log rewrites | Every intelligent change goes in `rewrites[]` |
-| Import definition that duplicates existing | Enhance existing instead |
-| Skip branding audit | Always check for contextual Claude/CEP references |
-
-## Handling Upstream Deletions
-
-If an upstream file was deleted but the manifest still has an entry:
-
-1. The `findStaleEntries()` function in `src/lib/manifest.ts` detects these
-2. Do NOT auto-remove the bundled definition — it may have been intentionally kept
-3. Flag to the user: "Upstream deleted `<definition-key>`. Keep local copy or remove?"
-4. If removing: delete the bundled file AND the manifest entry
-5. 
If keeping: add `"manual_overrides": [{"field": "*", "reason": "Local ownership", "overridden_at": ""}]` to indicate full local ownership - -## Reference: Existing Bundled Content - -**Always check live inventory** — this list may be stale: - -```bash -bun src/cli.ts list agents -bun src/cli.ts list skills -bun src/cli.ts list commands -``` - -## Reference: Key Files - -| File | Purpose | -|------|---------| -| `src/lib/converter.ts` | Mechanical conversion pipeline | -| `src/lib/manifest.ts` | Manifest types and read/write/validate | -| `sync-manifest.json` | Provenance data (repo root) | -| `sync-manifest.schema.json` | JSON Schema for manifest | -| `docs/CONVERSION-GUIDE.md` | Full field mapping reference | diff --git a/AGENTS.md b/AGENTS.md index 7c0f390..39dd96d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -4,7 +4,7 @@ ## Overview -OpenCode plugin providing structured engineering workflows. Ported from the [Compound Engineering Plugin (CEP)](https://github.com/EveryInc/compound-engineering-plugin) for Claude Code, with improvements and OpenCode SDK integration. Converts CC-format agents and skills to OpenCode format. Tracks upstream provenance via `sync-manifest.json`. +OpenCode plugin providing structured engineering workflows for AI-powered development. Originally adapted from the [Compound Engineering Plugin (CEP)](https://github.com/EveryInc/compound-engineering-plugin) for Claude Code, Systematic now evolves independently with its own direction for advanced AI workflows. The CLI retains CC-format conversion capabilities for ad-hoc imports. Historical provenance is tracked in `sync-manifest.json`. **Two distinct parts:** 1. 
**TypeScript source** (`src/`) — Plugin logic, tools, config handling @@ -48,19 +48,19 @@ systematic/ │ └── lib/ # 13 core modules (see src/lib/AGENTS.md) ├── skills/ # 48 bundled skills (SKILL.md format) ├── agents/ # 29 bundled agents (5 categories: design/docs/research/review/workflow) -├── commands/ # Empty (.gitkeep) — all commands converted to skills in CEP sync +├── commands/ # Empty (.gitkeep) — commands converted to skills; dir kept for backward compat ├── docs/ # Starlight docs workspace (see docs/AGENTS.md) │ ├── scripts/ # Content generation from bundled assets │ └── src/content/ # Manual guides + generated reference ├── registry/ # OCX registry config + profiles (omo, standalone) -├── scripts/ # Build scripts (build-registry.ts, check-cep-upstream.ts) +├── scripts/ # Build scripts (build-registry.ts) ├── assets/ # Static assets (banner SVG) ├── tests/ │ ├── unit/ # 13 test files │ └── integration/ # 2 test files ├── .opencode/ # Project-specific OC config + skills + commands -│ ├── skills/ # Project-only skills (convert-cc-defs) -│ └── commands/ # Project-only commands (generate-readme, sync-cep) +│ ├── skills/ # Project-only skills +│ └── commands/ # Project-only commands (generate-readme) ├── sync-manifest.json # Upstream provenance tracking └── dist/ # Build output ``` @@ -74,7 +74,7 @@ systematic/ | Skill tool implementation | `src/lib/skill-tool.ts` | | Skill loading + formatting | `src/lib/skill-loader.ts` | | Bootstrap injection | `src/lib/bootstrap.ts` | -| CEP→OpenCode conversion | `src/lib/converter.ts` | +| CC→OpenCode conversion (CLI) | `src/lib/converter.ts` | | YAML frontmatter parsing | `src/lib/frontmatter.ts` | | Agent config validation + type guards | `src/lib/validation.ts` | | Asset discovery | `src/lib/skills.ts`, `agents.ts`, `commands.ts` | @@ -84,9 +84,7 @@ systematic/ | CLI commands | `src/cli.ts` | | Add new skill | `skills//SKILL.md` | | Add new agent | `agents//.md` | -| Import from CEP upstream | 
`.opencode/skills/convert-cc-defs/SKILL.md` | | OCX registry building | `scripts/build-registry.ts` | -| Upstream sync checking | `scripts/check-cep-upstream.ts` | | Docs content generation | `docs/scripts/transform-content.ts` | | Docs site config | `docs/astro.config.mjs` | @@ -98,7 +96,7 @@ systematic/ | `createConfigHandler` | fn | src/lib/config-handler.ts:215 | 3 | Config hook — merges bundled assets | | `createSkillTool` | fn | src/lib/skill-tool.ts:87 | 3 | systematic_skill tool factory | | `getBootstrapContent` | fn | src/lib/bootstrap.ts:32 | 3 | System prompt injection | -| `convertContent` | fn | src/lib/converter.ts:371 | 4 | CEP→OpenCode body conversion | +| `convertContent` | fn | src/lib/converter.ts:371 | 4 | CC→OpenCode body conversion | | `convertFileWithCache` | fn | src/lib/converter.ts:411 | 6 | Cached file conversion (mtime invalidation) | | `findSkillsInDir` | fn | src/lib/skills.ts:90 | 6 | Skill discovery (highest centrality) | | `findAgentsInDir` | fn | src/lib/agents.ts:49 | 4 | Agent discovery (category from subdir) | @@ -153,7 +151,7 @@ All disabled lists merge (union), bootstrap config shallow-merges. ## Upstream Sync -CEP definitions are imported via the `convert-cc-defs` skill (`.opencode/skills/`). `sync-manifest.json` tracks provenance: upstream commit, content hash, rewrites applied, and manual overrides. Re-sync compares hashes for idempotency. +CEP definitions were historically imported via the `convert-cc-defs` skill. `sync-manifest.json` tracks provenance: upstream commit, content hash, rewrites applied, and manual overrides. **Automated sync is now disabled** — Systematic evolves independently. The CLI `convert` command remains available for ad-hoc CC→OpenCode conversions. The latest upstream sync (commit 74fb717) converted all commands to skills — `commands/` now contains only `.gitkeep`. Command code paths (`findCommandsInDir`, `loadCommandAsConfig`) remain for backward compatibility and project-specific commands. 
@@ -167,5 +165,5 @@ The latest upstream sync (commit 74fb717) converted all commands to skills — ` - Use `bun src/cli.ts` for local dev instead of `bunx systematic` to avoid slow resolution - `commands/` dir retained (with `.gitkeep`) for backward compatibility — code paths still support commands - `registry/` provides OCX component-level installation with omo and standalone profiles -- `scripts/check-cep-upstream.ts` detects new/changed upstream definitions for sync -- `.opencode/commands/` has project-only commands: `generate-readme` (README generation), `sync-cep` (upstream sync) +- `.opencode/commands/` has project-only commands: `generate-readme` (README generation) +- `sync-manifest.json` is historical provenance data — no longer actively synced diff --git a/scripts/check-cep-upstream.ts b/scripts/check-cep-upstream.ts deleted file mode 100644 index 392a222..0000000 --- a/scripts/check-cep-upstream.ts +++ /dev/null @@ -1,448 +0,0 @@ -#!/usr/bin/env bun -import { createHash } from 'node:crypto' -import { CONVERTER_VERSION } from '../src/lib/converter.js' -import { readManifest, type SyncManifest } from '../src/lib/manifest.js' - -export interface CheckSummary { - hashChanges: string[] - newUpstream: string[] - newUpstreamFiles: Record - deletions: string[] - skipped: string[] - converterVersionChanged: boolean - errors: string[] -} - -export interface CheckInputs { - manifest: SyncManifest - upstreamDefinitionKeys: string[] - upstreamContents: Record - treePaths: string[] - converterVersion: number -} - -export interface FetchResult { - definitionKeys: string[] - contents: Record - treePaths: string[] - hadError: boolean -} - -const MANIFEST_PATH = 'sync-manifest.json' - -const hashContent = (content: string): string => - createHash('sha256').update(content).digest('hex') - -const joinUpstreamPath = (base: string, file: string): string => - `${base.replace(/\/$/, '')}/${file}` - -const CEP_PREFIX = 'plugins/compound-engineering/' - -export const 
toDefinitionKey = (path: string): string | null => { - const prefix = CEP_PREFIX - if (!path.startsWith(prefix)) return null - - const rest = path.slice(prefix.length) - if (rest.startsWith('agents/') && rest.endsWith('.md')) { - return rest.replace(/\.md$/, '') - } - - if (rest.startsWith('commands/') && rest.endsWith('.md')) { - return rest.replace(/\.md$/, '') - } - - if (rest.startsWith('skills/')) { - const parts = rest.split('/') - if (parts.length === 2 && parts[1].endsWith('.md')) { - return `${parts[0]}/${parts[1].replace(/\.md$/, '')}` - } - if (parts.length >= 3 && parts[2] === 'SKILL.md') { - return `${parts[0]}/${parts[1]}` - } - } - - return null -} - -const collectSkillFiles = (treePaths: string[], key: string): string[] => { - const dirPrefix = `${CEP_PREFIX}${key}/` - const files: string[] = [] - for (const path of treePaths) { - if (path.startsWith(dirPrefix)) { - files.push(path.slice(dirPrefix.length)) - } - } - return files.sort() -} - -/** - * Given the full tree paths and a set of new definition keys, collect all files - * belonging to each new definition. For skills this means all files under the - * skill directory; for agents/commands it's the single .md file. - */ -export const collectNewUpstreamFiles = ( - treePaths: string[], - newKeys: string[], -): Record => { - const result: Record = {} - const treeSet = new Set(treePaths) - for (const key of newKeys) { - if (key.startsWith('skills/')) { - const files = collectSkillFiles(treePaths, key) - if (files.length > 0) { - result[key] = files - } - } else { - const filePath = `${CEP_PREFIX}${key}.md` - if (treeSet.has(filePath)) { - result[key] = [`${key.split('/').pop()}.md`] - } - } - } - return result -} - -const hasWildcardOverride = (manifest: SyncManifest, key: string): boolean => { - const overrides = manifest.definitions[key]?.manual_overrides ?? 
[] - return overrides.some((override) => override.field === '*') -} - -const computeSkillHash = ( - basePath: string, - files: string[], - upstreamContents: Record, - errors: string[], -): string | null => { - const ordered = [...files].sort() - let hasMissing = false - const parts: string[] = [] - for (const file of ordered) { - const path = joinUpstreamPath(basePath, file) - const content = upstreamContents[path] - if (content == null) { - errors.push( - `Missing upstream content for sub-file (may be a transient fetch failure or the file was removed upstream): ${path}`, - ) - hasMissing = true - continue - } - parts.push(content) - } - if (hasMissing) return null - return hashContent(parts.join('\0')) -} - -const recordMissingContent = ( - upstreamContents: Record, - path: string, - errors: string[], -): boolean => { - if (path in upstreamContents) return false - errors.push( - `Missing upstream content for sub-file (may be a transient fetch failure or the file was removed upstream): ${path}`, - ) - return true -} - -const computeEntryHash = ( - entry: SyncManifest['definitions'][string], - upstreamContents: Record, - errors: string[], -): string | null => { - const upstreamPath = entry.upstream_path - if (!entry.files?.length) { - if (recordMissingContent(upstreamContents, upstreamPath, errors)) { - return null - } - return hashContent(upstreamContents[upstreamPath] ?? 
'') - } - - return computeSkillHash(upstreamPath, entry.files, upstreamContents, errors) -} - -export const getRequiredUpstreamContentPaths = ({ - manifest, - upstreamDefinitionKeys, -}: { - manifest: SyncManifest - upstreamDefinitionKeys: string[] -}): string[] => { - const paths = new Set() - for (const key of upstreamDefinitionKeys) { - const entry = manifest.definitions[key] - if (!entry) continue - if (entry.files && entry.files.length > 0) { - for (const file of entry.files) { - paths.add(joinUpstreamPath(entry.upstream_path, file)) - } - } else { - paths.add(entry.upstream_path) - } - } - return Array.from(paths).sort() -} - -const isObject = (value: unknown): value is Record => - typeof value === 'object' && value !== null && !Array.isArray(value) - -const isString = (value: unknown): value is string => typeof value === 'string' - -const parseTreePaths = (raw: string): string[] => { - let parsed: unknown - try { - parsed = JSON.parse(raw) - } catch { - return [] - } - - if (!isObject(parsed)) return [] - const tree = parsed.tree - if (!Array.isArray(tree)) return [] - - const results: string[] = [] - for (const item of tree) { - if (!isObject(item)) continue - if (item.type !== 'blob') continue - if (!isString(item.path)) continue - results.push(item.path) - } - return results -} - -const MAX_RETRIES = 3 -const BASE_DELAY_MS = 1000 -const MAX_DELAY_MS = 10000 - -const isRetryStatus = (status: number): boolean => - status === 403 || status === 429 - -const readRetryAfterSeconds = (response: Response): number | null => { - const value = response.headers.get('retry-after') - if (!value) return null - const parsed = Number.parseInt(value, 10) - return Number.isNaN(parsed) ? null : parsed -} - -const computeDelayMs = (attempt: number, response?: Response): number => { - const retryAfter = response ? 
readRetryAfterSeconds(response) : null - if (retryAfter != null) { - return Math.min(retryAfter * 1000, MAX_DELAY_MS) - } - return Math.min(BASE_DELAY_MS * 2 ** (attempt - 1), MAX_DELAY_MS) -} - -const sleep = (ms: number): Promise => - new Promise((resolve) => setTimeout(resolve, ms)) - -const fetchWithRetry = async ( - url: string, - fetchFn: (url: string) => Promise, -): Promise<{ response: Response | null; hadError: boolean }> => { - for (let attempt = 1; attempt <= MAX_RETRIES; attempt += 1) { - const response = await fetchFn(url) - if (response.ok) return { response, hadError: false } - if (!isRetryStatus(response.status)) { - return { response, hadError: true } - } - if (attempt === MAX_RETRIES) { - return { response, hadError: true } - } - await sleep(computeDelayMs(attempt, response)) - } - - return { response: null, hadError: true } -} - -export const fetchUpstreamData = async ( - repo: string, - branch: string, - paths: string[], - fetchFn: (url: string) => Promise, -): Promise => { - let hadError = false - const definitionKeys = new Set() - const contents: Record = {} - - const treeUrl = `https://api.github.com/repos/${repo}/git/trees/${branch}?recursive=1` - const treeResult = await fetchWithRetry(treeUrl, fetchFn) - if (!treeResult.response || !treeResult.response.ok) { - return { definitionKeys: [], contents: {}, treePaths: [], hadError: true } - } - const treeRaw = await treeResult.response.text() - const treePaths = parseTreePaths(treeRaw) - for (const path of treePaths) { - const key = toDefinitionKey(path) - if (key != null) definitionKeys.add(key) - } - - for (const path of paths) { - const contentUrl = `https://api.github.com/repos/${repo}/contents/${path}?ref=${branch}` - const result = await fetchWithRetry(contentUrl, fetchFn) - if (!result.response || !result.response.ok) { - if (result.response?.status !== 404) { - hadError = true - } - continue - } - const payload: unknown = await result.response.json() - if (!isObject(payload) || 
!isString(payload.content)) { - hadError = true - continue - } - const decoded = Buffer.from(payload.content, 'base64').toString('utf8') - contents[path] = decoded - } - - return { - definitionKeys: Array.from(definitionKeys).sort(), - contents, - treePaths, - hadError, - } -} - -export const computeCheckSummary = ({ - manifest, - upstreamDefinitionKeys, - upstreamContents, - treePaths, - converterVersion, -}: CheckInputs): CheckSummary => { - const hashChanges: string[] = [] - const newUpstream: string[] = [] - const deletions: string[] = [] - const skipped: string[] = [] - const errors: string[] = [] - - const manifestKeys = Object.keys(manifest.definitions) - const upstreamSet = new Set(upstreamDefinitionKeys) - - for (const key of upstreamDefinitionKeys) { - if (!manifest.definitions[key]) { - newUpstream.push(key) - } - } - - for (const key of manifestKeys) { - if (!upstreamSet.has(key)) { - deletions.push(key) - continue - } - - if (hasWildcardOverride(manifest, key)) { - skipped.push(key) - continue - } - - const entry = manifest.definitions[key] - const currentHash = entry.upstream_content_hash ?? 
'' - const nextHash = computeEntryHash(entry, upstreamContents, errors) - if (!nextHash) { - continue - } - - if (nextHash !== currentHash) { - hashChanges.push(key) - } - } - - const newUpstreamFiles = collectNewUpstreamFiles(treePaths, newUpstream) - - return { - hashChanges, - newUpstream, - newUpstreamFiles, - deletions, - skipped, - errors, - converterVersionChanged: - manifest.converter_version !== undefined && - manifest.converter_version !== converterVersion, - } -} - -export const hasChanges = (summary: CheckSummary): boolean => { - return ( - summary.hashChanges.length > 0 || - summary.newUpstream.length > 0 || - summary.deletions.length > 0 || - summary.converterVersionChanged - ) -} - -export const getExitCode = ( - summary: CheckSummary, - hadError: boolean, -): number => { - if (hadError || summary.errors.length > 0) return 2 - return hasChanges(summary) ? 1 : 0 -} - -/** - * Creates an authenticated fetch wrapper for GitHub API requests. - * - * Returns a fetch function that automatically includes GitHub authentication headers - * when provided a token. Falls back to unauthenticated fetch for empty/missing tokens. - * This increases API rate limits from 60 to 5000 requests/hour during CI runs. 
- * - * @param token - GitHub authentication token (PAT or fine-grained token) - * @returns A fetch function with authentication headers, or raw fetch if no token - */ -export const createAuthenticatedFetch = ( - token: string | undefined, -): ((url: string) => Promise) => { - if (!token) return fetch - return (url: string) => - fetch(url, { - headers: { - Authorization: `Bearer ${token}`, - Accept: 'application/vnd.github.v3+json', - }, - }) -} - -const main = (): void => { - const manifest = readManifest(MANIFEST_PATH) - if (!manifest) { - process.exit(2) - } - - const source = manifest.sources.cep - if (!source) { - process.exit(2) - } - - const run = async (): Promise => { - const requiredPaths = getRequiredUpstreamContentPaths({ - manifest, - upstreamDefinitionKeys: Object.keys(manifest.definitions), - }) - const fetchFn = createAuthenticatedFetch(process.env.GITHUB_TOKEN) - const fetchResult = await fetchUpstreamData( - source.repo, - source.branch, - requiredPaths, - fetchFn, - ) - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: fetchResult.definitionKeys, - upstreamContents: fetchResult.contents, - treePaths: fetchResult.treePaths, - converterVersion: CONVERTER_VERSION, - }) - - console.log(JSON.stringify(summary, null, 2)) - process.exit(getExitCode(summary, fetchResult.hadError)) - } - - run().catch((error: unknown) => { - console.error('check-cep-upstream failed:', error) - process.exit(2) - }) -} - -if (import.meta.main) { - main() -} diff --git a/tests/unit/check-cep-upstream.test.ts b/tests/unit/check-cep-upstream.test.ts deleted file mode 100644 index 9651d87..0000000 --- a/tests/unit/check-cep-upstream.test.ts +++ /dev/null @@ -1,753 +0,0 @@ -import { describe, expect, it } from 'bun:test' -import { createHash } from 'node:crypto' -import { - collectNewUpstreamFiles, - computeCheckSummary, - createAuthenticatedFetch, - fetchUpstreamData, - getExitCode, - getRequiredUpstreamContentPaths, - hasChanges, - toDefinitionKey, 
-} from '../../scripts/check-cep-upstream.ts' -import type { SyncManifest } from '../../src/lib/manifest.ts' - -const hash = (content: string): string => - createHash('sha256').update(content).digest('hex') - -const baseManifest = (): SyncManifest => ({ - converter_version: 2, - sources: { - cep: { - repo: 'EveryInc/compound-engineering-plugin', - branch: 'main', - url: 'https://github.com/EveryInc/compound-engineering-plugin', - }, - }, - definitions: { - 'agents/review/security-sentinel': { - source: 'cep', - upstream_path: - 'plugins/compound-engineering/agents/review/security-sentinel.md', - upstream_commit: 'abc123', - synced_at: '2026-02-15T00:00:00Z', - notes: 'test', - upstream_content_hash: hash('agent'), - }, - }, -}) - -describe('check-cep-upstream helpers', () => { - it('maps upstream paths to manifest definition keys', () => { - expect( - toDefinitionKey( - 'plugins/compound-engineering/agents/review/security-sentinel.md', - ), - ).toBe('agents/review/security-sentinel') - expect( - toDefinitionKey( - 'plugins/compound-engineering/commands/workflows/plan.md', - ), - ).toBe('commands/workflows/plan') - expect( - toDefinitionKey( - 'plugins/compound-engineering/skills/agent-native-architecture/SKILL.md', - ), - ).toBe('skills/agent-native-architecture') - expect( - toDefinitionKey( - 'plugins/compound-engineering/skills/agent-native-architecture/references/one.md', - ), - ).toBeNull() - }) - - it('collects upstream content paths for tracked definitions', () => { - const manifest = baseManifest() - manifest.definitions['skills/agent-native-architecture'] = { - source: 'cep', - upstream_path: - 'plugins/compound-engineering/skills/agent-native-architecture', - upstream_commit: 'abc123', - synced_at: '2026-02-15T00:00:00Z', - notes: 'test', - files: ['SKILL.md', 'references/one.md'], - upstream_content_hash: hash('ab'), - } - - const required = getRequiredUpstreamContentPaths({ - manifest, - upstreamDefinitionKeys: [ - 'agents/review/security-sentinel', - 
'skills/agent-native-architecture', - ], - }) - - expect(required).toEqual([ - 'plugins/compound-engineering/agents/review/security-sentinel.md', - 'plugins/compound-engineering/skills/agent-native-architecture/SKILL.md', - 'plugins/compound-engineering/skills/agent-native-architecture/references/one.md', - ]) - }) - it('returns no changes when hashes and converter version match', () => { - const manifest = baseManifest() - const upstreamContents = { - 'plugins/compound-engineering/agents/review/security-sentinel.md': - 'agent', - } - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: ['agents/review/security-sentinel'], - upstreamContents, - treePaths: [], - converterVersion: 2, - }) - - expect(hasChanges(summary)).toBe(false) - expect(summary.errors).toEqual([]) - expect(getExitCode(summary, false)).toBe(0) - }) - - it('reports hash changes when upstream content differs', () => { - const manifest = baseManifest() - const upstreamContents = { - 'plugins/compound-engineering/agents/review/security-sentinel.md': - 'changed', - } - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: ['agents/review/security-sentinel'], - upstreamContents, - treePaths: [], - converterVersion: 2, - }) - - expect(summary.hashChanges).toEqual(['agents/review/security-sentinel']) - expect(summary.errors).toEqual([]) - expect(hasChanges(summary)).toBe(true) - expect(getExitCode(summary, false)).toBe(1) - }) - - it('reports converter version change', () => { - const manifest = baseManifest() - manifest.converter_version = 1 - const upstreamContents = { - 'plugins/compound-engineering/agents/review/security-sentinel.md': - 'agent', - } - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: ['agents/review/security-sentinel'], - upstreamContents, - treePaths: [], - converterVersion: 2, - }) - - expect(summary.converterVersionChanged).toBe(true) - expect(summary.errors).toEqual([]) - 
expect(hasChanges(summary)).toBe(true) - }) - - it('reports new upstream definitions and deletions', () => { - const manifest = baseManifest() - const upstreamContents = { - 'plugins/compound-engineering/agents/review/security-sentinel.md': - 'agent', - } - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: ['skills/new-skill'], - upstreamContents, - treePaths: [ - 'plugins/compound-engineering/skills/new-skill/SKILL.md', - 'plugins/compound-engineering/skills/new-skill/references/guide.md', - ], - converterVersion: 2, - }) - - expect(summary.newUpstream).toEqual(['skills/new-skill']) - expect(summary.newUpstreamFiles).toEqual({ - 'skills/new-skill': ['SKILL.md', 'references/guide.md'], - }) - expect(summary.deletions).toEqual(['agents/review/security-sentinel']) - expect(summary.errors).toEqual([]) - expect(hasChanges(summary)).toBe(true) - }) - - it('handles multi-file skills hashing', () => { - const manifest: SyncManifest = { - converter_version: 2, - sources: { - cep: { - repo: 'EveryInc/compound-engineering-plugin', - branch: 'main', - url: 'https://github.com/EveryInc/compound-engineering-plugin', - }, - }, - definitions: { - 'skills/agent-native-architecture': { - source: 'cep', - upstream_path: - 'plugins/compound-engineering/skills/agent-native-architecture', - upstream_commit: 'abc123', - synced_at: '2026-02-15T00:00:00Z', - notes: 'test', - files: ['SKILL.md', 'references/one.md'], - upstream_content_hash: hash(`a\0b`), - }, - }, - } - - const upstreamContents = { - 'plugins/compound-engineering/skills/agent-native-architecture/SKILL.md': - 'a', - 'plugins/compound-engineering/skills/agent-native-architecture/references/one.md': - 'c', - } - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: ['skills/agent-native-architecture'], - upstreamContents, - treePaths: [], - converterVersion: 2, - }) - - expect(summary.hashChanges).toEqual(['skills/agent-native-architecture']) - 
expect(summary.errors).toEqual([]) - }) - - it('reports no change for multi-file skill with matching content', () => { - const manifest: SyncManifest = { - converter_version: 2, - sources: { - cep: { - repo: 'EveryInc/compound-engineering-plugin', - branch: 'main', - url: 'https://github.com/EveryInc/compound-engineering-plugin', - }, - }, - definitions: { - 'skills/agent-native-architecture': { - source: 'cep', - upstream_path: - 'plugins/compound-engineering/skills/agent-native-architecture', - upstream_commit: 'abc123', - synced_at: '2026-02-15T00:00:00Z', - notes: 'test', - files: ['SKILL.md', 'references/one.md'], - upstream_content_hash: hash(`a\0b`), - }, - }, - } - - const upstreamContents = { - 'plugins/compound-engineering/skills/agent-native-architecture/SKILL.md': - 'a', - 'plugins/compound-engineering/skills/agent-native-architecture/references/one.md': - 'b', - } - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: ['skills/agent-native-architecture'], - upstreamContents, - treePaths: [], - converterVersion: 2, - }) - - expect(summary.hashChanges).toEqual([]) - expect(summary.errors).toEqual([]) - expect(hasChanges(summary)).toBe(false) - }) - - it('skips definitions with wildcard manual_overrides', () => { - const manifest = baseManifest() - manifest.definitions['agents/review/security-sentinel'].manual_overrides = [ - { - field: '*', - reason: 'Local ownership', - overridden_at: '2026-02-15T00:00:00Z', - }, - ] - - const upstreamContents = { - 'plugins/compound-engineering/agents/review/security-sentinel.md': - 'changed', - } - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: ['agents/review/security-sentinel'], - upstreamContents, - treePaths: [], - converterVersion: 2, - }) - - expect(summary.skipped).toEqual(['agents/review/security-sentinel']) - expect(summary.errors).toEqual([]) - expect(summary.hashChanges).toEqual([]) - }) - - it('returns error exit code when error flag is set', () => { 
- const summary = { - hashChanges: [], - newUpstream: [], - newUpstreamFiles: {}, - deletions: [], - converterVersionChanged: false, - skipped: [], - errors: [], - } - - expect(getExitCode(summary, true)).toBe(2) - }) - - it('flags missing multi-file contents as errors', () => { - const manifest: SyncManifest = { - converter_version: 2, - sources: { - cep: { - repo: 'EveryInc/compound-engineering-plugin', - branch: 'main', - url: 'https://github.com/EveryInc/compound-engineering-plugin', - }, - }, - definitions: { - 'skills/agent-native-architecture': { - source: 'cep', - upstream_path: - 'plugins/compound-engineering/skills/agent-native-architecture', - upstream_commit: 'abc123', - synced_at: '2026-02-15T00:00:00Z', - notes: 'test', - files: ['SKILL.md', 'references/one.md'], - upstream_content_hash: hash('a' + 'b'), - }, - }, - } - - const upstreamContents = { - 'plugins/compound-engineering/skills/agent-native-architecture/SKILL.md': - 'a', - } - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: ['skills/agent-native-architecture'], - upstreamContents, - treePaths: [], - converterVersion: 2, - }) - - expect(summary.hashChanges).toEqual([]) - expect(summary.errors).toEqual([ - 'Missing upstream content for sub-file (may be a transient fetch failure or the file was removed upstream): plugins/compound-engineering/skills/agent-native-architecture/references/one.md', - ]) - expect(getExitCode(summary, false)).toBe(2) - }) - - it('fetches upstream data using tree and content endpoints', async () => { - const responses = new Map() - const repo = 'EveryInc/compound-engineering-plugin' - const branch = 'main' - - responses.set( - `https://api.github.com/repos/${repo}/git/trees/${branch}?recursive=1`, - new Response( - JSON.stringify({ - tree: [ - { - path: 'plugins/compound-engineering/agents/review/security-sentinel.md', - type: 'blob', - }, - ], - }), - { status: 200 }, - ), - ) - - responses.set( - 
`https://api.github.com/repos/${repo}/contents/plugins/compound-engineering/agents/review/security-sentinel.md?ref=${branch}`, - new Response( - JSON.stringify({ content: Buffer.from('agent').toString('base64') }), - { status: 200 }, - ), - ) - - const fetchFn = async (url: string): Promise => { - const response = responses.get(url) - if (!response) return new Response('missing', { status: 404 }) - return response - } - - const result = await fetchUpstreamData( - repo, - branch, - ['plugins/compound-engineering/agents/review/security-sentinel.md'], - fetchFn, - ) - - expect(result.hadError).toBe(false) - expect(result.definitionKeys).toEqual(['agents/review/security-sentinel']) - expect(result.treePaths).toEqual([ - 'plugins/compound-engineering/agents/review/security-sentinel.md', - ]) - expect(result.contents).toEqual({ - 'plugins/compound-engineering/agents/review/security-sentinel.md': - 'agent', - }) - }) - - it('retries content fetch on 429 and succeeds', async () => { - const repo = 'EveryInc/compound-engineering-plugin' - const branch = 'main' - const contentPath = - 'plugins/compound-engineering/agents/review/security-sentinel.md' - const responses = new Map() - - responses.set( - `https://api.github.com/repos/${repo}/git/trees/${branch}?recursive=1`, - new Response( - JSON.stringify({ - tree: [ - { - path: contentPath, - type: 'blob', - }, - ], - }), - { status: 200 }, - ), - ) - - let contentCalls = 0 - const fetchFn = async (url: string): Promise => { - if ( - url === - `https://api.github.com/repos/${repo}/contents/${contentPath}?ref=${branch}` - ) { - contentCalls += 1 - if (contentCalls < 2) { - return new Response('rate limited', { status: 429 }) - } - return new Response( - JSON.stringify({ content: Buffer.from('agent').toString('base64') }), - { status: 200 }, - ) - } - return responses.get(url) ?? 
new Response('missing', { status: 404 }) - } - - const result = await fetchUpstreamData(repo, branch, [contentPath], fetchFn) - - expect(contentCalls).toBe(2) - expect(result.hadError).toBe(false) - expect(result.contents[contentPath]).toBe('agent') - }) - - it('retries tree fetch on 403 and succeeds', async () => { - const repo = 'EveryInc/compound-engineering-plugin' - const branch = 'main' - const contentPath = - 'plugins/compound-engineering/agents/review/security-sentinel.md' - - let treeCalls = 0 - const fetchFn = async (url: string): Promise => { - if ( - url === - `https://api.github.com/repos/${repo}/git/trees/${branch}?recursive=1` - ) { - treeCalls += 1 - if (treeCalls < 2) { - return new Response('forbidden', { status: 403 }) - } - return new Response( - JSON.stringify({ - tree: [ - { - path: contentPath, - type: 'blob', - }, - ], - }), - { status: 200 }, - ) - } - if ( - url === - `https://api.github.com/repos/${repo}/contents/${contentPath}?ref=${branch}` - ) { - return new Response( - JSON.stringify({ content: Buffer.from('agent').toString('base64') }), - { status: 200 }, - ) - } - return new Response('missing', { status: 404 }) - } - - const result = await fetchUpstreamData(repo, branch, [contentPath], fetchFn) - - expect(treeCalls).toBe(2) - expect(result.hadError).toBe(false) - expect(result.definitionKeys).toEqual(['agents/review/security-sentinel']) - }) - - it('returns hadError when retries are exhausted', async () => { - const repo = 'EveryInc/compound-engineering-plugin' - const branch = 'main' - const contentPath = - 'plugins/compound-engineering/agents/review/security-sentinel.md' - - const fetchFn = async (url: string): Promise => { - if ( - url === - `https://api.github.com/repos/${repo}/git/trees/${branch}?recursive=1` - ) { - return new Response('rate limited', { status: 429 }) - } - return new Response('missing', { status: 404 }) - } - - const result = await fetchUpstreamData(repo, branch, [contentPath], fetchFn) - - 
expect(result.hadError).toBe(true) - }) - - it('does not set hadError for 404 content responses', async () => { - const repo = 'EveryInc/compound-engineering-plugin' - const branch = 'main' - const contentPath = - 'plugins/compound-engineering/agents/review/security-sentinel.md' - - const fetchFn = async (url: string): Promise => { - if ( - url === - `https://api.github.com/repos/${repo}/git/trees/${branch}?recursive=1` - ) { - return new Response( - JSON.stringify({ - tree: [ - { - path: contentPath, - type: 'blob', - }, - ], - }), - { status: 200 }, - ) - } - return new Response('not found', { status: 404 }) - } - - const result = await fetchUpstreamData(repo, branch, [contentPath], fetchFn) - - expect(result.hadError).toBe(false) - expect(result.contents).toEqual({}) - expect(result.definitionKeys).toEqual(['agents/review/security-sentinel']) - }) - - it('sets hadError for 500 content responses', async () => { - const repo = 'EveryInc/compound-engineering-plugin' - const branch = 'main' - const contentPath = - 'plugins/compound-engineering/agents/review/security-sentinel.md' - - const fetchFn = async (url: string): Promise => { - if ( - url === - `https://api.github.com/repos/${repo}/git/trees/${branch}?recursive=1` - ) { - return new Response( - JSON.stringify({ - tree: [ - { - path: contentPath, - type: 'blob', - }, - ], - }), - { status: 200 }, - ) - } - return new Response('server error', { status: 500 }) - } - - const result = await fetchUpstreamData(repo, branch, [contentPath], fetchFn) - - expect(result.hadError).toBe(true) - expect(result.contents).toEqual({}) - }) - - it('returns treePaths from fetchUpstreamData', async () => { - const repo = 'EveryInc/compound-engineering-plugin' - const branch = 'main' - - const fetchFn = async (url: string): Promise => { - if (url.includes('/git/trees/')) { - return new Response( - JSON.stringify({ - tree: [ - { - path: 'plugins/compound-engineering/skills/my-skill/SKILL.md', - type: 'blob', - }, - { - path: 
'plugins/compound-engineering/skills/my-skill/references/guide.md', - type: 'blob', - }, - { - path: 'plugins/compound-engineering/skills/my-skill/scripts', - type: 'tree', - }, - ], - }), - { status: 200 }, - ) - } - return new Response('not found', { status: 404 }) - } - - const result = await fetchUpstreamData(repo, branch, [], fetchFn) - - expect(result.treePaths).toEqual([ - 'plugins/compound-engineering/skills/my-skill/SKILL.md', - 'plugins/compound-engineering/skills/my-skill/references/guide.md', - ]) - }) - - it('collects files for new skill definitions from tree paths', () => { - const treePaths = [ - 'plugins/compound-engineering/skills/new-skill/SKILL.md', - 'plugins/compound-engineering/skills/new-skill/references/guide.md', - 'plugins/compound-engineering/skills/new-skill/scripts/setup.sh', - 'plugins/compound-engineering/skills/existing-skill/SKILL.md', - 'plugins/compound-engineering/agents/review/some-agent.md', - ] - - const result = collectNewUpstreamFiles(treePaths, ['skills/new-skill']) - - expect(result).toEqual({ - 'skills/new-skill': [ - 'SKILL.md', - 'references/guide.md', - 'scripts/setup.sh', - ], - }) - }) - - it('collects single file for new agent definitions', () => { - const treePaths = [ - 'plugins/compound-engineering/agents/review/new-agent.md', - 'plugins/compound-engineering/agents/review/other-agent.md', - ] - - const result = collectNewUpstreamFiles(treePaths, [ - 'agents/review/new-agent', - ]) - - expect(result).toEqual({ - 'agents/review/new-agent': ['new-agent.md'], - }) - }) - - it('returns empty files for keys not found in tree', () => { - const treePaths = ['plugins/compound-engineering/agents/review/existing.md'] - - const result = collectNewUpstreamFiles(treePaths, ['skills/ghost-skill']) - - expect(result).toEqual({}) - }) - - it('collects files for multiple new definitions at once', () => { - const treePaths = [ - 'plugins/compound-engineering/skills/skill-a/SKILL.md', - 
'plugins/compound-engineering/skills/skill-a/references/ref.md', - 'plugins/compound-engineering/skills/skill-b/SKILL.md', - 'plugins/compound-engineering/commands/workflows/new-cmd.md', - ] - - const result = collectNewUpstreamFiles(treePaths, [ - 'skills/skill-a', - 'skills/skill-b', - 'commands/workflows/new-cmd', - ]) - - expect(result).toEqual({ - 'skills/skill-a': ['SKILL.md', 'references/ref.md'], - 'skills/skill-b': ['SKILL.md'], - 'commands/workflows/new-cmd': ['new-cmd.md'], - }) - }) - - it('returns raw fetch when no token is provided', () => { - const fetchFn = createAuthenticatedFetch(undefined) - expect(fetchFn).toBe(fetch) - }) - - it('returns raw fetch when token is empty string', () => { - const fetchFn = createAuthenticatedFetch('') - expect(fetchFn).toBe(fetch) - }) - - it('returns authenticated fetch wrapper when token is provided', async () => { - const fetchFn = createAuthenticatedFetch('ghp_test123') - expect(fetchFn).not.toBe(fetch) - - let capturedHeaders: Headers | undefined - const originalFetch = globalThis.fetch - const mockFetch = async (_input: RequestInfo | URL, init?: RequestInit) => { - capturedHeaders = new Headers(init?.headers) - return new Response('ok', { status: 200 }) - } - globalThis.fetch = Object.assign(mockFetch, { - preconnect: originalFetch.preconnect, - }) as typeof fetch - - try { - await fetchFn('https://api.github.com/test') - expect(capturedHeaders?.get('Authorization')).toBe('Bearer ghp_test123') - expect(capturedHeaders?.get('Accept')).toBe( - 'application/vnd.github.v3+json', - ) - } finally { - globalThis.fetch = originalFetch - } - }) - - it('includes newUpstreamFiles in computeCheckSummary for new skills', () => { - const manifest = baseManifest() - const treePaths = [ - 'plugins/compound-engineering/skills/new-multi-skill/SKILL.md', - 'plugins/compound-engineering/skills/new-multi-skill/references/api.md', - 'plugins/compound-engineering/skills/new-multi-skill/scripts/init.sh', - 
'plugins/compound-engineering/agents/review/security-sentinel.md', - ] - - const summary = computeCheckSummary({ - manifest, - upstreamDefinitionKeys: [ - 'agents/review/security-sentinel', - 'skills/new-multi-skill', - ], - upstreamContents: { - 'plugins/compound-engineering/agents/review/security-sentinel.md': - 'agent', - }, - treePaths, - converterVersion: 2, - }) - - expect(summary.newUpstream).toEqual(['skills/new-multi-skill']) - expect(summary.newUpstreamFiles).toEqual({ - 'skills/new-multi-skill': [ - 'SKILL.md', - 'references/api.md', - 'scripts/init.sh', - ], - }) - }) -}) From 42a53572b5242cb5d3f445f6906d1fbe67304f8a Mon Sep 17 00:00:00 2001 From: "Marcus R. Brown" Date: Thu, 26 Mar 2026 15:10:35 -0700 Subject: [PATCH 2/2] fix: remove orphaned sync-cep tests and dead helpers Address PR review feedback: - Delete sync-cep workflow simulation describe block (11 tests) - Delete convert-cc-defs skill discoverability describe block (3 tests) - Remove dead helpers: buildSyncCepTestConfig, buildSyncPrompt, shouldRunSync, PrecheckSummary type - Remove unused imports: extractCommandFrontmatter, parseFrontmatter - Fix stale compound-engineering reference in orchestrating-swarms Verification: - Build: exit 0 - Typecheck: exit 0 - Lint: 54 files, no fixes - Unit tests: 305/305 pass (12 files) - Integration tests: 19/19 pass (2 files) --- skills/orchestrating-swarms/SKILL.md | 2 +- tests/integration/opencode.test.ts | 182 --------------------------- 2 files changed, 1 insertion(+), 183 deletions(-) diff --git a/skills/orchestrating-swarms/SKILL.md b/skills/orchestrating-swarms/SKILL.md index 39764a9..8528987 100644 --- a/skills/orchestrating-swarms/SKILL.md +++ b/skills/orchestrating-swarms/SKILL.md @@ -314,7 +314,7 @@ task({ ## Plugin Agent Types -From the `compound-engineering` plugin (examples): +From the Systematic plugin (examples): ### Review Agents ```javascript diff --git a/tests/integration/opencode.test.ts b/tests/integration/opencode.test.ts index 
d43a2e1..f05b3df 100644 --- a/tests/integration/opencode.test.ts +++ b/tests/integration/opencode.test.ts @@ -3,9 +3,7 @@ import fs from 'node:fs' import os from 'node:os' import path from 'node:path' import type { Config } from '@opencode-ai/sdk' -import { extractCommandFrontmatter } from '../../src/lib/commands.ts' import { createConfigHandler } from '../../src/lib/config-handler.ts' -import { parseFrontmatter } from '../../src/lib/frontmatter.ts' const OPENCODE_AVAILABLE = (() => { const result = Bun.spawnSync(['which', 'opencode']) @@ -19,14 +17,6 @@ const OPENCODE_TEST_MODEL = 'opencode/big-pickle' const REPO_ROOT = path.resolve(import.meta.dirname, '../..') -interface PrecheckSummary { - hashChanges: string[] - newUpstream: string[] - deletions: string[] - skipped: string[] - converterVersionChanged: boolean -} - interface OpencodeResult { stdout: string stderr: string @@ -45,59 +35,6 @@ function buildOpencodeConfig(): string { }) } -function buildSyncCepTestConfig(): string { - const commandPath = path.join(REPO_ROOT, '.opencode/commands/sync-cep.md') - const content = fs.readFileSync(commandPath, 'utf8') - const { body } = parseFrontmatter(content) - const frontmatter = extractCommandFrontmatter(content) - - return JSON.stringify({ - command: { - 'sync-cep': { - template: body.trim(), - description: frontmatter.description, - agent: frontmatter.agent, - model: frontmatter.model, - subtask: frontmatter.subtask, - }, - }, - agent: { - build: { - permission: { - edit: 'deny', - bash: 'deny', - }, - }, - }, - }) -} - -function buildSyncPrompt( - summary: PrecheckSummary, - scope: string, - dryRun: boolean, - exitCode: number = 1, -): string { - const dryRunFlag = dryRun ? '--dry-run' : '' - const dryRunNotice = dryRun - ? 'DRY-RUN MODE: Do not call any tools or external commands.' 
- : '' - return `/sync-cep ${scope} ${dryRunFlag} -${dryRunNotice} - -${exitCode} - - -${JSON.stringify(summary)} - - -Note: headless CI run — user will not see live output.` -} - -function shouldRunSync(exitCode: number): boolean { - return exitCode !== 0 && exitCode !== -1 -} - async function runOpencode( prompt: string, options: RunOpencodeOptions, @@ -214,85 +151,6 @@ describe.skipIf(!OPENCODE_AVAILABLE)('opencode integration', () => { ) }) -describe('sync-cep workflow simulation', () => { - const fixtures = [ - { - name: 'hash-change', - summary: { - hashChanges: ['skills/brainstorming'], - newUpstream: [], - deletions: [], - skipped: [], - converterVersionChanged: false, - }, - }, - { - name: 'report-only', - summary: { - hashChanges: [], - newUpstream: ['skills/new-skill'], - deletions: ['agents/review/security-sentinel'], - skipped: [], - converterVersionChanged: false, - }, - }, - { - name: 'converter-version', - summary: { - hashChanges: [], - newUpstream: [], - deletions: [], - skipped: [], - converterVersionChanged: true, - }, - }, - ] - - test.each(fixtures)('builds sync prompt for $name', ({ summary }) => { - const prompt = buildSyncPrompt(summary, 'all', true) - expect(prompt).toContain(JSON.stringify(summary)) - expect(prompt).toContain('/sync-cep all --dry-run') - expect(prompt).toContain('1') - expect(prompt).toContain('headless CI') - }) - - test('builds sync prompt with exit code 2 and errors', () => { - const summary: PrecheckSummary = { - hashChanges: ['skills/brainstorming'], - newUpstream: [], - deletions: [], - skipped: [], - converterVersionChanged: false, - } - const prompt = buildSyncPrompt(summary, 'all', true, 2) - expect(prompt).toContain('2') - expect(prompt).toContain(JSON.stringify(summary)) - expect(prompt).toContain('/sync-cep all --dry-run') - }) - - test('sync gate honors precheck exit codes', () => { - expect(shouldRunSync(0)).toBe(false) - expect(shouldRunSync(1)).toBe(true) - expect(shouldRunSync(2)).toBe(true) - 
expect(shouldRunSync(-1)).toBe(false) - }) - - test.skipIf(!OPENCODE_AVAILABLE)( - 'runs sync-cep command with dry-run prompt', - async () => { - const prompt = buildSyncPrompt(fixtures[0].summary, 'all', true) - const result = await runOpencode(prompt, { - cwd: REPO_ROOT, - configContent: buildSyncCepTestConfig(), - }) - - expect(result.exitCode).not.toBe(-1) - expect(result.stdout).not.toMatch(/\n\s*[→$⚙]/) - }, - TIMEOUT_MS * MAX_RETRIES, - ) -}) - describe('config handler integration', () => { let testEnv: { tempDir: string @@ -416,43 +274,3 @@ describe('opencode availability check', () => { expect(true).toBe(true) }) }) - -describe('convert-cc-defs skill discoverability', () => { - test('SKILL.md exists and is readable', () => { - const skillPath = path.join( - REPO_ROOT, - '.opencode/skills/convert-cc-defs/SKILL.md', - ) - expect(fs.existsSync(skillPath)).toBe(true) - - const content = fs.readFileSync(skillPath, 'utf8') - expect(content.length).toBeGreaterThan(0) - }) - - test('SKILL.md has valid frontmatter with name: convert-cc-defs', () => { - const skillPath = path.join( - REPO_ROOT, - '.opencode/skills/convert-cc-defs/SKILL.md', - ) - const content = fs.readFileSync(skillPath, 'utf8') - - const result = parseFrontmatter(content) - expect(result.hadFrontmatter).toBe(true) - expect(result.parseError).toBe(false) - expect((result.data as Record).name).toBe( - 'convert-cc-defs', - ) - }) - - test('SKILL.md contains Phase 2, Phase 3, and Phase 4 section headings', () => { - const skillPath = path.join( - REPO_ROOT, - '.opencode/skills/convert-cc-defs/SKILL.md', - ) - const content = fs.readFileSync(skillPath, 'utf8') - - expect(content).toContain('## Phase 2: Mechanical Conversion') - expect(content).toContain('## Phase 3: Intelligent Rewrite') - expect(content).toContain('## Phase 4: Write and Register') - }) -})