diff --git a/.gitignore b/.gitignore index 6a57e75..086309e 100644 --- a/.gitignore +++ b/.gitignore @@ -2,8 +2,10 @@ node_modules/ bun.lockb pnpm-lock.yaml dist/ +coverage/ .worktrees/ docs/plans/ +docs/progress/ docs/research/ .DS_Store .history/ diff --git a/CHANGELOG.md b/CHANGELOG.md index c583dd8..b72264f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,20 @@ All notable changes to this project are documented here. Dates use the ISO format (YYYY-MM-DD). +## [Unreleased] + +### Added +- **Dynamic model discovery**: authoritative `/backend-api/codex/models` catalog with per-account cache and strict allowlist. +- **Personality caching**: seeds Friendly/Pragmatic defaults from runtime model metadata when available. + +### Changed +- **Logging safety**: request logs redact `prompt_cache_key` when request logging is enabled. +- **Catalog cache hygiene**: invalid `codex-models-cache-<accountId>.json` files are deleted on read. +- **Config surface**: removed legacy `codexMode` flag (no longer supported). + +### Docs +- Refresh configuration, architecture, and troubleshooting to match hard-stop and catalog behavior. + ## [4.6.0] - 2026-02-04 **Quarantine + Multi-Account Reliability release**: safer storage handling, clearer recovery, and diff --git a/README.md b/README.md index 4cf03c5..6dd5951 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ![Image 1: opencode-openai-codex-auth](assets/readme-hero.svg) -**This project is now EOL and no further developments will be made. 
A complete rewrite, based on the current native implementation of OpenAI's OAuth in Opencode, is now underway and will be available at [https://github.com/iam-brain/opencode-openai-multi](https://github.com/iam-brain/opencode-openai-multi) when complete.** +**Maintenance fork:** This project continues to receive hardening and compatibility updates while a full rewrite (based on OpenCode's native OAuth) is underway at [https://github.com/iam-brain/opencode-openai-multi](https://github.com/iam-brain/opencode-openai-multi). Fork maintained by [iam-brain](https://github.com/iam-brain). @@ -33,12 +33,12 @@ npx -y opencode-openai-codex-multi-auth@latest Then: ```bash opencode auth login -opencode run "write hello world to test.txt" --model=openai/gpt-5.2 --variant=medium +opencode run "write hello world to test.txt" --model=openai/gpt-5.3-codex --variant=medium ``` Legacy OpenCode (v1.0.209 and below): ```bash npx -y opencode-openai-codex-multi-auth@latest --legacy -opencode run "write hello world to test.txt" --model=openai/gpt-5.2-medium +opencode run "write hello world to test.txt" --model=openai/gpt-5.3-codex-medium ``` Uninstall: ```bash @@ -57,6 +57,7 @@ opencode auth login --- ## πŸ“¦ Models +- **gpt-5.3-codex** (low/medium/high/xhigh) - **gpt-5.2** (none/low/medium/high/xhigh) - **gpt-5.2-codex** (low/medium/high/xhigh) - **gpt-5.1-codex-max** (low/medium/high/xhigh) @@ -68,24 +69,22 @@ opencode auth login - Modern (OpenCode v1.0.210+): `config/opencode-modern.json` - Legacy (OpenCode v1.0.209 and below): `config/opencode-legacy.json` - Installer template source: latest GitHub release β†’ GitHub `main` β†’ bundled static template fallback -- Runtime model metadata source: Codex `/backend-api/codex/models` β†’ local cache β†’ GitHub `models.json` (release/main) β†’ static template defaults +- Runtime model metadata source: Codex `/backend-api/codex/models` β†’ per-account local cache (server-derived). Requests fail closed if the catalog is unavailable. 
Minimal configs are not supported for GPT‑5.x; use the full configs above. -Personality is supported for all current and future models via `options.personality`: +Personality is configured in `~/.config/opencode/openai-codex-auth-config.json` via `custom_settings`: ```json { - "provider": { - "openai": { - "options": { - "personality": "friendly" - }, - "models": { - "gpt-5.3-codex": { - "options": { - "personality": "pragmatic" - } + "custom_settings": { + "options": { + "personality": "Idiot" + }, + "models": { + "gpt-5.3-codex": { + "options": { + "personality": "pragmatic" } } } @@ -93,9 +92,13 @@ Personality is supported for all current and future models via `options.personal } ``` -Accepted values: `none`, `friendly`, `pragmatic` (case-insensitive). +Personality descriptions come from: +- Project-local `.opencode/Personalities/*.md` +- Global `~/.config/opencode/Personalities/*.md` -Legacy note: `codexMode` is deprecated and now a no-op. +The filename (case-insensitive) defines the key (e.g., `Idiot.md`), and the file contents are used verbatim. + +Built-ins: `none`, `default` (uses model runtime defaults), `friendly`, `pragmatic` (fallback if unset). Any other key requires a matching personality file. --- ## ⌨️ Slash Commands (TUI) In the OpenCode TUI, you can use these commands to manage your accounts and monitor usage: @@ -105,23 +108,30 @@ In the OpenCode TUI, you can use these commands to manage your accounts and moni | `/codex-status` | Shows current rate limits (5h/Weekly), credits, and account status (percent left). | | `/codex-switch-accounts <index>` | Switch the active account by its 1-based index from the status list. | | `/codex-toggle-account <index>` | Enable or disable an account by its 1-based index (prevents auto-selection). | +| `/codex-remove-account <index>` | Remove an account by its 1-based index. 
| --- ## βœ… Features - ChatGPT Plus/Pro OAuth authentication (official flow) -- 22 model presets across GPT‑5.2 / GPT‑5.2 Codex / GPT‑5.1 families +- Model presets across GPT‑5.3 Codex / GPT‑5.2 / GPT‑5.2 Codex / GPT‑5.1 families - Variant system support (v1.0.210+) + legacy presets - Multimodal input enabled for all models - Usage‑aware errors + automatic token refresh -- Online-first template/model metadata resolution with resilient fallbacks +- Authoritative model catalog validation (`/codex/models`) with per-account cache - Multi-account support with sticky selection + PID offset (great for parallel agents) - Account enable/disable management (via `opencode auth login` manage) +- Hard-stop safety loops for unavailable accounts and unsupported models - Strict account identity matching (`accountId` + `email` + `plan`) - Hybrid account selection strategy (health score + token bucket + LRU bias) - Optional round-robin account rotation (maximum throughput) - OpenCode TUI toasts + `codex-status` / `codex-switch-accounts` tools - **Authoritative Codex Status**: Real-time rate limit monitoring (5h/Weekly) with ASCII status bars --- +## πŸ›‘οΈ Safety & Reliability +- Hard-stop safety gate for all-accounts rate-limit/auth-failure loops +- Strict model allowlist from `/backend-api/codex/models` (per-account cached) +- Synthetic error responses that surface the exact failure reason +--- ## πŸ“š Docs - Getting Started: `docs/getting-started.md` - Configuration: `docs/configuration.md` diff --git a/assets/openai-codex-auth-config.schema.json b/assets/openai-codex-auth-config.schema.json index 8469e4f..72713a5 100644 --- a/assets/openai-codex-auth-config.schema.json +++ b/assets/openai-codex-auth-config.schema.json @@ -9,11 +9,48 @@ "type": "string", "description": "JSON schema reference for editor autocompletion" }, - "codexMode": { - "type": "boolean", - "default": false, - "deprecated": true, - "description": "Deprecated legacy field. 
Bridge mode has been removed and this flag is now a no-op." + "custom_settings": { + "type": "object", + "description": "Override provider options (including personality) without editing opencode.json.", + "properties": { + "options": { + "type": "object", + "description": "Global OpenAI provider option overrides.", + "properties": { + "personality": { + "type": "string", + "description": "Personality key (built-ins: none/default/friendly/pragmatic or a custom .md file name)." + } + }, + "additionalProperties": true + }, + "models": { + "type": "object", + "description": "Per-model overrides keyed by model id.", + "additionalProperties": { + "type": "object", + "properties": { + "options": { + "type": "object", + "properties": { + "personality": { + "type": "string", + "description": "Personality key override for this model." + } + }, + "additionalProperties": true + }, + "variants": { + "type": "object", + "description": "Per-variant overrides keyed by reasoning effort.", + "additionalProperties": true + } + }, + "additionalProperties": true + } + } + }, + "additionalProperties": true }, "accountSelectionStrategy": { "type": "string", @@ -121,6 +158,28 @@ "minimum": 0, "default": 1, "description": "Maximum number of all-accounts wait cycles." + }, + "hardStopMaxWaitMs": { + "type": "number", + "minimum": 0, + "default": 10000, + "description": "Maximum wait (ms) before returning a hard-stop error when no accounts are available." + }, + "hardStopOnUnknownModel": { + "type": "boolean", + "default": true, + "description": "Return a hard-stop error when the requested model is not in the server catalog." + }, + "hardStopOnAllAuthFailed": { + "type": "boolean", + "default": true, + "description": "Return a hard-stop error when all accounts are in auth-failure cooldown." + }, + "hardStopMaxConsecutiveFailures": { + "type": "number", + "minimum": 0, + "default": 5, + "description": "Maximum consecutive failures before returning a hard-stop error." 
} } } diff --git a/config/README.md b/config/README.md index 2c118cb..e4688db 100644 --- a/config/README.md +++ b/config/README.md @@ -35,7 +35,7 @@ OpenCode v1.0.210+ introduced a **variants system** that allows defining reasoni | `opencode-legacy.json` | 6 | Separate model entries | 20 individual model definitions | Both configs provide: -- βœ… All supported GPT 5.2/5.1 variants: gpt-5.2, gpt-5.2-codex, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-max, gpt-5.1-codex-mini +- βœ… All supported GPT 5.x variants: gpt-5.3-codex, gpt-5.2, gpt-5.2-codex, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-max, gpt-5.1-codex-mini - βœ… Proper reasoning effort settings for each variant (including `xhigh` for Codex Max/5.2) - βœ… Context limits (272k context / 128k output for all Codex families) - βœ… Required options: `store: false`, `include: ["reasoning.encrypted_content"]` @@ -68,12 +68,12 @@ Both configs provide: 3. **Run opencode**: ```bash # Modern config (v1.0.210+): - opencode run "task" --model=openai/gpt-5.2 --variant=medium - opencode run "task" --model=openai/gpt-5.2 --variant=high + opencode run "task" --model=openai/gpt-5.3-codex --variant=medium + opencode run "task" --model=openai/gpt-5.3-codex --variant=high # Legacy config: - opencode run "task" --model=openai/gpt-5.2-medium - opencode run "task" --model=openai/gpt-5.2-high + opencode run "task" --model=openai/gpt-5.3-codex-medium + opencode run "task" --model=openai/gpt-5.3-codex-high ``` > **⚠️ Important**: Use the config file appropriate for your OpenCode version. Using the modern config with an older OpenCode version (v1.0.209 or below) will not work correctly. 
@@ -84,6 +84,7 @@ Both configs provide: Both configs provide access to the same model families: +- **gpt-5.3-codex** (low/medium/high/xhigh) - Primary Codex model - **gpt-5.2** (none/low/medium/high/xhigh) - Latest GPT 5.2 model with full reasoning support - **gpt-5.2-codex** (low/medium/high/xhigh) - GPT 5.2 Codex presets - **gpt-5.1-codex-max** (low/medium/high/xhigh) - Codex Max presets @@ -91,7 +92,7 @@ Both configs provide access to the same model families: - **gpt-5.1-codex-mini** (medium/high) - Codex mini tier presets - **gpt-5.1** (none/low/medium/high) - General-purpose reasoning presets -All appear in the opencode model selector as "GPT 5.1 Codex Low (OAuth)", "GPT 5.1 High (OAuth)", etc. +All appear in the opencode model selector as "GPT 5.3 Codex Low (OAuth)", "GPT 5.2 High (OAuth)", etc. ## Configuration Options diff --git a/config/minimal-opencode.json b/config/minimal-opencode.json index 8f0e318..77e196d 100644 --- a/config/minimal-opencode.json +++ b/config/minimal-opencode.json @@ -8,5 +8,5 @@ } } }, - "model": "openai/gpt-5-codex" + "model": "openai/gpt-5.3-codex" } diff --git a/config/opencode-legacy.json b/config/opencode-legacy.json index b544302..58d560e 100644 --- a/config/opencode-legacy.json +++ b/config/opencode-legacy.json @@ -140,6 +140,106 @@ "store": false } }, + "gpt-5.3-codex-low": { + "name": "GPT 5.3 Codex Low (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "options": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium", + "include": [ + "reasoning.encrypted_content" + ], + "store": false + } + }, + "gpt-5.3-codex-medium": { + "name": "GPT 5.3 Codex Medium (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "options": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + 
"textVerbosity": "medium", + "include": [ + "reasoning.encrypted_content" + ], + "store": false + } + }, + "gpt-5.3-codex-high": { + "name": "GPT 5.3 Codex High (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "options": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": [ + "reasoning.encrypted_content" + ], + "store": false + } + }, + "gpt-5.3-codex-xhigh": { + "name": "GPT 5.3 Codex Extra High (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "options": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium", + "include": [ + "reasoning.encrypted_content" + ], + "store": false + } + }, "gpt-5.2-codex-low": { "name": "GPT 5.2 Codex Low (OAuth)", "limit": { diff --git a/config/opencode-modern.json b/config/opencode-modern.json index 60dc2a7..81d8ed5 100644 --- a/config/opencode-modern.json +++ b/config/opencode-modern.json @@ -58,6 +58,44 @@ } } }, + "gpt-5.3-codex": { + "name": "GPT 5.3 Codex (OAuth)", + "limit": { + "context": 272000, + "output": 128000 + }, + "modalities": { + "input": [ + "text", + "image" + ], + "output": [ + "text" + ] + }, + "variants": { + "low": { + "reasoningEffort": "low", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "medium": { + "reasoningEffort": "medium", + "reasoningSummary": "auto", + "textVerbosity": "medium" + }, + "high": { + "reasoningEffort": "high", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + }, + "xhigh": { + "reasoningEffort": "xhigh", + "reasoningSummary": "detailed", + "textVerbosity": "medium" + } + } + }, "gpt-5.2-codex": { "name": "GPT 5.2 Codex (OAuth)", "limit": { diff --git a/docs/audit/agent-findings.md b/docs/audit/agent-findings.md new file mode 100644 index 
0000000..9b042ae --- /dev/null +++ b/docs/audit/agent-findings.md @@ -0,0 +1,202 @@ +# Agent Audit Log + +Chronological record of all agent audit findings (spec + quality + general audits) for this workstream. + +## 2026-02-06 – General Audit (per-message fetches) + +- Source: `@general` audit (task_id: `ses_3ceb927fbffeYA4lkFQC6HM0jL`). +- Per-request path: `lib/fetch-orchestrator.ts:263` β†’ `lib/request/fetch-helpers.ts:102` β†’ `getCodexInstructions` + `getCodexModelRuntimeDefaults`. +- Per-request network fetches: + - `/backend-api/codex/models` called every request via `getCodexModelRuntimeDefaults` (`lib/prompts/codex-models.ts:287` β†’ `fetchModelsFromServer`). + - GitHub instructions fetch on cache miss/stale (`lib/prompts/codex.ts:136`, `lib/prompts/codex.ts:87`, `lib/prompts/codex.ts:175`). +- Per-request disk/CPU: + - Reads instruction cache/meta each request (`lib/prompts/codex.ts:148`). + - Reads models cache + static defaults each request (`lib/prompts/codex-models.ts:93`, `lib/prompts/codex-models.ts:227`). + - Codex status snapshots write per response + per SSE token_count (`lib/codex-status.ts:156`, `lib/codex-status.ts:330`). +- Existing caching: + - Instructions: ETag + 15-min TTL, on-disk cache (`lib/prompts/codex.ts`). + - Models: on-disk cache exists but server fetch happens before cache (`lib/prompts/codex-models.ts`). +- Suggested caching points: + - In-memory cache for instructions/model catalog; ETag for `/codex/models`. + - Memoize static template defaults to avoid repeated disk reads. + - Debounce codex-status disk writes. + +## 2026-02-06 – General Audit (second pass) + +- Source: `@general` audit (task_id: `ses_3cea77097ffeYwaUz9TG86fpXl`). +- Reconfirmed per-request call chain and caching behavior: + - `getCodexInstructions` called every request via `transformRequestForCodex`. + - `/codex/models` server fetch happens every request before cache. 
+- Noted per-request disk writes: + - `codexStatus.updateFromHeaders` and `updateFromSnapshot` persist every response/SSE event. +- Safe warm points: + - On plugin init / when `FetchOrchestrator` created in `index.ts`. + - Add in-memory cache in `lib/prompts/codex.ts` and `lib/prompts/codex-models.ts`. + +## 2026-02-06 – Task 1 Spec Review (initial) + +- Source: `@general` spec review (task_id: `ses_3ce991efcffe3p9hBelEPU7nD3`). +- Medium: Startup warm skipped cold start when cache/meta missing; first request still fetches. +- Low: In-memory cache expires after 15 minutes; may re-read disk mid-session. +- Low: Warm iterates all `MODEL_FAMILIES`, may refresh multiple caches at startup. +- Question: Should warm fetch on cold start when no cache/meta exists? + +## 2026-02-06 – Task 1 Spec Review (incorrect worktree) + +- Source: `@general` spec review (task_id: `ses_3ce94f9a1ffevVD7A1bjqN00gC`). +- Note: This review was later determined to inspect the wrong worktree; findings are retained for completeness. +- High: No session-long in-memory cache; per-request cache reads remain. +- High: No startup warm path; instructions fetched per request only. +- High: Cold-start still fetches from network when cache/meta missing. +- Medium: Cache writes not concurrency-safe. +- Low: Debug log gating respected. + +## 2026-02-06 – Task 1 Spec Review (correct worktree) + +- Source: `@general` spec review (task_id: `ses_3ce91db67ffepck4eOhtVmmT4h`). +- Finding: No true gaps vs requirements. +- Residual risk: In-memory cache short-circuits TTL refresh for long-lived processes. + +## 2026-02-06 – Task 1 Code Quality Review + +- Source: `@general` code quality review (task_id: `ses_3ce8f6bcaffepHzpmXYbwRVgF3`). +- High: 304 response with missing cache file falls back to bundled instructions instead of re-fetch. +- Medium: `getModelFamily` checks `gpt-5.2-codex` before `codex-max`, misclassifying `gpt-5.2-codex-max`. 
+- Medium: In-memory cache bypasses TTL/ETag refresh for long-running sessions. +- Medium: `normalizeProviderModelMetadata` deletes non-codex model entries from provider config. + +## 2026-02-06 – Task 2 Spec Review + +- Source: `@general` spec review (task_id: `ses_3ce820fb4ffesGzQL6caaaHYgO`). +- Low: Lock/atomic cache write path not covered by tests. +- Assumption: Startup warm uses cache only; no revalidation without credentials at startup. + +## 2026-02-06 – Task 2 Code Quality Review + +- Source: `@general` code quality review (task_id: `ses_3ce7f3e35ffebVxNm2PiyGh2qE`). +- Medium: `normalizeProviderModelMetadata` deletes non-codex entries (potential regression). +- Medium: In-memory models cache never re-reads disk; multi-process freshness risk. +- Low: No in-flight request dedup; parallel calls can fetch/write concurrently. +- Low: `readModelsCache` doesn’t validate entries; malformed cache could cause issues. + +## 2026-02-06 – Task 2 Spec Review (post-fix) + +- Source: `@general` spec review (task_id: `ses_3cd0851dfffeUkfUWkqdVClBZN`). +- Pass: Requirements 1–5 met (startup warm, in-session cache, ETag + atomic write, memoized defaults, auth-scoped backoff guard). +- Gap: No explicit test asserting warm avoids network when cache is stale and no auth is provided. +- Gap: Atomic/lockfile write behavior not directly tested. + +## 2026-02-06 – Task 2 Code Quality Review (post-fix) + +- Source: `@general` code quality review (task_id: `ses_3cd06b172ffeaGzskiuHOg46TU`). +- High: Backoff guard only applied when cache exists; cold start with auth + server outage would hit `/codex/models` on every call. Suggested: apply guard regardless of cache presence and allow GitHub/static fallback. +- Medium: Catalog cache is global (single memory + disk file) and not scoped to account identity; model availability could differ by account/plan. 
+ +## 2026-02-06 – Task 2 Spec Review (incorrect path) + +- Source: `@general` spec review (task_id: `ses_3cd02a971ffet5Z3MKOBI5tSrr`). +- Note: Reviewer appears to have inspected repo root instead of worktree; findings below are retained for completeness but superseded by the correct-path review. +- Critical: Claimed missing warm hook, in-memory cache, backoff, ETag handling, lockfile writes, and memoization. +- Important: Claimed missing tests for new behaviors. + +## 2026-02-06 – Task 2 Spec Review (correct path) + +- Source: `@general` spec review (task_id: `ses_3cd0057c7ffefNFj9mPQkFBrfb`). +- Pass: Requirements 1–5 met, including auth-scoped backoff and short retry window. +- Gap: No explicit test for warm avoiding network when cache is stale and no auth is provided. +- Gap: Atomic/lockfile write behavior not directly tested. + +## 2026-02-06 – Task 2 Code Quality Review (correct path) + +- Source: `@general` code quality review (task_id: `ses_3ccff1174ffez2ih78Nnx8737K`). +- High: Backoff guard did not apply when cache was missing; repeated `/codex/models` attempts possible on cold start with auth + server outage. Suggested applying guard regardless of cache presence. +- Medium: Catalog cache is global (single memory + disk file) and not scoped to account identity. + +## 2026-02-06 – Task 2 Code Quality Review (final) + +- Source: `@general` code quality review (task_id: `ses_3cced71beffeOAC5oWWtfRMsoH`). +- Low: Backoff key uses `accountId ?? "auth"` when accessToken present; access-token-only calls share a single bucket, so fresh tokens may still be throttled for up to 60s. +- Low: When server backoff is active and there is no cache, GitHub fallback is retried on each call; offline scenarios may cause repeated GitHub attempts/log spam. + +## 2026-02-06 – Task 3 Spec Review + +- Source: `@general` spec review (task_id: `ses_3ccafe7f6ffeI0Q9b6r4omMgxe`). +- Pass: Internal defaults read from cached catalog + static templates. 
+- Pass: User config overrides defaults; no config writes. +- Pass: Only base models are added; variants remain internal. +- Pass: Defaults populate when config lacks models. +- Pass: Display names derived deterministically. +- Gap: Tests do not directly cover config hook or variant cleanup behavior. + +## 2026-02-06 – Task 3 Code Quality Review + +- Source: `@general` code quality review (task_id: `ses_3ccae4250ffeRN0D9Hh437MTRF`). +- Medium: Shallow merge of model overrides would drop default limit/variants/options; suggested deep merge per model. +- Medium: Fallback applied `gpt-5.1` defaults to any `gpt-*` slug; should limit to `gpt-5.*`. +- Low: Test did not restore `XDG_CONFIG_HOME` or cleanup temp dir on failure. + +## 2026-02-06 – Task 4 Code Quality Review (normalizeModel false-positive reduction) + +- Source: `@general` audit (task_id: `ses_3cbe3808bffeYlvKjJRr6zpOyj`, re-verified after corrections). +- Medium: `docs/development/TESTING.md:18-32` still asserts `gpt-5-mini`/`gpt-5-nano` normalize to `gpt-5`; current behavior preserves `gpt-5-mini`/`gpt-5-nano` (lowercased) in `lib/request/request-transformer.ts`. +- Medium: `docs/development/TESTING.md:66-112` still shows `gpt-5-codex-low`/verbose names normalizing to `gpt-5-codex`; current behavior preserves legacy identifiers instead of coercing. +- Medium: `docs/development/CONFIG_FIELDS.md:167-170` and `docs/development/CONFIG_FIELDS.md:535-586` describe normalizeModel mapping `gpt-5-codex-low` or any "codex" key to `gpt-5-codex`; current implementation no longer performs substring-based coercion. + +## 2026-02-06 – Task 5 Code Quality Review (gpt-5.3-codex first-class) + +- Source: `@general` audit (task_id: `ses_3cbd48399ffe7bBI3iN9CxbtCx`, verified against worktree). +- Pass: Model family list and instruction cache mapping now include `gpt-5.3-codex` (`lib/constants.ts`, `lib/prompts/codex.ts`). 
+- Low: `gpt-5.3-codex` reuses the `gpt-5.2-codex_prompt.md` upstream prompt file until a dedicated 5.3 prompt exists; if upstream adds one, update `PROMPT_FILES`. + +## 2026-02-06 – Task 6 Documentation Review (normalization + personalities) + +- Source: `@general` audit (task_id: `ses_3cbc43d63ffeOxSxqwDG4eY8dm`, verified against worktree). +- Fixes applied: Updated normalization examples and debug output in `docs/development/TESTING.md`, corrected API normalization notes in `docs/development/CONFIG_FIELDS.md`, and aligned `test/README.md` summary with current normalizeModel behavior. +- Status: No remaining doc mismatches found after updates. + +## 2026-02-06 – Task 4 Spec Review (gpt-5.3-codex first-class) + +- Source: `@general` spec review (task_id: `ses_3cb193c03ffeDw3Go9yCI9BHfH`). +- High: Docs changed despite scope excluding docs (e.g., `README.md`, `docs/configuration.md`, `docs/getting-started.md`, `docs/development/ARCHITECTURE.md`, `docs/development/TESTING.md`). +- Medium: `config/minimal-opencode.json` still uses `openai/gpt-5-codex` (not 5.3). +- Low: Possible scope creep in new personality/catalog/caching additions (`lib/personalities.ts`, `lib/catalog-defaults.ts`, related tests). + +## 2026-02-06 – Task 4 Code Quality Review (gpt-5.3-codex first-class) + +- Source: `@general` code quality review (task_id: `ses_3cb14e2b6ffemzgHx57X8C0cJS`). +- High: Legacy model normalization removed; legacy/verbose IDs now pass through unchanged (potential invalid model IDs). +- Medium: Personality resolution defaults to pragmatic unless `custom_settings` is set, ignoring user config/runtime defaults. +- Medium: `buildInternalModelDefaults` applies gpt-5.3-codex defaults to any `gpt-5.*` slug (including non-codex). +- Medium: Variant filtering uses cached supported reasoning levels without TTL validation; stale cache may delete variants. +- Low: Server `base_instructions` + `apply_patch_tool_type` fetched but unused in request instruction building. 
+- Low: New test may bypass required fixture seeding (`test/models-gpt-5.3-codex.test.ts`). + +## 2026-02-06 – Task 4 Spec Review (re-check) + +- Source: `@general` spec review (task_id: `ses_3cb0a3449ffeDsmXG6zohLAzUr`). +- Pass: `config/minimal-opencode.json` updated to `openai/gpt-5.3-codex`. +- Remaining gap: Doc edits still present even though docs are a separate task (`README.md`, `docs/getting-started.md`, `docs/development/TESTING.md`, `docs/development/ARCHITECTURE.md`, `docs/configuration.md`). +- Note: Possible scope creep remains (`lib/personalities.ts`, `lib/catalog-defaults.ts`). + +## 2026-02-06 – Task 6 Spec Review (docs update, initial) + +- Source: `@general` spec review (task_id: `ses_3cacdf02ffferw61yrheL5qS96`). +- High: Worktree includes non-doc edits; doc-only scope flagged (code/config/test files present). +- Medium: β€œBest Practice” example still referenced GPT‑5.2 presets (`docs/development/CONFIG_FIELDS.md`). +- Low: Cache examples still centered on GPT‑5.2 (`docs/privacy.md`). + +## 2026-02-06 – Task 6 Spec Review (docs update, final) + +- Source: `@general` spec review (task_id: `ses_3cabf347cffeFXV5oqyuqeALZV`). +- Pass: GPT‑5.3 Codex primary examples and personality guidance aligned across docs. + +## 2026-02-06 – Task 6 Code Quality Review (docs update, re-check) + +- Source: `@general` code quality review (task_id: `ses_3cab0848effec95r4cpQRrkhl4`). +- Pass: All doc quality issues (xhigh casing, normalization examples, variant counts, legacy aliases, and personality default naming) have been corrected and aligned with plugin behavior. + +## 2026-02-06 – End-to-End Verification + +- Results: `npm run build` and `npm test` passed in worktree. +- Fixed: 2 regression tests in `test/plugin-config-hook.test.ts` related to `gpt-5.3-codex` first-class synthesis and metadata folding precedence. +- Status: All 457 tests passing. 
diff --git a/docs/configuration.md b/docs/configuration.md index 919bd0b..4ebc4ed 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -18,8 +18,8 @@ Complete reference for configuring the OpenCode OpenAI Codex Auth Plugin. "store": false }, "models": { - "gpt-5.1-codex-low": { - "name": "GPT 5.1 Codex Low (OAuth)", + "gpt-5.3-codex-low": { + "name": "GPT 5.3 Codex Low (OAuth)", "limit": { "context": 272000, "output": 128000 @@ -46,6 +46,12 @@ Complete reference for configuring the OpenCode OpenAI Codex Auth Plugin. Controls computational effort for reasoning. +**GPT-5.3-Codex Values:** +- `low` - Fastest for code +- `medium` - Balanced (default) +- `high` - Maximum code quality +- `xhigh` - Extra depth for long-horizon tasks + **GPT-5.2 Values** (per OpenAI API docs and Codex CLI `ReasoningEffort` enum): - `none` - No dedicated reasoning phase (disables reasoning) - `low` - Light reasoning @@ -79,7 +85,7 @@ Controls computational effort for reasoning. - `none` is supported for GPT-5.2 and GPT-5.1 (general purpose) per OpenAI API documentation - `none` is NOT supported for Codex variants (including GPT-5.2 Codex) - it auto-converts to `low` for Codex/Codex Max or `medium` for Codex Mini - `minimal` auto-converts to `low` for Codex models -- `xhigh` is supported for GPT-5.2, GPT-5.2 Codex, and GPT-5.1-Codex-Max; other models downgrade to `high` +- `xhigh` is supported for GPT-5.3-Codex, GPT-5.2, GPT-5.2 Codex, and GPT-5.1-Codex-Max; other models downgrade to `high` - Codex Mini only supports `medium` or `high`; lower settings clamp to `medium` **Example:** @@ -212,14 +218,14 @@ Different settings for different models: "store": false }, "models": { - "gpt-5-codex-fast": { + "gpt-5.3-codex-fast": { "name": "Fast Codex", "options": { "reasoningEffort": "low", "store": false } }, - "gpt-5-codex-smart": { + "gpt-5.3-codex-smart": { "name": "Smart Codex", "options": { "reasoningEffort": "high", @@ -313,11 +319,11 @@ Different agents use different models: { 
"agent": { "commit": { - "model": "openai/gpt-5.1-codex-low", + "model": "openai/gpt-5.3-codex-low", "prompt": "Generate concise commit messages" }, "review": { - "model": "openai/gpt-5.1-codex-high", + "model": "openai/gpt-5.3-codex-high", "prompt": "Thorough code review" } } @@ -363,17 +369,39 @@ Result: Project uses `high`, other projects use `medium`. ## Plugin Configuration -Advanced plugin settings in `~/.config/opencode/openai-codex-auth-config.json`: +Advanced plugin settings in `~/.config/opencode/openai-codex-auth-config.json`. + +### Custom Settings Overrides + +Use `custom_settings` to override OpenCode provider options without editing `opencode.json`. +These settings are merged on top of the OpenCode config at request time. ```json { - "codexMode": false + "custom_settings": { + "options": { + "personality": "friendly" + }, + "models": { + "gpt-5.3-codex": { + "options": { + "personality": "pragmatic" + } + } + } + } } ``` -### Legacy `codexMode` (No-op) +Personality descriptions come from: +- `.opencode/Personalities/*.md` (project-local) +- `~/.config/opencode/Personalities/*.md` (global) -`codexMode` is retained only for backwards compatibility. Bridge mode was removed, and this field no longer changes runtime prompt or tool behavior. +The filename (case-insensitive) defines the personality key (e.g., `Friendly.md` matches `friendly`). The file contents are used verbatim as the personality specification. + +The plugin also seeds `Friendly.md` and `Pragmatic.md` in the global directory from server-derived runtime defaults. These files are treated as a cache and are only updated when the existing file is managed by the plugin (identified by an internal marker). User-managed files are never overwritten. + +Built-ins: `none`, `default` (uses model runtime defaults), `friendly`, `pragmatic` (fallback if unset). Any other key requires a matching `.md` file in one of the locations above. 
### Multi-Account Settings @@ -386,7 +414,6 @@ Add `$schema` for editor autocompletion: ```json { "$schema": "https://raw.githubusercontent.com/iam-brain/opencode-openai-codex-multi-auth/main/assets/openai-codex-auth-config.schema.json", - "codexMode": false, "accountSelectionStrategy": "sticky", "pidOffsetEnabled": true, "quietMode": false, @@ -400,6 +427,42 @@ Add `$schema` for editor autocompletion: | `pidOffsetEnabled` | `boolean` | `true` | Enable PID-based offset for parallel agent rotation. | | `perProjectAccounts` | `boolean` | `false` | If `true`, the plugin will look for and use account storage in `.opencode/openai-codex-accounts.json` relative to the current project. | | `quietMode` | `boolean` | `false` | Disable TUI toasts for background operations (e.g., token refreshes). | +| `rateLimitToastDebounceMs` | `number` | `60000` | Debounce account/rate-limit toasts. | +| `tokenRefreshSkewMs` | `number` | `60000` | Refresh OAuth tokens this early (ms) before expiry. | +| `proactiveTokenRefresh` | `boolean` | `false` | Enable background token refresh queue (when available). | +| `authDebug` | `boolean` | `false` | Enable debug logging (env aliases supported). | + +#### Hard-Stop Settings + +| Field | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `hardStopMaxWaitMs` | `number` | `10000` | Maximum wait before returning a hard-stop error when no accounts are available. | +| `hardStopOnUnknownModel` | `boolean` | `true` | Return a hard-stop error for models not in the server catalog. | +| `hardStopOnAllAuthFailed` | `boolean` | `true` | Return a hard-stop error when all accounts are in auth-failure cooldown. | +| `hardStopMaxConsecutiveFailures` | `number` | `5` | Maximum consecutive failures before returning a hard-stop error. | + +Default hard-stop wait is 10 seconds; increase `hardStopMaxWaitMs` if you prefer longer waits. 
+ +#### Scheduling & Retry Settings + +| Field | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `schedulingMode` | `string` | `"cache_first"` | Scheduling strategy (`cache_first`, `balance`, `performance_first`). | +| `maxCacheFirstWaitSeconds` | `number` | `60` | Max seconds to wait in cache-first mode before switching. | +| `switchOnFirstRateLimit` | `boolean` | `true` | Switch accounts immediately on the first rate-limit response. | +| `retryAllAccountsRateLimited` | `boolean` | `false` | Enable global retry loop when all accounts are rate-limited. | +| `retryAllAccountsMaxWaitMs` | `number` | `30000` | Max wait time for all-accounts retry (0 disables the limit). | +| `retryAllAccountsMaxRetries` | `number` | `1` | Max retry cycles when all accounts are rate-limited. | + +#### Rate-Limit Tuning + +| Field | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `rateLimitDedupWindowMs` | `number` | `2000` | Deduplicate rate-limit events within this window. | +| `rateLimitStateResetMs` | `number` | `120000` | Reset rate-limit state after this idle time. | +| `defaultRetryAfterMs` | `number` | `60000` | Fallback retry-after when headers are missing. | +| `maxBackoffMs` | `number` | `120000` | Cap exponential backoff for rate-limit retries. | +| `requestJitterMaxMs` | `number` | `1000` | Random jitter added to retry delays. | #### Per-Project Storage @@ -436,6 +499,32 @@ For a detailed guide, see [docs/multi-account.md](multi-account.md). 
All options can be overridden with env vars: +| Field | Env Var | Notes | +| :--- | :--- | :--- | +| `accountSelectionStrategy` | `CODEX_AUTH_ACCOUNT_SELECTION_STRATEGY` | `sticky`, `round-robin`, `hybrid` | +| `pidOffsetEnabled` | `CODEX_AUTH_PID_OFFSET_ENABLED` | Boolean | +| `perProjectAccounts` | `CODEX_AUTH_PER_PROJECT_ACCOUNTS` | Boolean | +| `quietMode` | `CODEX_AUTH_QUIET` | Boolean | +| `rateLimitToastDebounceMs` | `CODEX_AUTH_RATE_LIMIT_TOAST_DEBOUNCE_MS` | Milliseconds | +| `tokenRefreshSkewMs` | `CODEX_AUTH_TOKEN_REFRESH_SKEW_MS` | Milliseconds | +| `proactiveTokenRefresh` | `CODEX_AUTH_PROACTIVE_TOKEN_REFRESH` | Boolean | +| `authDebug` | `CODEX_AUTH_DEBUG` | Aliases supported (see below) | +| `schedulingMode` | `CODEX_AUTH_SCHEDULING_MODE` | `cache_first`, `balance`, `performance_first` | +| `maxCacheFirstWaitSeconds` | `CODEX_AUTH_MAX_CACHE_FIRST_WAIT_SECONDS` | Seconds | +| `switchOnFirstRateLimit` | `CODEX_AUTH_SWITCH_ON_FIRST_RATE_LIMIT` | Boolean | +| `rateLimitDedupWindowMs` | `CODEX_AUTH_RATE_LIMIT_DEDUP_WINDOW_MS` | Milliseconds | +| `rateLimitStateResetMs` | `CODEX_AUTH_RATE_LIMIT_STATE_RESET_MS` | Milliseconds | +| `defaultRetryAfterMs` | `CODEX_AUTH_DEFAULT_RETRY_AFTER_MS` | Milliseconds | +| `maxBackoffMs` | `CODEX_AUTH_MAX_BACKOFF_MS` | Milliseconds | +| `requestJitterMaxMs` | `CODEX_AUTH_REQUEST_JITTER_MAX_MS` | Milliseconds | +| `retryAllAccountsRateLimited` | `CODEX_AUTH_RETRY_ALL_RATE_LIMITED` | Boolean | +| `retryAllAccountsMaxWaitMs` | `CODEX_AUTH_RETRY_ALL_MAX_WAIT_MS` | Milliseconds | +| `retryAllAccountsMaxRetries` | `CODEX_AUTH_RETRY_ALL_MAX_RETRIES` | Number | +| `hardStopMaxWaitMs` | `CODEX_AUTH_HARD_STOP_MAX_WAIT_MS` | Milliseconds | +| `hardStopOnUnknownModel` | `CODEX_AUTH_HARD_STOP_ON_UNKNOWN_MODEL` | Boolean | +| `hardStopOnAllAuthFailed` | `CODEX_AUTH_HARD_STOP_ON_ALL_AUTH_FAILED` | Boolean | +| `hardStopMaxConsecutiveFailures` | `CODEX_AUTH_HARD_STOP_MAX_CONSECUTIVE_FAILURES` | Number | + ```bash 
CODEX_AUTH_ACCOUNT_SELECTION_STRATEGY=round-robin CODEX_AUTH_ACCOUNT_SELECTION_STRATEGY=hybrid @@ -443,11 +532,30 @@ CODEX_AUTH_PID_OFFSET_ENABLED=1 CODEX_AUTH_QUIET=1 CODEX_AUTH_TOKEN_REFRESH_SKEW_MS=60000 CODEX_AUTH_RATE_LIMIT_TOAST_DEBOUNCE_MS=60000 +CODEX_AUTH_RATE_LIMIT_DEDUP_WINDOW_MS=2000 +CODEX_AUTH_RATE_LIMIT_STATE_RESET_MS=120000 +CODEX_AUTH_DEFAULT_RETRY_AFTER_MS=60000 +CODEX_AUTH_MAX_BACKOFF_MS=120000 +CODEX_AUTH_REQUEST_JITTER_MAX_MS=1000 +CODEX_AUTH_SCHEDULING_MODE=cache_first +CODEX_AUTH_MAX_CACHE_FIRST_WAIT_SECONDS=60 +CODEX_AUTH_SWITCH_ON_FIRST_RATE_LIMIT=1 CODEX_AUTH_RETRY_ALL_RATE_LIMITED=1 CODEX_AUTH_RETRY_ALL_MAX_WAIT_MS=30000 CODEX_AUTH_RETRY_ALL_MAX_RETRIES=1 +CODEX_AUTH_HARD_STOP_MAX_WAIT_MS=10000 +CODEX_AUTH_HARD_STOP_ON_UNKNOWN_MODEL=1 +CODEX_AUTH_HARD_STOP_ON_ALL_AUTH_FAILED=1 +CODEX_AUTH_HARD_STOP_MAX_CONSECUTIVE_FAILURES=5 +CODEX_AUTH_PROACTIVE_TOKEN_REFRESH=1 +CODEX_AUTH_DEBUG=1 +CODEX_AUTH_NO_BROWSER=1 ``` +Deprecated environment aliases (still supported): +- `OPENCODE_OPENAI_AUTH_DEBUG`, `DEBUG_CODEX_PLUGIN` β†’ `CODEX_AUTH_DEBUG` +- `OPENCODE_NO_BROWSER`, `OPENCODE_HEADLESS` β†’ `CODEX_AUTH_NO_BROWSER` + ### Prompt caching - When OpenCode provides a `prompt_cache_key` (its session identifier), the plugin forwards it directly to Codex. @@ -471,9 +579,11 @@ CODEX_AUTH_RETRY_ALL_MAX_RETRIES=1 - bundled static template fallback - Runtime model metadata is online-first: - Codex `/backend-api/codex/models` - - local `codex-models-cache.json` fallback - - Codex GitHub `models.json` fallback (`latest release` then `main`) - - static template defaults as final fallback + - local `codex-models-cache-.json` per-account fallback (server-derived) + +If the server catalog and its cache are unavailable, requests are rejected to avoid guessing supported models. + +Note: legacy `codex-models-cache.json` files are ignored after the per-account cache change; the first refresh will recreate the new cache files. 
--- @@ -511,7 +621,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/your-model-name Look for: ``` -[openai-codex-plugin] Model config lookup: "your-model-name" β†’ normalized to "gpt-5.1-codex" for API { +[openai-codex-plugin] Model config lookup: "your-model-name" β†’ normalized to "gpt-5.3-codex" for API { hasModelSpecificConfig: true, resolvedConfig: { ... } } @@ -521,8 +631,8 @@ Look for: ```bash # Run with different models, check logs show different options -ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" --model=openai/gpt-5-codex-low -ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" --model=openai/gpt-5-codex-high +ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" --model=openai/gpt-5.3-codex-low +ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" --model=openai/gpt-5.3-codex-high # Compare reasoning.effort in logs cat ~/.config/opencode/logs/codex-plugin/request-*-after-transform.json | jq '.reasoning.effort' @@ -557,8 +667,8 @@ Use the official config file (`opencode-modern.json` for v1.0.210+, `opencode-le ```json { "models": { - "gpt-5.1-codex-low": { - "name": "GPT 5.1 Codex Low (OAuth)", + "gpt-5.3-codex-low": { + "name": "GPT 5.3 Codex Low (OAuth)", "limit": { "context": 272000, "output": 128000 @@ -671,17 +781,17 @@ Look for `hasModelSpecificConfig: true` in debug output. ### Options Ignored -**Cause**: Model normalizes before lookup +**Cause**: Config key in `opencode.json` doesn't match the model name used in CLI **Example Problem:** ```json -{ "models": { "gpt-5.1-codex": { "options": { ... } } } } +{ "models": { "gpt-5.3-codex": { "options": { ... } } } } ``` ```bash ---model=openai/gpt-5.1-codex-low # Normalizes to "gpt-5.1-codex" before lookup +--model=openai/gpt-5.3-codex-low # Plugin looks for "gpt-5.3-codex-low" in config ``` -**Fix**: Use exact name you specify in CLI as config key. +**Fix**: Use exact name you specify in CLI as config key (normalization for API happens *after* config lookup). 
> **⚠️ Best Practice:** Use the official `opencode-modern.json` or `opencode-legacy.json` configuration instead of creating custom configs. This ensures proper model normalization and compatibility with GPT 5 models. diff --git a/docs/development/ARCHITECTURE.md b/docs/development/ARCHITECTURE.md index 8d1990b..b711854 100644 --- a/docs/development/ARCHITECTURE.md +++ b/docs/development/ARCHITECTURE.md @@ -265,10 +265,11 @@ let include: Vec = if reasoning.is_some() { 2. Model Normalization β”œβ”€ Resolve known mappings from model map - β”œβ”€ Apply fallback normalization for unknown variants - └─ Normalize to canonical Codex slug (for API + metadata lookups) + β”œβ”€ Preserve unknown/legacy slugs (lowercased) to avoid false positives + └─ Use the normalized slug for API + metadata lookups 3. Config Merging + β”œβ”€ Merge plugin `custom_settings` over OpenCode config β”œβ”€ Global options (provider.openai.options) β”œβ”€ Model-specific options (provider.openai.models[name].options) └─ Result: merged config for this model @@ -279,11 +280,17 @@ let include: Vec = if reasoning.is_some() { └─ Verify no IDs remain 5. 
System Prompt Handling - β”œβ”€ Preserve OpenCode env + AGENTS/runtime metadata messages - β”œβ”€ Load Codex instructions by model family (GitHub ETag-cached) - β”œβ”€ Load model runtime defaults (online-first /codex/models fallback chain) - β”œβ”€ Render personality using precedence: - β”‚ model option β†’ online model default β†’ global backup β†’ static default + β”œβ”€ Preserve OpenCode env + AGENTS/runtime metadata messages + β”œβ”€ Load Codex instructions by model family (GitHub ETag-cached) + β”œβ”€ Load model runtime defaults from /codex/models (per-account cached, strict allowlist) + β”œβ”€ Render personality using precedence: + β”‚ custom_settings model override β†’ custom_settings global β†’ pragmatic (fallback) + β”œβ”€ Resolve personality message from: + β”‚ Personalities/*.md β†’ runtime instructions_variables.personalities β†’ built-ins + β”‚ (Key "default" uses runtime defaults: personality_default message if present, + β”‚ otherwise the onlineDefaultPersonality key from instructions_variables.personality, + β”‚ with pragmatic fallback) + β”œβ”€ Apply instructions template (replace `{{ personality }}` or append spec) └─ Do not inject bridge/tool-remap overlays 6. Orphan Tool Output Handling @@ -335,7 +342,7 @@ let include: Vec = if reasoning.is_some() { | Feature | Codex CLI | This Plugin | Why? 
| |---------|-----------|-------------|------| | **OpenCode Runtime Metadata Preservation** | Native runtime | βœ… Preserve env/AGENTS developer messages | Keep harness context intact without duplicating tool contracts | -| **Online-First Model Metadata Fallbacks** | Native model manager | βœ… `/codex/models` β†’ cache β†’ GitHub β†’ static | Resilient runtime defaults + personality templates | +| **Authoritative Model Catalog** | Native model manager | βœ… `/codex/models` β†’ per-account cache (server-derived), fail closed if unavailable | Strict allowlist + runtime defaults | | **Orphan Tool Output Handling** | βœ… Drop orphans | βœ… Convert to messages | Preserve context + avoid 400s | | **Usage-limit messaging** | CLI prints status | βœ… Friendly error summary | Surface 5h/weekly windows in OpenCode | | **Per-Model Options** | CLI flags | βœ… Config file | Better UX in OpenCode | @@ -396,7 +403,7 @@ let include: Vec = if reasoning.is_some() { **Alternative**: Single global config **Problem**: -- `gpt-5.1-codex` optimal settings differ from `gpt-5.1` +- `gpt-5.3-codex` optimal settings differ from `gpt-5.3` - Users want quick switching between quality levels - No way to save "presets" @@ -431,6 +438,20 @@ let include: Vec = if reasoning.is_some() { **Workaround**: Codex CLI uses `store: true` for Azure only **This Plugin**: Only supports ChatGPT OAuth (no Azure) +### Hard-Stop Error Handling + +**Unsupported model**: +- Trigger: model not in `/codex/models` (including custom IDs) +- Response: synthetic error with `type: unsupported_model`, `param: model`, and attempted model ID + +**Catalog unavailable**: +- Trigger: `/codex/models` unavailable and no cached catalog +- Response: synthetic `unsupported_model` error with catalog context in the message + +**All accounts unavailable**: +- Trigger: all accounts rate-limited beyond `hardStopMaxWaitMs` or all accounts in auth-failure cooldown +- Response: synthetic errors `all_accounts_rate_limited` (HTTP 429) or 
`all_accounts_auth_failed` (HTTP 401) + --- ## Multi-Process State Management @@ -474,11 +495,6 @@ The plugin retrieves usage data from the authoritative `/wham/usage` endpoint: ## Performance Considerations -### Token Usage - -**Codex Bridge Prompt**: ~550 tokens (~90% reduction vs full OpenCode prompt) -**Benefit**: Faster inference, lower costs - ### Request Optimization **Prompt Caching**: Uses `promptCacheKey` for session-based caching diff --git a/docs/development/CONFIG_FIELDS.md b/docs/development/CONFIG_FIELDS.md index 294b312..300bc81 100644 --- a/docs/development/CONFIG_FIELDS.md +++ b/docs/development/CONFIG_FIELDS.md @@ -2,7 +2,7 @@ Understanding the difference between config key, `id`, and `name` fields in OpenCode model configuration. -> Note: Examples may use legacy aliases (for example `gpt-5-codex-*`) to show compatibility behavior. Runtime normalization maps these aliases to current canonical API slugs. +> Note: Examples may use legacy aliases (for example `gpt-5.0-codex-*`) to show compatibility behavior. Runtime normalization maps known variants (like `gpt-5.3-codex-low`) to base slugs (`gpt-5.3-codex`) for the API, while unknown/legacy IDs are lowercased and preserved. 
## The Three Fields @@ -27,16 +27,16 @@ Understanding the difference between config key, `id`, and `name` fields in Open ### Config Key (Property Name) -**Example:** `"gpt-5-codex-low"` +**Example:** `"gpt-5.3-codex-low"` **Used For:** -- βœ… CLI `--model` flag: `--model=openai/gpt-5-codex-low` -- βœ… OpenCode internal lookups: `provider.info.models["gpt-5-codex-low"]` -- βœ… TUI persistence: Saved to `~/.config/opencode/tui` as `model_id = "gpt-5-codex-low"` -- βœ… Custom command frontmatter: `model: openai/gpt-5-codex-low` -- βœ… Agent configuration: `"model": "openai/gpt-5-codex-low"` -- βœ… **Plugin config lookup**: `userConfig.models["gpt-5-codex-low"]` -- βœ… Passed to custom loaders: `getModel(sdk, "gpt-5-codex-low")` +- βœ… CLI `--model` flag: `--model=openai/gpt-5.3-codex-low` +- βœ… OpenCode internal lookups: `provider.info.models["gpt-5.3-codex-low"]` +- βœ… TUI persistence: Saved to `~/.config/opencode/tui` as `model_id = "gpt-5.3-codex-low"` +- βœ… Custom command frontmatter: `model: openai/gpt-5.3-codex-low` +- βœ… Agent configuration: `"model": "openai/gpt-5.3-codex-low"` +- βœ… **Plugin config lookup**: `userConfig.models["gpt-5.3-codex-low"]` +- βœ… Passed to custom loaders: `getModel(sdk, "gpt-5.3-codex-low")` **This is the PRIMARY identifier throughout OpenCode!** @@ -44,7 +44,7 @@ Understanding the difference between config key, `id`, and `name` fields in Open ### `id` Field (Optional - NOT NEEDED for OpenAI) -**Example:** `"gpt-5-codex"` +**Example:** `"gpt-5.3-codex"` **What it's used for:** - ⚠️ **Other providers**: Some providers use this for `sdk.languageModel(id)` @@ -75,7 +75,7 @@ openai: async () => { } ``` -**Our plugin receives:** `body.model = "gpt-5-codex-low"` (config key, NOT id field) +**Our plugin receives:** `body.model = "gpt-5.3-codex-low"` (config key, NOT id field) **Recommendation:** **Omit the `id` field** for OpenAI provider - it's redundant and creates confusion. OpenCode will auto-set it to the config key. 
@@ -109,7 +109,7 @@ const parsedModel: ModelsDev.Model = { β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ β”‚ β”‚ β”‚ CLI Usage: β”‚ -β”‚ $ opencode run --model=openai/gpt-5-codex-low β”‚ +β”‚ $ opencode run --model=openai/gpt-5.3-codex-low β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ CONFIG KEY β”‚ β”‚ β”‚ @@ -123,7 +123,7 @@ const parsedModel: ModelsDev.Model = { β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ Config Lookup (Plugin): β”‚ -β”‚ userConfig.models["gpt-5-codex-low"].options β”‚ +β”‚ userConfig.models["gpt-5.3-codex-low"].options β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ CONFIG KEY β”‚ β”‚ β”‚ @@ -134,44 +134,44 @@ const parsedModel: ModelsDev.Model = { β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ β”‚ β”‚ β”‚ 1. User Selection β”‚ -β”‚ opencode run --model=openai/gpt-5-codex-low β”‚ +β”‚ opencode run --model=openai/gpt-5.3-codex-low β”‚ β”‚ OpenCode parses: providerID="openai" β”‚ -β”‚ modelID="gpt-5-codex-low" ← CONFIG KEY β”‚ +β”‚ modelID="gpt-5.3-codex-low" ← CONFIG KEY β”‚ β”‚ β”‚ β”‚ 2. OpenCode Provider Lookup β”‚ -β”‚ provider.info.models["gpt-5-codex-low"] β”‚ +β”‚ provider.info.models["gpt-5.3-codex-low"] β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ CONFIG KEY β”‚ β”‚ β”‚ β”‚ 3. Custom Loader Call (OpenAI) β”‚ -β”‚ getModel(sdk, "gpt-5-codex-low") β”‚ +β”‚ getModel(sdk, "gpt-5.3-codex-low") β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ CONFIG KEY β”‚ β”‚ β”‚ β”‚ 4. AI SDK Request Creation β”‚ -β”‚ { model: "gpt-5-codex-low", ... } β”‚ +β”‚ { model: "gpt-5.3-codex-low", ... 
} β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ CONFIG KEY β”‚ β”‚ β”‚ β”‚ 5. Custom fetch() (Our Plugin) β”‚ -β”‚ body.model = "gpt-5-codex-low" β”‚ +β”‚ body.model = "gpt-5.3-codex-low" β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ CONFIG KEY β”‚ β”‚ β”‚ β”‚ 6. Plugin Config Lookup β”‚ -β”‚ userConfig.models["gpt-5-codex-low"].options β”‚ +β”‚ userConfig.models["gpt-5.3-codex-low"].options β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ CONFIG KEY β”‚ β”‚ Result: { reasoningEffort: "low", ... } βœ… FOUND β”‚ β”‚ β”‚ β”‚ 7. Plugin Normalization β”‚ -β”‚ normalizeModel("gpt-5-codex-low") β”‚ -β”‚ Returns: "gpt-5-codex" ← SENT TO CODEX API β”‚ +β”‚ normalizeModel("gpt-5.3-codex-low") β”‚ +β”‚ Returns: "gpt-5.3-codex" ← SENT TO CODEX API β”‚ β”‚ β”‚ β”‚ 8. TUI Persistence β”‚ β”‚ ~/.config/opencode/tui: β”‚ β”‚ provider_id = "openai" β”‚ -β”‚ model_id = "gpt-5-codex-low" ← CONFIG KEY persisted β”‚ +β”‚ model_id = "gpt-5.3-codex-low" ← CONFIG KEY persisted β”‚ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ ``` @@ -183,7 +183,7 @@ const parsedModel: ModelsDev.Model = { ### Config Key: The Real Identifier ```json -"gpt-5-codex-low": { ... } +"gpt-5.3-codex-low": { ... } β””β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ CONFIG KEY ``` @@ -194,14 +194,14 @@ const parsedModel: ModelsDev.Model = { - 🎯 **Config lookup key** - how plugin finds per-model options - 🎯 **Persisted value** - saved in TUI state -**Best Practice:** Use Codex CLI preset names (`gpt-5-codex-low`, `gpt-5-high`, etc.) +**Best Practice:** Use Codex CLI preset names (`gpt-5.3-codex-low`, `gpt-5.3-codex-high`, etc.) 
--- ### `id` Field: Documentation/Metadata ```json -"id": "gpt-5-codex" +"id": "gpt-5.3-codex" β””β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”˜ ID FIELD ``` @@ -211,7 +211,7 @@ const parsedModel: ModelsDev.Model = { - πŸ“ **Helps sorting** in model lists - πŸ“ **Clarity** - shows relationship between variants -**Best Practice:** Set to the base API model name (`gpt-5-codex`, `gpt-5`, etc.) +**Best Practice:** Set to the base API model name (`gpt-5.3-codex`, `gpt-5`, etc.) **Note:** For OpenAI provider, this is NOT sent to the API! The plugin normalizes the config key instead. @@ -240,20 +240,20 @@ const parsedModel: ModelsDev.Model = { ```json { - "gpt-5-codex-low": { - "id": "gpt-5-codex", + "gpt-5.3-codex-low": { + "id": "gpt-5.3-codex", "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } } } ``` -**When user selects `openai/gpt-5-codex-low`:** -- CLI: Uses `"gpt-5-codex-low"` (config key) +**When user selects `openai/gpt-5.3-codex-low`:** +- CLI: Uses `"gpt-5.3-codex-low"` (config key) - TUI: Shows `"GPT 5 Codex Low (OAuth)"` (name field) -- Plugin receives: `body.model = "gpt-5-codex-low"` (config key) -- Plugin looks up: `models["gpt-5-codex-low"]` βœ… Found -- Plugin sends to API: `"gpt-5-codex"` (normalized) +- Plugin receives: `body.model = "gpt-5.3-codex-low"` (config key) +- Plugin looks up: `models["gpt-5.3-codex-low"]` βœ… Found +- Plugin sends to API: `"gpt-5.3-codex"` (normalized) **Result:** βœ… Everything works perfectly! 
@@ -263,19 +263,19 @@ const parsedModel: ModelsDev.Model = { ```json { - "gpt-5-codex-low": { - "id": "gpt-5-codex", + "gpt-5.3-codex-low": { + "id": "gpt-5.3-codex", "name": "GPT 5 Codex Low (OAuth)" }, - "gpt-5-codex-high": { - "id": "gpt-5-codex", + "gpt-5.3-codex-high": { + "id": "gpt-5.3-codex", "name": "GPT 5 Codex High (OAuth)" } } ``` **Why this works:** -- Config keys are different: `"gpt-5-codex-low"` vs `"gpt-5-codex-high"` βœ… +- Config keys are different: `"gpt-5.3-codex-low"` vs `"gpt-5.3-codex-high"` βœ… - Same `id` is fine - it's just metadata - Different `name` values help distinguish in TUI @@ -287,13 +287,13 @@ const parsedModel: ModelsDev.Model = { ```json { - "gpt-5-codex": { - "id": "gpt-5-codex", + "gpt-5.3-codex": { + "id": "gpt-5.3-codex", "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } }, - "gpt-5-codex": { // ❌ DUPLICATE KEY ERROR! - "id": "gpt-5-codex", + "gpt-5.3-codex": { // ❌ DUPLICATE KEY ERROR! + "id": "gpt-5.3-codex", "name": "GPT 5 Codex High (OAuth)", "options": { "reasoningEffort": "high" } } @@ -314,16 +314,16 @@ const parsedModel: ModelsDev.Model = { ```json { - "gpt-5-codex-low": { // ← Unique config key #1 - "id": "gpt-5-codex", // ← Same base model + "gpt-5.3-codex-low": { // ← Unique config key #1 + "id": "gpt-5.3-codex", // ← Same base model "options": { "reasoningEffort": "low" } }, - "gpt-5-codex-medium": { // ← Unique config key #2 - "id": "gpt-5-codex", // ← Same base model + "gpt-5.3-codex-medium": { // ← Unique config key #2 + "id": "gpt-5.3-codex", // ← Same base model "options": { "reasoningEffort": "medium" } }, - "gpt-5-codex-high": { // ← Unique config key #3 - "id": "gpt-5-codex", // ← Same base model + "gpt-5.3-codex-high": { // ← Unique config key #3 + "id": "gpt-5.3-codex", // ← Same base model "options": { "reasoningEffort": "high" } } } @@ -331,7 +331,7 @@ const parsedModel: ModelsDev.Model = { **Result:** - 3 selectable variants in TUI βœ… -- Same API model (`gpt-5-codex`) βœ… 
+- Same API model (`gpt-5.3-codex`) βœ… - Different reasoning settings βœ… - Plugin correctly applies per-variant options βœ… @@ -344,7 +344,7 @@ const parsedModel: ModelsDev.Model = { **Old Plugin + Old Config:** ```json "GPT 5 Codex Low (ChatGPT Subscription)": { - "id": "gpt-5-codex", + "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } ``` @@ -353,7 +353,7 @@ const parsedModel: ModelsDev.Model = { **New Plugin + Old Config:** ```json "GPT 5 Codex Low (ChatGPT Subscription)": { - "id": "gpt-5-codex", + "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } ``` @@ -361,8 +361,8 @@ const parsedModel: ModelsDev.Model = { **New Plugin + New Config:** ```json -"gpt-5-codex-low": { - "id": "gpt-5-codex", +"gpt-5.3-codex-low": { + "id": "gpt-5.3-codex", "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } } @@ -398,6 +398,38 @@ const parsedModel: ModelsDev.Model = { - `false` (required): Prevents AI SDK from using `item_reference` for conversation history - `true` (default): Uses server-side storage with references (incompatible with Codex API) +--- + +## Plugin Config Fields + +Plugin config is stored in `~/.config/opencode/openai-codex-auth-config.json`. + +| Field | Type | Default | Description | +| :--- | :--- | :--- | :--- | +| `accountSelectionStrategy` | `string` | `"sticky"` | Account selection (`sticky`, `round-robin`, `hybrid`). | +| `pidOffsetEnabled` | `boolean` | `true` | PID-based starting offset for parallel agents. | +| `perProjectAccounts` | `boolean` | `false` | Use `.opencode/openai-codex-accounts.json` when present. | +| `quietMode` | `boolean` | `false` | Reduce background toasts. | +| `rateLimitToastDebounceMs` | `number` | `60000` | Debounce toast notifications. | +| `tokenRefreshSkewMs` | `number` | `60000` | Refresh tokens this early before expiry. | +| `proactiveTokenRefresh` | `boolean` | `false` | Enable proactive refresh queue when available. 
| +| `authDebug` | `boolean` | `false` | Debug logging (see env aliases). | +| `retryAllAccountsRateLimited` | `boolean` | `false` | Enable global wait-and-retry when all accounts rate-limited. | +| `retryAllAccountsMaxWaitMs` | `number` | `30000` | Max wait time for global retry (0 disables limit). | +| `retryAllAccountsMaxRetries` | `number` | `1` | Max retry cycles for global wait loop. | +| `hardStopMaxWaitMs` | `number` | `10000` | Hard-stop wait threshold for all-accounts rate limits. | +| `hardStopOnUnknownModel` | `boolean` | `true` | Hard-stop when model not in server catalog. | +| `hardStopOnAllAuthFailed` | `boolean` | `true` | Hard-stop when all accounts in auth-failure cooldown. | +| `hardStopMaxConsecutiveFailures` | `number` | `5` | Hard-stop after consecutive failures. | +| `schedulingMode` | `string` | `"cache_first"` | Scheduling strategy (`cache_first`, `balance`, `performance_first`). | +| `maxCacheFirstWaitSeconds` | `number` | `60` | Cache-first wait before switching. | +| `switchOnFirstRateLimit` | `boolean` | `true` | Switch accounts immediately on rate limit. | +| `rateLimitDedupWindowMs` | `number` | `2000` | Deduplicate rate-limit events. | +| `rateLimitStateResetMs` | `number` | `120000` | Reset rate-limit state after idle window. | +| `defaultRetryAfterMs` | `number` | `60000` | Fallback retry-after when headers missing. | +| `maxBackoffMs` | `number` | `120000` | Cap exponential backoff. | +| `requestJitterMaxMs` | `number` | `1000` | Random jitter added to retry delays. | + **Why required:** AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce payload size when `store: true`. 
However: - Codex API requires `store: false` (stateless mode) @@ -413,7 +445,7 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo "store": false // ← Global: applies to all models }, "models": { - "gpt-5-codex-low": { + "gpt-5.3-codex-low": { "options": { "store": false // ← Per-model: redundant but explicit } @@ -436,7 +468,7 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo ```json { - "gpt-5-codex-low": { + "gpt-5.3-codex-low": { "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } } @@ -444,7 +476,7 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo ``` **Benefits:** -- βœ… Clean config key: `gpt-5-codex-low` (matches Codex CLI presets) +- βœ… Clean config key: `gpt-5.3-codex-low` (matches Codex CLI presets) - βœ… Friendly display: `"GPT 5 Codex Low (OAuth)"` (UX) - βœ… No redundant fields - βœ… OpenCode auto-sets `id` to config key @@ -460,17 +492,17 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo ```json { - "gpt-5-codex-low": { + "gpt-5.3-codex-low": { "options": { "reasoningEffort": "low" } } } ``` **What happens:** -- `id` defaults to: `"gpt-5-codex-low"` (config key) -- `name` defaults to: `"gpt-5-codex-low"` (config key) -- TUI shows: `"gpt-5-codex-low"` (less friendly) -- Plugin normalizes: `"gpt-5-codex-low"` β†’ `"gpt-5-codex"` for API +- `id` defaults to: `"gpt-5.3-codex-low"` (config key) +- `name` defaults to: `"gpt-5.3-codex-low"` (config key) +- TUI shows: `"gpt-5.3-codex-low"` (less friendly) +- Plugin normalizes: `"gpt-5.3-codex-low"` β†’ `"gpt-5.3-codex"` for API - **Works perfectly, just less user-friendly** --- @@ -479,8 +511,8 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo ```json { - "gpt-5-codex-low": { - "id": "gpt-5-codex", + "gpt-5.3-codex-low": { + "id": "gpt-5.3-codex", "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" 
} } @@ -498,15 +530,15 @@ AI SDK 2.0.50 introduced automatic use of `item_reference` items to reduce paylo | Use Case | Which Field? | Example Value | |----------|-------------|---------------| -| **CLI `--model` flag** | Config Key | `openai/gpt-5-codex-low` | -| **Custom commands** | Config Key | `model: openai/gpt-5-codex-low` | -| **Agent config** | Config Key | `"model": "openai/gpt-5-codex-low"` | +| **CLI `--model` flag** | Config Key | `openai/gpt-5.3-codex-low` | +| **Custom commands** | Config Key | `model: openai/gpt-5.3-codex-low` | +| **Agent config** | Config Key | `"model": "openai/gpt-5.3-codex-low"` | | **TUI display** | `name` field | `"GPT 5 Codex Low (OAuth)"` | -| **Plugin config lookup** | Config Key | `models["gpt-5-codex-low"]` | -| **AI SDK receives** | Config Key | `body.model = "gpt-5-codex-low"` | -| **Plugin normalizes** | Transformed | `"gpt-5-codex"` (sent to API) | -| **TUI persistence** | Config Key | `model_id = "gpt-5-codex-low"` | -| **Documentation** | `id` field | `"gpt-5-codex"` (base model) | +| **Plugin config lookup** | Config Key | `models["gpt-5.3-codex-low"]` | +| **AI SDK receives** | Config Key | `body.model = "gpt-5.3-codex-low"` | +| **Plugin normalizes** | Transformed | `"gpt-5.3-codex"` (sent to API) | +| **TUI persistence** | Config Key | `model_id = "gpt-5.3-codex-low"` | +| **Documentation** | `id` field | `"gpt-5.3-codex"` (base model) | | **Model sorting** | `id` field | Used for priority ranking | --- @@ -536,22 +568,22 @@ name field is UI sugar 🎨 **Old Plugin Logic (Broken):** ```typescript -const normalizedModel = normalizeModel(body.model); // "gpt-5-codex-low" β†’ "gpt-5-codex" -const modelConfig = getModelConfig(normalizedModel, userConfig); // Lookup "gpt-5-codex" +const normalizedModel = normalizeModel(body.model); // "gpt-5.2-codex-high" β†’ "gpt-5.2-codex" +const modelConfig = getModelConfig(normalizedModel, userConfig); // Lookup "gpt-5.2-codex" ``` **Problem:** -- Plugin received: 
`"gpt-5-codex-low"` (config key) -- Plugin normalized first: `"gpt-5-codex"` -- Plugin looked up config: `models["gpt-5-codex"]` ❌ NOT FOUND -- Config key was: `models["gpt-5-codex-low"]` +- Plugin received: `"gpt-5.2-codex-high"` (config key) +- Plugin normalized first: `"gpt-5.2-codex"` +- Plugin looked up config: `models["gpt-5.2-codex"]` ❌ NOT FOUND +- Config key was: `models["gpt-5.2-codex-high"]` - **Result:** Per-model options ignored! **New Plugin Logic (Fixed):** ```typescript -const originalModel = body.model; // "gpt-5-codex-low" (config key) -const normalizedModel = normalizeModel(body.model); // "gpt-5-codex" (for API) -const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5-codex-low" βœ… +const originalModel = body.model; // "gpt-5.2-codex-high" (config key) +const normalizedModel = normalizeModel(body.model); // "gpt-5.2-codex" (for API) +const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5.2-codex-high" βœ… ``` **Fix:** @@ -569,7 +601,7 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 ```json { "my-custom-name": { - "id": "gpt-5-codex", + "id": "gpt-5.3-codex", "name": "My Custom Display Name", "options": { "reasoningEffort": "high" } } @@ -582,8 +614,8 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 **Answer:** 1. Plugin receives: `body.model = "my-custom-name"` -2. Plugin normalizes: `"my-custom-name"` β†’ `"gpt-5-codex"` (contains "codex") -3. Plugin sends to API: `"gpt-5-codex"` βœ… +2. Plugin normalizes: `"my-custom-name"` β†’ `"my-custom-name"` (preserved) +3. 
Plugin sends to API: `"my-custom-name"` βœ… **The `id` field is NOT used for this!** @@ -614,20 +646,20 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 **Config:** ```json { - "gpt-5-codex-low": { - "id": "gpt-5-codex", + "gpt-5.3-codex-low": { + "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } } ``` -**User selects:** `openai/gpt-5-codex-low` +**User selects:** `openai/gpt-5.3-codex-low` **Question:** How does plugin find the options? **Answer:** -1. Plugin receives: `body.model = "gpt-5-codex-low"` -2. Plugin looks up: `userConfig.models["gpt-5-codex-low"]` βœ… +1. Plugin receives: `body.model = "gpt-5.3-codex-low"` +2. Plugin looks up: `userConfig.models["gpt-5.3-codex-low"]` βœ… 3. Plugin finds: `{ reasoningEffort: "low" }` βœ… **The lookup uses config key, NOT the `id` field!** @@ -640,8 +672,8 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 ```json { - "gpt-5-codex": { // ❌ Can't have multiple variants - "id": "gpt-5-codex" + "gpt-5.3-codex": { // ❌ Can't have multiple variants + "id": "gpt-5.3-codex" } } ``` @@ -651,26 +683,26 @@ const modelConfig = getModelConfig(originalModel, userConfig); // Lookup "gpt-5 ```json { "my-model": { - "id": "gpt-5-codex-low", // ❌ Plugin won't look up by this! + "id": "gpt-5.3-codex-low", // ❌ Plugin won't look up by this! "options": { ... 
} } } ``` -**Plugin looks up by:** `"my-model"` (config key), not `"gpt-5-codex-low"` (id) +**Plugin looks up by:** `"my-model"` (config key), not `"gpt-5.3-codex-low"` (id) ### ❌ Forgetting name Field ```json { - "gpt-5-codex-low": { - "id": "gpt-5-codex" + "gpt-5.3-codex-low": { + "id": "gpt-5.3-codex" // Missing: "name" field } } ``` -**Result:** TUI shows `"gpt-5-codex-low"` (works but less friendly) +**Result:** TUI shows `"gpt-5.3-codex-low"` (works but less friendly) --- diff --git a/docs/development/CONFIG_FLOW.md b/docs/development/CONFIG_FLOW.md index cc29e55..5347db0 100644 --- a/docs/development/CONFIG_FLOW.md +++ b/docs/development/CONFIG_FLOW.md @@ -2,7 +2,7 @@ This document explains how OpenCode configuration flows from user files through the plugin system to the Codex API. -> Note: Some examples use legacy model aliases for compatibility demonstrations. Runtime normalization maps legacy aliases to current canonical slugs before API submission. +> Note: Some examples use legacy model aliases for compatibility demonstrations. Runtime normalization maps known gpt-5.x variants (like `gpt-5.3-codex-low`) to base slugs (`gpt-5.3-codex`) before API submission; unknown/legacy IDs are lowercased and preserved without substring coercion. ## Table of Contents - [Config Loading Order](#config-loading-order) @@ -95,8 +95,8 @@ Plugins can inject options via the `loader()` function. "provider": { "openai": { "models": { - "gpt-5-codex-medium": { - "name": "GPT 5 Codex Medium (OAuth)", + "gpt-5.3-codex-medium": { + "name": "GPT 5.3 Codex Medium (OAuth)", "limit": { "context": 272000, "output": 128000 @@ -118,9 +118,9 @@ Plugins can inject options via the `loader()` function. 
``` **What OpenCode Uses**: -- **UI Display**: "GPT 5 Codex Medium (OAuth)" βœ… -- **Persistence**: `provider_id: "openai"` + `model_id: "gpt-5-codex-medium"` βœ… -- **Plugin lookup**: `models["gpt-5-codex-medium"]` β†’ used to build Codex request βœ… +- **UI Display**: "GPT 5.3 Codex Medium (OAuth)" βœ… +- **Persistence**: `provider_id: "openai"` + `model_id: "gpt-5.3-codex-medium"` βœ… +- **Plugin lookup**: `models["gpt-5.3-codex-medium"]` β†’ used to build Codex request βœ… ### TUI Persistence @@ -129,11 +129,11 @@ The TUI stores recently used models in `~/.config/opencode/tui`: ```toml [[recently_used_models]] provider_id = "openai" -model_id = "gpt-5-codex" +model_id = "gpt-5.3-codex" last_used = 2025-10-12T10:30:00Z ``` -**Key Point**: Custom display names are **UI-only**. The underlying `id` field is what gets persisted and sent to APIs. +**Key Point**: Custom display names are **UI-only**. The underlying config key (persisted as model_id) is used for selection, while normalization determines the slug sent to APIs. **Source**: `tmp/opencode/packages/tui/internal/app/state.go:54-79` @@ -160,6 +160,16 @@ async loader(getAuth: () => Promise, provider: unknown) { } ``` +Plugin settings are loaded from `~/.config/opencode/openai-codex-auth-config.json` and include: + +- Account selection + scheduling: `accountSelectionStrategy`, `schedulingMode`, `maxCacheFirstWaitSeconds`, `switchOnFirstRateLimit` +- Rate-limit tuning: `rateLimitDedupWindowMs`, `rateLimitStateResetMs`, `defaultRetryAfterMs`, `maxBackoffMs`, `requestJitterMaxMs` +- Hard-stop safety: `hardStopMaxWaitMs`, `hardStopOnUnknownModel`, `hardStopOnAllAuthFailed`, `hardStopMaxConsecutiveFailures` +- Token refresh + logging: `tokenRefreshSkewMs`, `proactiveTokenRefresh`, `authDebug` +- Storage + UX: `perProjectAccounts`, `pidOffsetEnabled`, `quietMode` + +See `docs/configuration.md` for the full field table and environment variable mapping. 
+ ### Config Structure ```typescript @@ -223,8 +233,8 @@ For a given model, options are merged: "textVerbosity": "medium" }, "models": { - "gpt-5-codex-high": { - "name": "GPT 5 Codex High (OAuth)", + "gpt-5.3-codex-high": { + "name": "GPT 5.3 Codex High (OAuth)", "options": { "reasoningEffort": "high", "reasoningSummary": "detailed" @@ -244,7 +254,7 @@ For a given model, options are merged: ``` **Result**: -- `gpt-5-codex-high` uses `reasoningEffort: "high"` (overridden) + `textVerbosity: "medium"` (from global) +- `gpt-5.3-codex-high` uses `reasoningEffort: "high"` (overridden) + `textVerbosity: "medium"` (from global) - `gpt-5-nano` uses `reasoningEffort: "minimal"` + `textVerbosity: "low"` (both overridden) ### Example 3: Full Configuration @@ -252,7 +262,7 @@ For a given model, options are merged: { "$schema": "https://opencode.ai/config.json", "plugin": ["opencode-openai-codex-multi-auth"], - "model": "openai/gpt-5-codex-medium", + "model": "openai/gpt-5.3-codex-medium", "provider": { "openai": { "options": { @@ -262,14 +272,14 @@ For a given model, options are merged: "include": ["reasoning.encrypted_content"] }, "models": { - "gpt-5-codex-low": { - "name": "GPT 5 Codex Low (OAuth)", + "gpt-5.3-codex-low": { + "name": "GPT 5.3 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } }, - "gpt-5-codex-high": { - "name": "GPT 5 Codex High (OAuth)", + "gpt-5.3-codex-high": { + "name": "GPT 5.3 Codex High (OAuth)", "options": { "reasoningEffort": "high", "reasoningSummary": "detailed" @@ -344,15 +354,15 @@ Custom model names help you remember what each variant does: { "models": { "GPT 5 Codex - Fast & Cheap": { - "id": "gpt-5-codex", + "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } }, "GPT 5 Codex - Balanced": { - "id": "gpt-5-codex", + "id": "gpt-5.3-codex", "options": { "reasoningEffort": "medium" } }, "GPT 5 Codex - Max Quality": { - "id": "gpt-5-codex", + "id": "gpt-5.3-codex", "options": { "reasoningEffort": "high" } } } @@ -374,14 +384,7 
@@ Most common settings should be global: ``` ### 4. Prefer Config Files for Plugin Settings -Use plugin config files for persistent behavior. Legacy `codexMode` is now a no-op and does not alter prompt/tool handling. - -Example `~/.config/opencode/openai-codex-auth-config.json`: -```json -{ - "codexMode": false -} -``` +Use plugin config files for persistent behavior. --- diff --git a/docs/development/TESTING.md b/docs/development/TESTING.md index 4410bd7..a77ad68 100644 --- a/docs/development/TESTING.md +++ b/docs/development/TESTING.md @@ -2,7 +2,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibility. -> Note: Some examples intentionally use legacy aliases (`gpt-5`, `gpt-5-codex`, etc.) to verify compatibility paths. Current runtime normalizes these to current canonical slugs (for example, `gpt-5.1`, `gpt-5.1-codex`). +> Note: Some examples intentionally use legacy aliases (`gpt-5`, `gpt-5.1-codex`, etc.) to verify compatibility paths. Current runtime normalizes known legacy aliases to modern base slugs; unknown slugs are rejected during request transformation. 
## Test Scenarios Matrix @@ -17,7 +17,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit **Available Models:** (from OpenCode's models.dev database) - `gpt-5` -- `gpt-5-codex` +- `gpt-5.3-codex` - `gpt-5-mini` - `gpt-5-nano` @@ -25,10 +25,10 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit | User Selects | Plugin Receives | Normalizes To | Config Lookup | API Receives | Result | |--------------|-----------------|---------------|---------------|--------------|--------| -| `openai/gpt-5` | `"gpt-5"` | `"gpt-5"` | `models["gpt-5"]` β†’ undefined | `"gpt-5"` | βœ… Uses global options | -| `openai/gpt-5-codex` | `"gpt-5-codex"` | `"gpt-5-codex"` | `models["gpt-5-codex"]` β†’ undefined | `"gpt-5-codex"` | βœ… Uses global options | -| `openai/gpt-5-mini` | `"gpt-5-mini"` | `"gpt-5"` | `models["gpt-5-mini"]` β†’ undefined | `"gpt-5"` | βœ… Uses global options | -| `openai/gpt-5-nano` | `"gpt-5-nano"` | `"gpt-5"` | `models["gpt-5-nano"]` β†’ undefined | `"gpt-5"` | βœ… Uses global options | +| `openai/gpt-5` | `"gpt-5"` | `"gpt-5.1"` | `models["gpt-5"]` β†’ undefined | `"gpt-5.1"` | βœ… Uses global options | +| `openai/gpt-5.3-codex` | `"gpt-5.3-codex"` | `"gpt-5.3-codex"` | `models["gpt-5.3-codex"]` β†’ undefined | `"gpt-5.3-codex"` | βœ… Uses global options | +| `openai/gpt-5-mini` | `"gpt-5-mini"` | `"gpt-5-mini"` | `models["gpt-5-mini"]` β†’ undefined | `"gpt-5-mini"` | βœ… Uses global options | +| `openai/gpt-5-nano` | `"gpt-5-nano"` | `"gpt-5-nano"` | `models["gpt-5-nano"]` β†’ undefined | `"gpt-5-nano"` | βœ… Uses global options | **Expected Behavior:** - βœ… All models work with global options @@ -49,11 +49,11 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit "reasoningEffort": "medium" }, "models": { - "gpt-5-codex-low": { + "gpt-5.3-codex-low": { "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } }, - "gpt-5-codex-high": { + 
"gpt-5.3-codex-high": { "name": "GPT 5 Codex High (OAuth)", "options": { "reasoningEffort": "high" } } @@ -67,14 +67,14 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit | User Selects | Plugin Receives | Config Lookup | Resolved Options | API Receives | Result | |--------------|-----------------|---------------|------------------|--------------|--------| -| `openai/gpt-5-codex-low` | `"gpt-5-codex-low"` | Found βœ… | `{ reasoningEffort: "low" }` | `"gpt-5-codex"` | βœ… Per-model | -| `openai/gpt-5-codex-high` | `"gpt-5-codex-high"` | Found βœ… | `{ reasoningEffort: "high" }` | `"gpt-5-codex"` | βœ… Per-model | -| `openai/gpt-5-codex` | `"gpt-5-codex"` | Not found | `{ reasoningEffort: "medium" }` | `"gpt-5-codex"` | βœ… Global | +| `openai/gpt-5.3-codex-low` | `"gpt-5.3-codex-low"` | Found βœ… | `{ reasoningEffort: "low" }` | `"gpt-5.3-codex"` | βœ… Per-model | +| `openai/gpt-5.3-codex-high` | `"gpt-5.3-codex-high"` | Found βœ… | `{ reasoningEffort: "high" }` | `"gpt-5.3-codex"` | βœ… Per-model | +| `openai/gpt-5.3-codex` | `"gpt-5.3-codex"` | Not found | `{ reasoningEffort: "medium" }` | `"gpt-5.3-codex"` | βœ… Global | **Expected Behavior:** - βœ… Custom variants use per-model options -- βœ… Default `gpt-5-codex` uses global options -- βœ… Both normalize to `"gpt-5-codex"` for API +- βœ… Default `gpt-5.3-codex` uses global options +- βœ… Known variants normalize to base slug for API; default `gpt-5.3-codex` stays `gpt-5.3-codex` --- @@ -91,7 +91,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit }, "models": { "GPT 5 Codex Low (ChatGPT Subscription)": { - "id": "gpt-5-codex", + "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } } @@ -104,7 +104,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit | User Selects | Plugin Receives | Config Lookup | Resolved Options | API Receives | Result | 
|--------------|-----------------|---------------|------------------|--------------|--------| -| `openai/GPT 5 Codex Low (ChatGPT Subscription)` | `"GPT 5 Codex Low (ChatGPT Subscription)"` | Found βœ… | `{ reasoningEffort: "low" }` | `"gpt-5-codex"` | βœ… Per-model | +| `openai/GPT 5 Codex Low (ChatGPT Subscription)` | `"GPT 5 Codex Low (ChatGPT Subscription)"` | Found βœ… | `{ reasoningEffort: "low" }` | `"gpt 5 codex low (chatgpt subscription)"` | βœ… Per-model | **Expected Behavior:** - βœ… Old config keys still work @@ -122,7 +122,7 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit "provider": { "openai": { "models": { - "gpt-5-codex-low": { + "gpt-5.3-codex-low": { "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } } @@ -133,16 +133,16 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ``` **Available Models:** -- `gpt-5-codex-low` (custom) -- `gpt-5-codex` (default from models.dev) +- `gpt-5.3-codex-low` (custom) +- `gpt-5.3-codex` (default from models.dev) - `gpt-5` (default from models.dev) **Test Cases:** | User Selects | Config Lookup | Uses Options | Result | |--------------|---------------|--------------|--------| -| `openai/gpt-5-codex-low` | Found βœ… | Per-model | βœ… Custom config | -| `openai/gpt-5-codex` | Not found | Global | βœ… Default model | +| `openai/gpt-5.3-codex-low` | Found βœ… | Per-model | βœ… Custom config | +| `openai/gpt-5.3-codex` | Not found | Global | βœ… Default model | | `openai/gpt-5` | Not found | Global | βœ… Default model | **Expected Behavior:** @@ -171,12 +171,12 @@ Comprehensive testing matrix for all config scenarios and backwards compatibilit ``` User selects: openai/GPT-5-CODEX-HIGH Plugin receives: "GPT-5-CODEX-HIGH" -normalizeModel: "GPT-5-CODEX-HIGH" β†’ "gpt-5-codex" βœ… (includes "codex") +normalizeModel: "GPT-5-CODEX-HIGH" β†’ "gpt-5-codex-high" βœ… (lowercased; legacy alias then resolves to base slug) Config lookup: models["GPT-5-CODEX-HIGH"] β†’ Found βœ…
-API receives: "gpt-5-codex" βœ… +API receives: "gpt-5.3-codex" βœ… ``` -**Result:** βœ… Works (case-insensitive includes()) +**Result:** βœ… Works (case-insensitive normalization) --- @@ -197,9 +197,9 @@ API receives: "gpt-5-codex" βœ… ``` User selects: openai/my-gpt5-codex-variant Plugin receives: "my-gpt5-codex-variant" -normalizeModel: "my-gpt5-codex-variant" β†’ "gpt-5-codex" βœ… (includes "codex") +normalizeModel: "my-gpt5-codex-variant" β†’ "my-gpt5-codex-variant" βœ… (preserved) Config lookup: models["my-gpt5-codex-variant"] β†’ Found βœ… -API receives: "gpt-5-codex" βœ… +API receives: "my-gpt5-codex-variant" βœ… ``` **Result:** βœ… Works (normalization handles it) @@ -219,9 +219,9 @@ API receives: "gpt-5-codex" βœ… ``` User selects: (none - uses OpenCode default) Plugin receives: undefined or default from OpenCode -normalizeModel: undefined β†’ "gpt-5" βœ… (fallback) +normalizeModel: undefined β†’ "gpt-5.1" βœ… (fallback) Config lookup: models[undefined] β†’ undefined -API receives: "gpt-5" βœ… +API receives: "gpt-5.1" βœ… ``` **Result:** βœ… Works (safe fallback) @@ -245,9 +245,9 @@ API receives: "gpt-5" βœ… ``` User selects: openai/my-gpt-5-variant Plugin receives: "my-gpt-5-variant" -normalizeModel: "my-gpt-5-variant" β†’ "gpt-5" βœ… (includes "gpt-5", not "codex") +normalizeModel: "my-gpt-5-variant" β†’ "my-gpt-5-variant" βœ… (preserved) Config lookup: models["my-gpt-5-variant"] β†’ Found βœ… -API receives: "gpt-5" βœ… +API receives: "my-gpt-5-variant" βœ… ``` **Result:** βœ… Works (correct model selected) @@ -303,7 +303,7 @@ Turn 4: > now delete it { "models": { "GPT 5 Codex Low (ChatGPT Subscription)": { - "id": "gpt-5-codex", + "id": "gpt-5.3-codex", "options": { "reasoningEffort": "low" } } } @@ -325,7 +325,7 @@ Turn 4: > now delete it ```json { "models": { - "gpt-5-codex-low": { + "gpt-5.3-codex-low": { "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } } @@ -334,10 +334,10 @@ Turn 4: > now delete it ``` **Expected:** -- 
CLI: `--model=openai/gpt-5-codex-low` βœ… +- CLI: `--model=openai/gpt-5.3-codex-low` βœ… - TUI: Shows "GPT 5 Codex Low (OAuth)" βœ… - Plugin: Finds and applies per-model options βœ… -- API: Receives `"gpt-5-codex"` βœ… +- API: Receives `"gpt-5.3-codex"` βœ… **Result:** βœ… **Optimal experience** @@ -349,12 +349,12 @@ Turn 4: > now delete it ```json { "plugin": ["opencode-openai-codex-multi-auth"], - "model": "openai/gpt-5-codex" + "model": "openai/gpt-5.3-codex" } ``` **Expected:** -- Uses default OpenCode model: `gpt-5-codex` +- Uses default OpenCode model: `gpt-5.3-codex` - Plugin applies: Global options + Codex defaults - No errors βœ… @@ -367,7 +367,7 @@ Turn 4: > now delete it ### Enable Debug Mode ```bash -DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/gpt-5-codex-low +DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/gpt-5.3-codex-low ``` ### Expected Debug Output @@ -376,7 +376,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/gpt-5-codex-low ``` [openai-codex-plugin] Debug logging ENABLED -[openai-codex-plugin] Model config lookup: "gpt-5-codex-low" β†’ normalized to "gpt-5-codex" for API { +[openai-codex-plugin] Model config lookup: "gpt-5.3-codex-low" β†’ normalized to "gpt-5.3-codex" for API { hasModelSpecificConfig: true, resolvedConfig: { reasoningEffort: 'low', @@ -395,12 +395,12 @@ DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/gpt-5-codex-low #### Case 2: Default Model (No Custom Config) ```bash -DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/gpt-5-codex +DEBUG_CODEX_PLUGIN=1 opencode run "test" --model=openai/gpt-5.3-codex ``` ``` [openai-codex-plugin] Debug logging ENABLED -[openai-codex-plugin] Model config lookup: "gpt-5-codex" β†’ normalized to "gpt-5-codex" for API { +[openai-codex-plugin] Model config lookup: "gpt-5.3-codex" β†’ normalized to "gpt-5.3-codex" for API { hasModelSpecificConfig: false, resolvedConfig: { reasoningEffort: 'medium', @@ -451,7 +451,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run 
"test" --model=openai/gpt-5-codex cat > ~/.config/opencode/opencode.jsonc <<'EOF' { "plugin": ["opencode-openai-codex-multi-auth"], - "model": "openai/gpt-5-codex" + "model": "openai/gpt-5.3-codex" } EOF @@ -463,7 +463,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run "write hello world to test.txt" - βœ… Plugin installs automatically - βœ… Auth works - βœ… Debug log shows: `hasModelSpecificConfig: false` -- βœ… Model normalizes to `"gpt-5-codex"` +- βœ… Model normalizes to `"gpt-5.3-codex"` - βœ… No errors --- @@ -478,11 +478,11 @@ cat > ~/.config/opencode/opencode.jsonc <<'EOF' "provider": { "openai": { "models": { - "gpt-5-codex-low": { + "gpt-5.3-codex-low": { "name": "GPT 5 Codex Low (OAuth)", "options": { "reasoningEffort": "low" } }, - "gpt-5-codex-high": { + "gpt-5.3-codex-high": { "name": "GPT 5 Codex High (OAuth)", "options": { "reasoningEffort": "high" } } @@ -493,8 +493,8 @@ cat > ~/.config/opencode/opencode.jsonc <<'EOF' EOF # Test per-model options -DEBUG_CODEX_PLUGIN=1 opencode run "test low" --model=openai/gpt-5-codex-low -DEBUG_CODEX_PLUGIN=1 opencode run "test high" --model=openai/gpt-5-codex-high +DEBUG_CODEX_PLUGIN=1 opencode run "test low" --model=openai/gpt-5.3-codex-low +DEBUG_CODEX_PLUGIN=1 opencode run "test high" --model=openai/gpt-5.3-codex-high ``` **Verify:** @@ -507,7 +507,7 @@ DEBUG_CODEX_PLUGIN=1 opencode run "test high" --model=openai/gpt-5-codex-high #### Step 3: Multi-Turn Test (Critical for store:false) ```bash -DEBUG_CODEX_PLUGIN=1 opencode --model=openai/gpt-5-codex-medium +DEBUG_CODEX_PLUGIN=1 opencode --model=openai/gpt-5.3-codex-medium ``` ``` @@ -533,9 +533,9 @@ DEBUG_CODEX_PLUGIN=1 opencode ``` ``` -> /model openai/gpt-5-codex-low +> /model openai/gpt-5.3-codex-low > write hello to test.txt -> /model openai/gpt-5-codex-high +> /model openai/gpt-5.3-codex-high > write goodbye to test2.txt ``` @@ -550,7 +550,7 @@ DEBUG_CODEX_PLUGIN=1 opencode ```bash # 1. 
Start opencode -opencode --model=openai/gpt-5-codex-high +opencode --model=openai/gpt-5.3-codex-high # 2. Run a command > write test @@ -565,7 +565,7 @@ opencode ``` **Verify:** -- βœ… Last used model is `gpt-5-codex-high` +- βœ… Last used model is `gpt-5.3-codex-high` - βœ… Model is auto-selected on restart - βœ… TUI shows correct model highlighted @@ -575,22 +575,24 @@ opencode ### Test: normalizeModel() Coverage +`normalizeModel()` lowercases unknown slugs for diagnostics only; requests still reject unknown models. + ```typescript -normalizeModel("gpt-5.2-codex") // β†’ "gpt-5.2-codex" βœ… -normalizeModel("gpt-5.2-codex-high") // β†’ "gpt-5.2-codex" βœ… -normalizeModel("gpt-5.2-xhigh") // β†’ "gpt-5.2" βœ… -normalizeModel("gpt-5.1-codex-max-xhigh")// β†’ "gpt-5.1-codex-max" βœ… -normalizeModel("gpt-5.1-codex-mini-high")// β†’ "gpt-5.1-codex-mini" βœ… -normalizeModel("codex-mini-latest") // β†’ "gpt-5.1-codex-mini" βœ… -normalizeModel("gpt-5.1-codex") // β†’ "gpt-5.1-codex" βœ… -normalizeModel("gpt-5.1") // β†’ "gpt-5.1" βœ… -normalizeModel("my-codex-model") // β†’ "gpt-5.1-codex" βœ… -normalizeModel("gpt-5") // β†’ "gpt-5.1" βœ… -normalizeModel("gpt-5-mini") // β†’ "gpt-5.1" βœ… -normalizeModel("gpt-5-nano") // β†’ "gpt-5.1" βœ… -normalizeModel("GPT 5 High") // β†’ "gpt-5.1" βœ… -normalizeModel(undefined) // β†’ "gpt-5.1" βœ… -normalizeModel("random-model") // β†’ "gpt-5.1" βœ… (fallback) +normalizeModel("gpt-5.3-codex") // β†’ "gpt-5.3-codex" βœ… +normalizeModel("gpt-5.2-codex-high") // β†’ "gpt-5.2-codex" βœ… +normalizeModel("gpt-5.2-xhigh") // β†’ "gpt-5.2" βœ… +normalizeModel("gpt-5.1-codex-max-xhigh") // β†’ "gpt-5.1-codex-max" βœ… +normalizeModel("gpt-5.1-codex-mini-high") // β†’ "gpt-5.1-codex-mini" βœ… +normalizeModel("codex-mini-latest") // β†’ "gpt-5.1-codex-mini" βœ… +normalizeModel("gpt-5.1-codex") // β†’ "gpt-5.1-codex" βœ… +normalizeModel("gpt-5.1") // β†’ "gpt-5.1" βœ… +normalizeModel("my-codex-model") // β†’ "my-codex-model" βœ… 
+normalizeModel("gpt-5") // β†’ "gpt-5.1" βœ… +normalizeModel("gpt-5-mini") // β†’ "gpt-5-mini" βœ… +normalizeModel("gpt-5-nano") // β†’ "gpt-5-nano" βœ… +normalizeModel("GPT 5 High") // β†’ "gpt 5 high" βœ… +normalizeModel(undefined) // β†’ "gpt-5.1" βœ… +normalizeModel("random-model") // β†’ "random-model" βœ… ``` **Implementation:** @@ -598,51 +600,19 @@ normalizeModel("random-model") // β†’ "gpt-5.1" βœ… (fallback) export function normalizeModel(model: string | undefined): string { if (!model) return "gpt-5.1"; const modelId = model.includes("/") ? model.split("/").pop()! : model; - const mappedModel = MODEL_MAP[modelId]; + const trimmed = modelId.trim(); + if (!trimmed) return "gpt-5.1"; + const mappedModel = getNormalizedModel(trimmed); if (mappedModel) return mappedModel; - - const normalized = modelId.toLowerCase(); - - if (normalized.includes("gpt-5.2-codex") || normalized.includes("gpt 5.2 codex")) { - return "gpt-5.2-codex"; - } - if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) { - return "gpt-5.2"; - } - if (normalized.includes("gpt-5.1-codex-max") || normalized.includes("gpt 5.1 codex max")) { - return "gpt-5.1-codex-max"; - } - if (normalized.includes("gpt-5.1-codex-mini") || normalized.includes("gpt 5.1 codex mini")) { - return "gpt-5.1-codex-mini"; - } - if ( - normalized.includes("codex-mini-latest") || - normalized.includes("gpt-5-codex-mini") || - normalized.includes("gpt 5 codex mini") - ) { - return "codex-mini-latest"; - } - if (normalized.includes("gpt-5.1-codex") || normalized.includes("gpt 5.1 codex")) { - return "gpt-5.1-codex"; - } - if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) { - return "gpt-5.1"; - } - if (normalized.includes("codex")) { - return "gpt-5.1-codex"; - } - if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) { - return "gpt-5.1"; - } - return "gpt-5.1"; + return trimmed.toLowerCase(); } ``` **Why this works:** -- βœ… Case-insensitive (`.toLowerCase()` + 
`.includes()`) -- βœ… Pattern-based (works with any naming) -- βœ… Safe fallback (unknown models β†’ `gpt-5.1`) -- βœ… Codex priority with explicit Codex Mini support (`codex-mini*` β†’ `codex-mini-latest`) +- βœ… Explicit model map + strict dynamic regex for known slugs +- βœ… Case-insensitive normalization for unknown/legacy slugs +- βœ… No substring-based coercion (reduces false positives) +- βœ… Only missing/blank model values fall back to `gpt-5.1` --- @@ -660,7 +630,7 @@ opencode run "test" --model=openai/claude-3.5 ```bash # Without running: opencode auth login -opencode run "test" --model=openai/gpt-5-codex +opencode run "test" --model=openai/gpt-5.3-codex ``` **Expected:** ❌ 401 Unauthorized error @@ -697,36 +667,36 @@ opencode run "test" --model=openai/gpt-5-codex ```typescript describe('normalizeModel', () => { test('handles all default models', () => { - expect(normalizeModel('gpt-5')).toBe('gpt-5') - expect(normalizeModel('gpt-5-codex')).toBe('gpt-5-codex') - expect(normalizeModel('gpt-5-codex-mini')).toBe('codex-mini-latest') - expect(normalizeModel('gpt-5-mini')).toBe('gpt-5') - expect(normalizeModel('gpt-5-nano')).toBe('gpt-5') + expect(normalizeModel('gpt-5')).toBe('gpt-5.1') + expect(normalizeModel('gpt-5.3-codex')).toBe('gpt-5.3-codex') + expect(normalizeModel('gpt-5.3-codex-mini')).toBe('gpt-5.1-codex-mini') + expect(normalizeModel('gpt-5-mini')).toBe('gpt-5-mini') + expect(normalizeModel('gpt-5-nano')).toBe('gpt-5-nano') }) test('handles custom preset names', () => { - expect(normalizeModel('gpt-5-codex-low')).toBe('gpt-5-codex') - expect(normalizeModel('openai/gpt-5-codex-mini-high')).toBe('codex-mini-latest') - expect(normalizeModel('gpt-5-high')).toBe('gpt-5') + expect(normalizeModel('gpt-5.3-codex-low')).toBe('gpt-5.3-codex') + expect(normalizeModel('openai/gpt-5.3-codex-mini-high')).toBe('gpt-5.1-codex-mini') + expect(normalizeModel('gpt-5-high')).toBe('gpt-5.1') }) test('handles legacy names', () => { - expect(normalizeModel('GPT 5 Codex 
Low (ChatGPT Subscription)')).toBe('gpt-5-codex') + expect(normalizeModel('GPT 5 Codex Low (ChatGPT Subscription)')).toBe('gpt 5 codex low (chatgpt subscription)') }) test('handles edge cases', () => { - expect(normalizeModel(undefined)).toBe('gpt-5') + expect(normalizeModel(undefined)).toBe('gpt-5.1') expect(normalizeModel('codex-mini-latest')).toBe('codex-mini-latest') - expect(normalizeModel('random')).toBe('gpt-5') + expect(normalizeModel('random')).toBe('random') }) }) describe('getModelConfig', () => { test('returns per-model options when found', () => { - const config = getModelConfig('gpt-5-codex-low', { + const config = getModelConfig('gpt-5.3-codex-low', { global: { reasoningEffort: 'medium' }, models: { - 'gpt-5-codex-low': { + 'gpt-5.3-codex-low': { options: { reasoningEffort: 'low' } } } @@ -735,7 +705,7 @@ describe('getModelConfig', () => { }) test('returns global options when model not in config', () => { - const config = getModelConfig('gpt-5-codex', { + const config = getModelConfig('gpt-5.3-codex', { global: { reasoningEffort: 'medium' }, models: {} }) diff --git a/docs/getting-started.md b/docs/getting-started.md index cf3be88..ef76c76 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -73,22 +73,8 @@ Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): "store": false }, "models": { - "gpt-5.1-codex-low": { - "name": "GPT 5.1 Codex Low (OAuth)", - "limit": { - "context": 272000, - "output": 128000 - }, - "options": { - "reasoningEffort": "low", - "reasoningSummary": "auto", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false - } - }, - "gpt-5.1-codex-medium": { - "name": "GPT 5.1 Codex Medium (OAuth)", + "gpt-5.3-codex": { + "name": "GPT 5.3 Codex (OAuth)", "limit": { "context": 272000, "output": 128000 @@ -101,64 +87,22 @@ Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): "store": false } }, - "gpt-5.1-codex-high": { - "name": "GPT 5.1 Codex High (OAuth)", - "limit": 
{ - "context": 272000, - "output": 128000 - }, - "options": { - "reasoningEffort": "high", - "reasoningSummary": "detailed", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false - } - }, - "gpt-5.1-codex-max": { - "name": "GPT 5.1 Codex Max (OAuth)", - "limit": { - "context": 272000, - "output": 128000 - }, - "options": { - "reasoningEffort": "high", - "reasoningSummary": "detailed", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false - } - }, - "gpt-5.1-codex-max-low": { - "name": "GPT 5.1 Codex Max Low (OAuth)", + "gpt-5.3-codex-low": { + "name": "GPT 5.3 Codex Low (OAuth)", "limit": { "context": 272000, "output": 128000 }, "options": { "reasoningEffort": "low", - "reasoningSummary": "detailed", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false - } - }, - "gpt-5.1-codex-max-medium": { - "name": "GPT 5.1 Codex Max Medium (OAuth)", - "limit": { - "context": 272000, - "output": 128000 - }, - "options": { - "reasoningEffort": "medium", - "reasoningSummary": "detailed", + "reasoningSummary": "auto", "textVerbosity": "medium", "include": ["reasoning.encrypted_content"], "store": false } }, - "gpt-5.1-codex-max-high": { - "name": "GPT 5.1 Codex Max High (OAuth)", + "gpt-5.3-codex-high": { + "name": "GPT 5.3 Codex High (OAuth)", "limit": { "context": 272000, "output": 128000 @@ -171,22 +115,8 @@ Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): "store": false } }, - "gpt-5.1-codex-max-xhigh": { - "name": "GPT 5.1 Codex Max Extra High (OAuth)", - "limit": { - "context": 272000, - "output": 128000 - }, - "options": { - "reasoningEffort": "xhigh", - "reasoningSummary": "detailed", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false - } - }, - "gpt-5.1-codex-mini-medium": { - "name": "GPT 5.1 Codex Mini Medium (OAuth)", + "gpt-5.2-codex": { + "name": "GPT 5.2 Codex (OAuth)", "limit": { "context": 272000, 
"output": 128000 @@ -199,36 +129,8 @@ Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): "store": false } }, - "gpt-5.1-codex-mini-high": { - "name": "GPT 5.1 Codex Mini High (OAuth)", - "limit": { - "context": 272000, - "output": 128000 - }, - "options": { - "reasoningEffort": "high", - "reasoningSummary": "detailed", - "textVerbosity": "medium", - "include": ["reasoning.encrypted_content"], - "store": false - } - }, - "gpt-5.1-low": { - "name": "GPT 5.1 Low (OAuth)", - "limit": { - "context": 272000, - "output": 128000 - }, - "options": { - "reasoningEffort": "low", - "reasoningSummary": "auto", - "textVerbosity": "low", - "include": ["reasoning.encrypted_content"], - "store": false - } - }, - "gpt-5.1-medium": { - "name": "GPT 5.1 Medium (OAuth)", + "gpt-5.2": { + "name": "GPT 5.2 (OAuth)", "limit": { "context": 272000, "output": 128000 @@ -240,20 +142,6 @@ Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): "include": ["reasoning.encrypted_content"], "store": false } - }, - "gpt-5.1-high": { - "name": "GPT 5.1 High (OAuth)", - "limit": { - "context": 272000, - "output": 128000 - }, - "options": { - "reasoningEffort": "high", - "reasoningSummary": "detailed", - "textVerbosity": "high", - "include": ["reasoning.encrypted_content"], - "store": false - } } } } @@ -262,39 +150,31 @@ Add this to `~/.config/opencode/opencode.jsonc` (or `.json`): ``` **What you get:** - - βœ… GPT 5.2 (None/Low/Medium/High/xHigh reasoning) - - βœ… GPT 5.2 Codex (Low/Medium/High/xHigh reasoning) - - βœ… GPT 5.1 Codex Max (Low/Medium/High/xHigh reasoning presets) - - βœ… GPT 5.1 Codex (Low/Medium/High reasoning) - - βœ… GPT 5.1 Codex Mini (Medium/High reasoning) - - βœ… GPT 5.1 (None/Low/Medium/High reasoning) + - βœ… GPT 5.3 Codex (low/medium/high/xhigh reasoning) + - βœ… GPT 5.2 (none/low/medium/high/xhigh reasoning) + - βœ… GPT 5.2 Codex (low/medium/high/xhigh reasoning) + - βœ… GPT 5.1 Codex Max (low/medium/high/xhigh reasoning presets) + - βœ… GPT 5.1 Codex 
(low/medium/high reasoning) + - βœ… GPT 5.1 Codex Mini (medium/high reasoning) + - βœ… GPT 5.1 (none/low/medium/high reasoning) - βœ… 272k context + 128k output window for all GPT 5.x presets. - βœ… All visible in OpenCode model selector - βœ… Optimal settings for each reasoning level **Optional: Personality configuration** -You can set personality globally and override it per model: +Personality settings live in the plugin config file: `~/.config/opencode/openai-codex-auth-config.json` under `custom_settings`. ```json { - "provider": { - "openai": { - "options": { - "reasoningEffort": "high", - "reasoningSummary": "detailed", - "textVerbosity": "medium", - "include": [ - "reasoning.encrypted_content" - ], - "store": false, - "personality": "friendly" - }, - "models": { - "gpt-5.3-codex": { - "options": { - "personality": "pragmatic" - } + "custom_settings": { + "options": { + "personality": "friendly" + }, + "models": { + "gpt-5.3-codex": { + "options": { + "personality": "pragmatic" } } } @@ -302,20 +182,22 @@ You can set personality globally and override it per model: } ``` -Accepted values are `none`, `friendly`, and `pragmatic` (case-insensitive). +Personality descriptions are loaded from: +- Project-local `.opencode/Personalities/*.md` +- Global `~/.config/opencode/Personalities/*.md` + +The filename (case-insensitive) is the personality key; the file contents are used verbatim. -> **Note**: All `gpt-5.1-codex-mini*` presets use 272k context / 128k output limits. +Built-ins: `none`, `default` (uses model runtime defaults), `friendly`, `pragmatic` (fallback if unset). Any other key requires a matching file. + +> **Note**: All `gpt-5.*` presets use 272k context / 128k output limits. > > **Note**: Codex Max presets map to the `gpt-5.1-codex-max` slug with 272k context and 128k output. Use `gpt-5.1-codex-max-low/medium/high/xhigh` to pick the reasoning level (only `-xhigh` uses `xhigh` reasoning). 
> -> **Note**: GPT 5.2 and GPT 5.2 Codex support `xhigh` reasoning. Use explicit reasoning levels (e.g., `gpt-5.2-xhigh`, `gpt-5.2-codex-xhigh`) for precise control. +> **Note**: GPT-5.3-Codex, GPT-5.2, and GPT-5.2 Codex support `xhigh` reasoning. Use explicit reasoning levels (e.g., `gpt-5.3-codex-xhigh`, `gpt-5.2-xhigh`) for precise control. Prompt caching is enabled out of the box: when OpenCode sends its session identifier as `prompt_cache_key`, the plugin forwards it untouched so multi-turn runs reuse prior work. If you hit your ChatGPT subscription limits, the plugin returns a friendly Codex-style message with the 5-hour and weekly usage windows so you know when capacity resets. -### Migration Note: Legacy `codexMode` - -The old bridge-mode behavior has been removed. `codexMode` is deprecated (no-op) and no longer changes request prompt/tool behavior. The runtime now relies on Codex instructions, OpenCode harness metadata, and live tool schemas. - > **⚠️ CRITICAL:** This full configuration is REQUIRED. OpenCode's context auto-compaction and usage sidebar only work with this full configuration. GPT 5 models are temperamental and need proper setup - minimal configurations are NOT supported. #### ❌ Minimal Configuration (NOT SUPPORTED - DO NOT USE) @@ -327,7 +209,7 @@ The old bridge-mode behavior has been removed. `codexMode` is deprecated (no-op) { "$schema": "https://opencode.ai/config.json", "plugin": ["opencode-openai-codex-multi-auth"], - "model": "openai/gpt-5-codex" + "model": "openai/gpt-5.3-codex" } ``` @@ -348,7 +230,7 @@ opencode auth login If you see other OpenAI auth options, they are OpenCode's built-in methods. This plugin's flow is the one labeled **"(Codex Multi Auth)"**. 3. Browser opens automatically for OAuth flow 4. Log in with your ChatGPT account -5. Done! Token saved to `~/.config/opencode/auth/openai.json` +5. Done! 
Accounts saved to `~/.config/opencode/openai-codex-accounts.json` **Multi-account:** Run `opencode auth login` again to add more ChatGPT accounts (you'll be prompted to add, fresh start, or manage accounts to enable/disable). Accounts are stored in `~/.config/opencode/openai-codex-accounts.json`. See [Multi-Account](multi-account.md). @@ -360,13 +242,13 @@ opencode auth login ```bash # Quick test -opencode run "write hello world to test.txt" --model=openai/gpt-5.1-codex-medium +opencode run "write hello world to test.txt" --model=openai/gpt-5.3-codex --variant=medium # Or start interactive session opencode ``` -You'll see all 22 GPT 5.x variants (GPT 5.2, GPT 5.2 Codex, Codex Max, Codex, Codex Mini, and GPT 5.1 presets) in the model selector! +You'll see all GPT 5.x variants (GPT 5.3 Codex, GPT 5.2, GPT 5.2 Codex, Codex Max, Codex, Codex Mini, and GPT 5.1 presets) in the model selector! --- @@ -447,7 +329,7 @@ opencode --version ### Check Authentication ```bash -cat ~/.config/opencode/auth/openai.json +cat ~/.config/opencode/openai-codex-accounts.json # Should show OAuth credentials (if authenticated) ``` @@ -455,7 +337,7 @@ cat ~/.config/opencode/auth/openai.json ```bash # Enable logging to verify requests -ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" --model=openai/gpt-5-codex +ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" --model=openai/gpt-5.3-codex # Check logs ls ~/.config/opencode/logs/codex-plugin/ diff --git a/docs/index.md b/docs/index.md index 2071f2f..eeac880 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,6 +1,6 @@ # OpenCode OpenAI Codex Auth Plugin -> Access GPT-5 Codex through your ChatGPT Plus/Pro subscription in OpenCode +> Access GPT-5.3 Codex through your ChatGPT Plus/Pro subscription in OpenCode [![Tests](https://github.com/iam-brain/opencode-openai-codex-multi-auth/actions/workflows/ci.yml/badge.svg)](https://github.com/iam-brain/opencode-openai-codex-multi-auth/actions) @@ -77,7 +77,7 @@ npx -y 
opencode-openai-codex-multi-auth@latest ### Quick Test ```bash -opencode run "write hello world to test.txt" --model=openai/gpt-5.2 --variant=medium +opencode run "write hello world to test.txt" --model=openai/gpt-5.3-codex --variant=medium ``` --- @@ -85,9 +85,9 @@ opencode run "write hello world to test.txt" --model=openai/gpt-5.2 --variant=me ## Features βœ… **OAuth Authentication** - Secure ChatGPT Plus/Pro login -βœ… **GPT 5.2 + GPT 5.2 Codex + GPT 5.1 Models** - 22 pre-configured variants across GPT 5.2, GPT 5.2 Codex, GPT 5.1, Codex, Codex Max, Codex Mini +βœ… **GPT 5.3 Codex + GPT 5.2 + GPT 5.2 Codex + GPT 5.1 Models** - Pre-configured variants across GPT 5.3 Codex, GPT 5.2, GPT 5.2 Codex, GPT 5.1, Codex, Codex Max, Codex Mini βœ… **Variant system support** - Works with OpenCode v1.0.210+ model variants and legacy presets -βœ… **Per-Model Configuration** - Different reasoning effort, including `xhigh` for GPT 5.2, GPT 5.2 Codex, and Codex Max +βœ… **Per-Model Configuration** - Different reasoning effort, including `xhigh` for GPT 5.3 Codex, GPT 5.2, GPT 5.2 Codex, and Codex Max βœ… **Multi-Turn Conversations** - Full conversation history with stateless backend βœ… **Multi-Account Support** - Sticky-by-default rotation + PID offset for parallel agents βœ… **Verified Configuration** - Use `config/opencode-modern.json` (v1.0.210+) or `config/opencode-legacy.json` (older) diff --git a/docs/multi-account.md b/docs/multi-account.md index f6a96d1..535511b 100644 --- a/docs/multi-account.md +++ b/docs/multi-account.md @@ -66,6 +66,9 @@ The plugin exposes a few OpenCode tools to inspect or switch accounts: - `codex-status` - list accounts and status - `codex-switch-accounts` - switch active account by index (1-based) - `codex-toggle-account` - enable/disable account by index (1-based) +- `codex-remove-account` - remove account by index (1-based) + +The remove tool requires `confirm: true` when called directly; the TUI slash command template includes confirmation 
automatically. These are primarily useful in the OpenCode TUI. To enable or disable accounts, re-run `opencode auth login` and choose **manage**. @@ -95,6 +98,7 @@ Example accounts file: ], "activeIndex": 0, "activeIndexByFamily": { + "gpt-5.3-codex": 0, "codex": 0, "gpt-5.2-codex": 0, "codex-max": 0, @@ -110,6 +114,8 @@ It's not related to the npm package version; it exists so the file format can ev Accounts are matched by `accountId` + `email` + `plan` (strict identity). This allows multiple emails per account and multiple accounts per email without collisions. +Legacy records that lack full identity are preserved but skipped for selection until hydration. Upgrades from v2-style storage may trigger repair/quarantine warnings; re-authenticate if needed. + ### Fields | Field | Description | @@ -163,6 +169,14 @@ Configure in `~/.config/opencode/openai-codex-auth-config.json`: | `round-robin` | Rotate to next account on every request | Maximum throughput | | `hybrid` | Deterministic selection using health score + token bucket + LRU bias | Best overall distribution | +### Hybrid Strategy Details + +Hybrid selection prefers healthy accounts with available tokens, then falls back to LRU: + +- **Health score defaults:** start 70, success +1, rate limit -10, failure -20, min usable 50, max 100, recovery +2/hour. +- **Token bucket defaults:** max 50 tokens, initial 50, regen 6/minute, stale after 1 hour. +- **Tie-breaker:** most recently used order (LRU bias). + ### Set Strategy via Environment Variable ```bash diff --git a/docs/privacy.md b/docs/privacy.md index 1907da8..8ba1de2 100644 --- a/docs/privacy.md +++ b/docs/privacy.md @@ -1,5 +1,7 @@ # Privacy & Data Handling +**Last Updated:** Feb 2026 + This page explains how the OpenCode OpenAI Codex Auth Plugin handles your data and protects your privacy. ## Overview @@ -24,19 +26,24 @@ This plugin prioritizes user privacy and data security. 
We believe in transparen
 
 All data is stored **locally on your machine**:
 
 ### OAuth Tokens
-- **Location:** `~/.config/opencode/auth/openai.json`
-- **Contents:** Access tokens, refresh tokens, expiration timestamps
-- **Managed by:** OpenCode's credential management system
+- **Location:** `~/.config/opencode/openai-codex-accounts.json` (plus any project-local storage seeded by OpenCode)
+- **Contents:** Refresh tokens, access tokens, expiration timestamps, account identity metadata
+- **Managed by:** This plugin's account storage (with file locking + atomic writes)
 - **Security:** File permissions restrict access to your user account
 
 ### Cache Files
 - **Location:** `~/.config/opencode/cache/`
 - **Contents:**
-  - `gpt-5.1-instructions.md`, `gpt-5.2-instructions.md`, `gpt-5.2-codex-instructions.md`, etc. (Codex system instructions)
+  - `gpt-5.1-codex-instructions.md`, `gpt-5.3-codex-instructions.md`, `gpt-5.3-codex-instructions-v2.md`, etc. (Codex system instructions)
   - `*-instructions-meta.json` (ETag/tag/timestamp metadata per family)
-  - `codex-models-cache.json` (runtime `/codex/models` fallback cache)
+  - `codex-models-cache-<hash>.json` (per-account hashed `/codex/models` cache)
 - **Purpose:** Reduce GitHub API calls, preserve offline fallbacks, and improve startup/runtime performance
 
+### Personality Cache Files
+- **Location:** `~/.config/opencode/Personalities/`
+- **Contents:** `Friendly.md`, `Pragmatic.md` (server-derived personality fallbacks)
+- **Purpose:** Durable fallback when runtime defaults cannot be fetched; user-managed files are not overwritten
+
 ### Debug Logs
 - **Location:** `~/.config/opencode/logs/codex-plugin/`
 - **Contents:** Request/response logs (only when `ENABLE_PLUGIN_REQUEST_LOGGING=1` is set)
@@ -103,7 +110,7 @@ You have complete control over your data:
 
 ```bash
 opencode auth logout
 # Or manually:
-rm ~/.config/opencode/auth/openai.json
+rm ~/.config/opencode/openai-codex-accounts.json
 ```
 
 ### Delete Cache Files
diff --git 
a/docs/troubleshooting.md b/docs/troubleshooting.md index 65ee2f3..5c69ed7 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -26,14 +26,14 @@ opencode auth login **2. Check auth file exists:** ```bash -cat ~/.config/opencode/auth/openai.json -# Should show OAuth credentials +cat ~/.config/opencode/openai-codex-accounts.json +# Should show stored accounts with OAuth credentials ``` **3. Check token expiration:** ```bash # Token has "expires" timestamp -cat ~/.config/opencode/auth/openai.json | jq '.expires' +cat ~/.config/opencode/openai-codex-accounts.json | jq '.accounts[]?.expires' # Compare to current time date +%s000 # Current timestamp in milliseconds @@ -162,6 +162,30 @@ opencode auth login **4. For parallel agents, keep PID offset enabled:** - `pidOffsetEnabled: true` helps parallel OpenCode sessions start on different accounts +### Hard-stop: all accounts unavailable + +**Symptoms:** +- Requests fail with HTTP 429 and error type `all_accounts_rate_limited` + +**What this means:** +- All accounts are rate-limited beyond the hard-stop wait threshold. + +**Solutions:** +- Increase `hardStopMaxWaitMs` in `~/.config/opencode/openai-codex-auth-config.json` +- Set `hardStopMaxWaitMs: 0` to disable the hard-stop and allow longer waits +- Add another account (`opencode auth login`) + +### Hard-stop: all accounts auth-failed + +**Symptoms:** +- Requests fail with HTTP 401 and error type `all_accounts_auth_failed` + +**What this means:** +- All accounts are in auth-failure cooldown. + +**Solutions:** +- Re-authenticate: `opencode auth login` + ### Reset Accounts If tokens were revoked or you want to start over: @@ -184,7 +208,7 @@ See [Multi-Account](multi-account.md) for details. ### "Model not found" -**Error**: `Model 'openai/gpt-5-codex-low' not found` +**Error**: `Model 'openai/gpt-5.3-codex-low' not found` **Cause 1: Config key mismatch** @@ -192,28 +216,60 @@ See [Multi-Account](multi-account.md) for details. 
 ```json
 {
   "models": {
-    "gpt-5-codex-low": { ... }  // ← This is the key
+    "gpt-5.3-codex-low": { ... }  // ← This is the key
   }
 }
 ```
 
-**CLI must match exactly:**
+**CLI Usage (Modern):**
 ```bash
-opencode run "test" --model=openai/gpt-5-codex-low  # Must match config key
+opencode run "test" --model=openai/gpt-5.3-codex --variant=low
+```
+
+**CLI Usage (Legacy Suffix):**
+```bash
+opencode run "test" --model=openai/gpt-5.3-codex-low  # Must match config key
 ```
 
 **Cause 2: Missing provider prefix**
 
 **❌ Wrong:**
 ```yaml
-model: gpt-5-codex-low
+model: gpt-5.3-codex-low
 ```
 
 **βœ… Correct:**
 ```yaml
-model: openai/gpt-5-codex-low
+model: openai/gpt-5.3-codex
+variant: low
 ```
 
+### Hard-stop: unsupported model
+
+**Symptoms:**
+- Requests fail with HTTP 400 and error type `unsupported_model`
+
+**What this means:**
+- The requested model is not in the server catalog. Custom model IDs are rejected.
+
+**Solutions:**
+- Use a model ID that appears in `/codex/models`
+- Update your config to match the catalog model IDs (see `config/opencode-modern.json`)
+
+### Hard-stop: model catalog unavailable
+
+**Symptoms:**
+- Requests fail with HTTP 400 and error type `unsupported_model`
+- Error message mentions the model catalog being unavailable
+
+**What this means:**
+- The plugin cannot access `/codex/models` and has no cached catalog.
+
+**Solutions:**
+- Run once with network access to seed the catalog cache
+- Retry after the catalog cache is available
+- Check for `codex-models-cache-*.json` under `~/.config/opencode/cache/` (per-account hashed)
+
 ### Per-Model Options Not Applied
 
 **Symptom**: All models behave the same despite different `reasoningEffort`
@@ -248,7 +304,7 @@ AI_APICallError: Item with id 'msg_abc123' not found.
 Items are not persisted when `store` is set to false.
``` -**Cause**: Old plugin version (fixed in v2.1.2+) +**Cause**: Older plugin version **Solution:** ```bash @@ -307,7 +363,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-error-response.json ``` **Common causes:** -1. Invalid options for model (e.g., `minimal` for gpt-5-codex) +1. Invalid options for model (e.g., `minimal` for gpt-5.3-codex) 2. Malformed request body 3. Unsupported parameter @@ -315,7 +371,7 @@ cat ~/.config/opencode/logs/codex-plugin/request-*-error-response.json **Error:** ``` -Rate limit reached for gpt-5-codex +Rate limit reached for gpt-5.3-codex ``` **Solutions:** @@ -326,10 +382,10 @@ Check headers in response logs: cat ~/.config/opencode/logs/codex-plugin/request-*-response.json | jq '.headers["x-codex-primary-reset-after-seconds"]' ``` -**2. Switch to different model:** +**2. Use a specific model variant:** ```bash -# If codex is rate limited, try gpt-5 -opencode run "task" --model=openai/gpt-5 +# Explicitly use variant via flag +opencode run "task" --model=openai/gpt-5.3-codex --variant=high ``` ### "Context Window Exceeded" @@ -351,7 +407,7 @@ Your input exceeds the context window **2. Use compact mode** (if OpenCode supports it) **3. 
Switch to model with larger context:** -- gpt-5.1-codex / gpt-5.2-codex presets have larger context windows than lightweight presets +- gpt-5.3-codex / gpt-5.2-codex / gpt-5.1-codex presets have larger context windows than lightweight presets --- @@ -377,10 +433,10 @@ Using cached instructions ls -lt ~/.config/opencode/cache/*-instructions-meta.json # Check lastChecked timestamp (example family) -cat ~/.config/opencode/cache/gpt-5.1-instructions-meta.json | jq '.lastChecked' +cat ~/.config/opencode/cache/gpt-5.3-codex-instructions-meta.json | jq '.lastChecked' # Check runtime model metadata fallback cache -ls -lt ~/.config/opencode/cache/codex-models-cache.json +ls -lt ~/.config/opencode/cache/codex-models-cache-*.json ``` **Manual workaround** (if on old version): @@ -411,7 +467,7 @@ DEBUG_CODEX_PLUGIN=1 ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" ```bash # Run command with logging -ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" --model=openai/gpt-5-codex-low +ENABLE_PLUGIN_REQUEST_LOGGING=1 opencode run "test" --model=openai/gpt-5.3-codex --variant=low # Check what was sent to API cat ~/.config/opencode/logs/codex-plugin/request-*-after-transform.json | jq '{ diff --git a/index.ts b/index.ts index bd1cc82..c3708cd 100644 --- a/index.ts +++ b/index.ts @@ -78,16 +78,90 @@ import { import { formatToastMessage } from "./lib/formatting.js"; import { logCritical } from "./lib/logger.js"; import { FetchOrchestrator } from "./lib/fetch-orchestrator.js"; +import { warmCodexInstructions } from "./lib/prompts/codex.js"; +import { getCachedVariantEfforts, warmCodexModelCatalog } from "./lib/prompts/codex-models.js"; +import { buildInternalModelDefaults, mergeModelDefaults } from "./lib/catalog-defaults.js"; -const LEGACY_ALLOWED_METADATA_MODELS = new Set([ +/** + * Fallback model slugs when server is unavailable. + * The server response is the source of truth; this is only used as a fallback. 
+ */
+const FALLBACK_MODEL_SLUGS = new Set([
+	"gpt-5.3-codex",
+	"gpt-5.2-codex",
 	"gpt-5.2",
 	"gpt-5.1",
-	"gpt-5-codex",
-	"codex-mini-latest",
+	"gpt-5.1-codex",
+	"gpt-5.1-codex-max",
+	"gpt-5.1-codex-mini",
 ]);
 
+function parseGptVersion(slug: string): { major: number; minor: number } | null {
+	const match = slug.toLowerCase().match(/^gpt-(\d+)\.(\d+)/);
+	if (!match) return null;
+	const major = Number(match[1]);
+	const minor = Number(match[2]);
+	if (!Number.isFinite(major) || !Number.isFinite(minor)) return null;
+	return { major, minor };
+}
+
+function pickLowestAvailable(available: Set<string>, pattern: RegExp): string | null {
+	let best: { slug: string; major: number; minor: number } | null = null;
+	for (const slug of available) {
+		if (!pattern.test(slug)) continue;
+		const version = parseGptVersion(slug);
+		if (!version) continue;
+		if (!best) {
+			best = { slug, ...version };
+			continue;
+		}
+		if (version.major < best.major) {
+			best = { slug, ...version };
+			continue;
+		}
+		if (version.major === best.major && version.minor < best.minor) {
+			best = { slug, ...version };
+		}
+	}
+	return best?.slug ?? null;
+}
+
+/**
+ * Legacy model slug mappings for automatic upgrade.
+ * Maps obsolete identifiers to their modern equivalents.
+ * The target slugs are dynamically resolved from available models.
+ */
+const LEGACY_MODEL_PATTERNS: Array<{
+	pattern: RegExp;
+	upgrade: (available: Set<string>) => string | null;
+}> = [
+	// gpt-5 β†’ lowest available gpt-5.X (dynamic)
+	{
+		pattern: /^gpt-5$/,
+		upgrade: (available) =>
+			pickLowestAvailable(available, /^gpt-5\.\d+$/i) ?? "gpt-5.1",
+	},
+	// gpt-5-codex β†’ lowest available gpt-5.X-codex (dynamic)
+	{
+		pattern: /^gpt-5-codex$/,
+		upgrade: (available) =>
+			pickLowestAvailable(available, /^gpt-5\.\d+-codex$/i) ?? 
"gpt-5.1-codex", + }, + // codex-mini-latest β†’ lowest available gpt-5.X-codex-mini (dynamic) + { + pattern: /^codex-mini-latest$/, + upgrade: (available) => + pickLowestAvailable(available, /^gpt-5\.\d+-codex-mini$/i) ?? + "gpt-5.1-codex-mini", + }, +]; + +/** + * Matches official Codex model slugs with optional effort suffix. + * Format: gpt-X.Y[-codex[-max|-mini|-pro]][-effort] + */ const CODEX_METADATA_REGEX = - /^(gpt-\d+(?:\.\d+)*-codex(?:-(?:max|mini))?)(?:-(none|minimal|low|medium|high|xhigh))?$/i; + /^(gpt-\d+\.\d+(?:-codex)?(?:-(?:max|mini|pro))?)(?:-(none|minimal|low|medium|high|xhigh))?$/i; const CODEX_STANDARD_VARIANTS = ["low", "medium", "high"] as const; const CODEX_XHIGH_VARIANTS = ["low", "medium", "high", "xhigh"] as const; @@ -136,10 +210,68 @@ function codexVariantSet(baseId: string): readonly string[] { return CODEX_STANDARD_VARIANTS; } -function isAllowedMetadataModel(modelId: string): boolean { - const normalized = modelId.toLowerCase(); - if (LEGACY_ALLOWED_METADATA_MODELS.has(normalized)) return true; - return parseCodexMetadataModel(normalized) !== undefined; +/** + * Get available model slugs from the cached server response. + * Falls back to FALLBACK_MODEL_SLUGS when cache is empty. + */ +function getAvailableModelSlugs(accountId?: string): Set { + const cached = getCachedVariantEfforts(accountId); + if (cached.size > 0) { + return new Set(cached.keys()); + } + return new Set(FALLBACK_MODEL_SLUGS); +} + +/** + * Upgrade legacy model slugs to their modern equivalents. + * Uses dynamic resolution based on available models from server. 
+ */ +function upgradeLegacyModelSlug(modelId: string, accountId?: string): string { + const normalized = modelId.toLowerCase().trim(); + const available = getAvailableModelSlugs(accountId); + + for (const { pattern, upgrade } of LEGACY_MODEL_PATTERNS) { + if (pattern.test(normalized)) { + const upgraded = upgrade(available); + if (upgraded) return upgraded; + } + } + + return normalized; +} + +/** + * Check if a model ID is an officially supported Codex model slug. + * + * Allowed: + * - Models from server catalog (getCachedVariantEfforts) + * - Fallback slugs when server unavailable + * - Models matching official pattern: gpt-X.Y[-codex[-max|-mini|-pro]] + * + * NOT allowed: + * - Legacy slugs with effort suffixes like "gpt-5.2-high" (use variants instead) + * - Old slugs like "gpt-5" or "gpt-5-codex" (auto-upgraded internally) + */ +function isAllowedMetadataModel(modelId: string, accountId?: string): boolean { + const normalized = modelId.toLowerCase().trim(); + const available = getAvailableModelSlugs(accountId); + + // Check if it's directly available from server/fallback + if (available.has(normalized)) return true; + + // Check if it's a legacy slug that can be upgraded + const upgraded = upgradeLegacyModelSlug(normalized, accountId); + if (upgraded !== normalized && available.has(upgraded)) return true; + + // Check if it matches the official format pattern (for new models from server) + const parsed = parseCodexMetadataModel(normalized); + if (!parsed) return false; + + // Base ID must be available or match the official gpt-X.Y pattern + // This allows new models like gpt-5.2-pro from server + if (available.has(parsed.baseId)) return true; + + return /^gpt-\d+\.\d+(?:-codex)?(?:-(?:max|mini|pro))?$/.test(parsed.baseId); } function cloneModelMetadata( @@ -184,7 +316,12 @@ function looksLikeModelMetadataRegistry(models: Record): boolea function normalizeProviderModelMetadata( models: Record, - options?: { force?: boolean }, + options?: { + force?: boolean; 
+ variantEfforts?: Map; + legacyEffortBases?: Set; + accountId?: string; + }, ): void { if (!options?.force && !looksLikeModelMetadataRegistry(models)) return; @@ -197,6 +334,7 @@ function normalizeProviderModelMetadata( variantTemplates: Map>; } >(); + const baseEntryPresent = new Set(); for (const [modelId, metadata] of Object.entries(models)) { const parsed = parseCodexMetadataModel(modelId); @@ -208,6 +346,7 @@ function normalizeProviderModelMetadata( }; if (modelId.toLowerCase() === parsed.baseId) { entry.baseTemplate = metadata; + baseEntryPresent.add(parsed.baseId); } if (!entry.fallbackTemplate) { entry.fallbackTemplate = metadata; @@ -247,10 +386,21 @@ function normalizeProviderModelMetadata( ? (baseModel.variants as Record) : {}; - const efforts = new Set([ - ...codexVariantSet(baseId), - ...entry.seenEfforts, - ]); + const cachedEfforts = options?.variantEfforts?.get(baseId); + const efforts = cachedEfforts?.length + ? new Set(cachedEfforts) + : new Set([...codexVariantSet(baseId), ...entry.seenEfforts]); + + if (cachedEfforts?.length) { + const allowed = new Set( + cachedEfforts.map((effort) => effort.toLowerCase()), + ); + for (const key of Object.keys(variants)) { + if (!allowed.has(key.toLowerCase())) { + delete variants[key]; + } + } + } for (const effort of efforts) { const existingVariant = isObjectRecord(variants[effort]) @@ -269,11 +419,34 @@ function normalizeProviderModelMetadata( for (const modelId of Object.keys(models)) { const parsed = parseCodexMetadataModel(modelId); if (parsed?.effort) { + if (options?.legacyEffortBases?.has(parsed.baseId)) { + continue; + } + if (!baseEntryPresent.has(parsed.baseId)) { + continue; + } delete models[modelId]; continue; } - if (!isAllowedMetadataModel(modelId)) delete models[modelId]; + if (!isAllowedMetadataModel(modelId, options?.accountId)) delete models[modelId]; + } +} + +function collectLegacyEffortBases(models?: Record): Set { + const bases = new Set(); + if (!models) return bases; + const 
baseEntries = new Set(); + for (const modelId of Object.keys(models)) { + const parsed = parseCodexMetadataModel(modelId); + if (!parsed) continue; + if (!parsed.effort) baseEntries.add(parsed.baseId); + } + for (const modelId of Object.keys(models)) { + const parsed = parseCodexMetadataModel(modelId); + if (!parsed?.effort) continue; + if (!baseEntries.has(parsed.baseId)) bases.add(parsed.baseId); } + return bases; } @@ -283,6 +456,8 @@ export const OpenAIAuthPlugin: Plugin = async ({ client }: PluginInput) => { let cachedFetchOrchestrator: FetchOrchestrator | null = null; configureStorageForPluginConfig(loadPluginConfig(), process.cwd()); + void warmCodexInstructions(); + void warmCodexModelCatalog(); const showToast = async ( message: string, @@ -584,10 +759,6 @@ export const OpenAIAuthPlugin: Plugin = async ({ client }: PluginInput) => { const auth = await getAuth(); if (!isOAuthAuth(auth)) return {}; const providerConfig = provider as { options?: Record; models?: UserConfig["models"] } | undefined; - if (providerConfig?.models && isObjectRecord(providerConfig.models)) { - normalizeProviderModelMetadata(providerConfig.models, { force: true }); - } - const pluginConfig = loadPluginConfig(); configureStorageForPluginConfig(pluginConfig, process.cwd()); const quietMode = getQuietMode(pluginConfig); @@ -601,6 +772,20 @@ export const OpenAIAuthPlugin: Plugin = async ({ client }: PluginInput) => { return {}; } + if (providerConfig?.models && isObjectRecord(providerConfig.models)) { + const legacyEffortBases = collectLegacyEffortBases(providerConfig.models); + const activeAccount = accountManager.getCurrentAccountForFamily( + DEFAULT_MODEL_FAMILY, + ); + const variantEfforts = getCachedVariantEfforts(activeAccount?.accountId); + normalizeProviderModelMetadata(providerConfig.models, { + force: true, + variantEfforts, + legacyEffortBases, + accountId: activeAccount?.accountId, + }); + } + const userConfig: UserConfig = { global: providerConfig?.options || {}, models: 
providerConfig?.models || {} }; const pidOffsetEnabled = getPidOffsetEnabled(pluginConfig); @@ -685,9 +870,21 @@ async fetch(input: Request | string | URL, init?: RequestInit): Promise { - const openAIModels = (cfg as { provider?: { openai?: { models?: unknown } } })?.provider?.openai?.models; - if (openAIModels && isObjectRecord(openAIModels)) { - normalizeProviderModelMetadata(openAIModels, { force: true }); + cfg.provider = cfg.provider || {}; + cfg.provider.openai = cfg.provider.openai || {}; + const openAIConfig = cfg.provider.openai as { models?: unknown }; + const legacyEffortBases = collectLegacyEffortBases( + isObjectRecord(openAIConfig.models) ? openAIConfig.models : undefined, + ); + const internalDefaults = buildInternalModelDefaults(); + openAIConfig.models = mergeModelDefaults(openAIConfig.models, internalDefaults); + if (isObjectRecord(openAIConfig.models)) { + const variantEfforts = getCachedVariantEfforts(); + normalizeProviderModelMetadata(openAIConfig.models, { + force: true, + variantEfforts, + legacyEffortBases, + }); } cfg.command = cfg.command || {}; diff --git a/lib/accounts.ts b/lib/accounts.ts index 2e4a294..3a6248f 100644 --- a/lib/accounts.ts +++ b/lib/accounts.ts @@ -297,6 +297,7 @@ export class AccountManager { private accounts: ManagedAccount[] = []; private cursor = 0; private currentAccountIndexByFamily: Record = { + "gpt-5.3-codex": -1, "gpt-5.2-codex": -1, "codex-max": -1, codex: -1, @@ -304,6 +305,7 @@ export class AccountManager { "gpt-5.1": -1, }; private sessionOffsetApplied: Record = { + "gpt-5.3-codex": false, "gpt-5.2-codex": false, "codex-max": false, codex: false, @@ -678,6 +680,18 @@ export class AccountManager { return true; } + allAccountsCoolingDown(reason?: CooldownReason): boolean { + const eligible = this.accounts.filter( + (account) => hasCompleteIdentity(account) && isAccountEnabled(account), + ); + if (eligible.length === 0) return false; + for (const account of eligible) { + if 
(!this.isAccountCoolingDown(account)) return false;
+			if (reason && account.cooldownReason !== reason) return false;
+		}
+		return true;
+	}
+
 	shouldShowAccountToast(accountIndex: number, debounceMs = 30_000): boolean {
 		const now = nowMs();
 		if (accountIndex === this.lastToastAccountIndex && now - this.lastToastTime < debounceMs) {
diff --git a/lib/catalog-defaults.ts b/lib/catalog-defaults.ts
new file mode 100644
index 0000000..c44cad5
--- /dev/null
+++ b/lib/catalog-defaults.ts
@@ -0,0 +1,304 @@
+import { existsSync, readFileSync } from "node:fs";
+import { dirname, join } from "node:path";
+import { fileURLToPath } from "node:url";
+import { getOpencodeCacheDir } from "./paths.js";
+import { logWarn } from "./logger.js";
+
+type ModelConfig = Record<string, unknown> & {
+	name?: string;
+	limit?: { context?: number; output?: number };
+	modalities?: { input?: string[]; output?: string[] };
+	description?: string;
+	visibility?: string;
+	priority?: number;
+	supportedInApi?: boolean;
+	minimalClientVersion?: string;
+};
+
+type ModelsCache = {
+	models?: Array<CatalogModel>;
+};
+
+type CatalogModel = {
+	slug?: string;
+	display_name?: string;
+	description?: string;
+	visibility?: string;
+	priority?: number;
+	supported_in_api?: boolean;
+	minimal_client_version?: string;
+	context_window?: number;
+	truncation_policy?: { mode?: string; limit?: number };
+	input_modalities?: string[];
+	output_modalities?: string[];
+};
+
+const EFFORT_SUFFIX_REGEX = /-(none|minimal|low|medium|high|xhigh)$/i;
+const STATIC_TEMPLATE_FILES = ["opencode-modern.json", "opencode-legacy.json"];
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+function isObjectRecord(value: unknown): value is Record<string, unknown> {
+	return typeof value === "object" && value !== null && !Array.isArray(value);
+}
+
+function normalizeBaseId(modelId: string): string {
+	return modelId.toLowerCase().trim().replace(EFFORT_SUFFIX_REGEX, "");
+}
+
+function resolveStaticTemplateFiles(moduleDir: string = 
__dirname): string[] {
+	const candidateDirs = [
+		join(moduleDir, "..", "config"),
+		join(moduleDir, "..", "..", "config"),
+		join(moduleDir, "..", "..", "..", "config"),
+	];
+	const files: string[] = [];
+	const seen = new Set<string>();
+
+	for (const configDir of candidateDirs) {
+		for (const fileName of STATIC_TEMPLATE_FILES) {
+			const filePath = join(configDir, fileName);
+			if (seen.has(filePath)) continue;
+			seen.add(filePath);
+			files.push(filePath);
+		}
+	}
+
+	return files;
+}
+
+function readStaticTemplateModels(moduleDir: string = __dirname): Map<string, ModelConfig> {
+	const models = new Map<string, ModelConfig>();
+	const templateFiles = resolveStaticTemplateFiles(moduleDir);
+
+	for (const filePath of templateFiles) {
+		try {
+			if (!existsSync(filePath)) continue;
+			const parsed = JSON.parse(readFileSync(filePath, "utf8")) as {
+				provider?: { openai?: { models?: Record<string, unknown> } };
+			};
+			const templateModels = parsed.provider?.openai?.models ?? {};
+			for (const [modelId, config] of Object.entries(templateModels)) {
+				if (!isObjectRecord(config)) continue;
+				const baseId = normalizeBaseId(modelId);
+				if (models.has(baseId)) continue;
+				models.set(baseId, JSON.parse(JSON.stringify(config)) as ModelConfig);
+			}
+		} catch (error) {
+			logWarn(`Failed to parse static template file: ${filePath}`, error);
+		}
+	}
+
+	return models;
+}
+
+function readCachedCatalogSlugs(cacheFile: string): string[] {
+	try {
+		if (!existsSync(cacheFile)) return [];
+		const parsed = JSON.parse(readFileSync(cacheFile, "utf8")) as ModelsCache;
+		const slugs = parsed.models?.map((model) => model.slug).filter(Boolean) ?? 
[]; + return Array.from( + new Set(slugs.map((slug) => normalizeBaseId(slug as string))), + ); + } catch (error) { + logWarn("Failed to read codex model cache", error); + return []; + } +} + +function readCachedCatalogModels(cacheFile: string): CatalogModel[] { + try { + if (!existsSync(cacheFile)) return []; + const parsed = JSON.parse(readFileSync(cacheFile, "utf8")) as ModelsCache; + return parsed.models?.filter((model) => model?.slug) ?? []; + } catch (error) { + logWarn("Failed to read codex model cache", error); + return []; + } +} + +/** + * Find the appropriate template ID for a model slug. + * + * Rules: + * - Codex models (contain "-codex") β†’ fall back to codex templates + * - Non-codex GPT models β†’ fall back to non-codex templates + * - Never mix: don't apply codex defaults to non-codex models + */ +function pickTemplateId(baseId: string, defaults: Map): string | null { + // Direct match first + if (defaults.has(baseId)) return baseId; + + const isCodexModel = baseId.includes("-codex"); + + if (isCodexModel) { + // Codex model fallbacks (most specific to least specific) + if (baseId.includes("-codex-max") && defaults.has("gpt-5.1-codex-max")) { + return "gpt-5.1-codex-max"; + } + if (baseId.includes("-codex-mini") && defaults.has("gpt-5.1-codex-mini")) { + return "gpt-5.1-codex-mini"; + } + // Generic codex fallback - newest available + if (defaults.has("gpt-5.3-codex")) return "gpt-5.3-codex"; + if (defaults.has("gpt-5.2-codex")) return "gpt-5.2-codex"; + if (defaults.has("gpt-5.1-codex")) return "gpt-5.1-codex"; + } else if (baseId.startsWith("gpt-5.")) { + // Non-codex GPT model fallbacks (e.g., gpt-5.2-pro, gpt-5.3) + if (defaults.has("gpt-5.2")) return "gpt-5.2"; + if (defaults.has("gpt-5.1")) return "gpt-5.1"; + } + + return null; +} + +function formatModelDisplayName(baseId: string): string { + const parts = baseId.split("-").filter(Boolean); + if (parts.length === 0) return `${baseId} (OAuth)`; + let label = ""; + if (parts[0] === "gpt" && 
parts[1]) { + label = `GPT ${parts[1]}`; + for (const part of parts.slice(2)) { + label += ` ${part.charAt(0).toUpperCase()}${part.slice(1)}`; + } + } else { + label = parts + .map((part) => `${part.charAt(0).toUpperCase()}${part.slice(1)}`) + .join(" "); + } + return `${label} (OAuth)`; +} + +function applyCatalogMetadata( + config: ModelConfig, + model: CatalogModel, +): ModelConfig { + const next: ModelConfig = { ...config }; + if (typeof model.display_name === "string" && model.display_name.trim()) { + next.name = model.display_name.trim(); + } + if (Number.isFinite(model.context_window)) { + next.limit = { + ...next.limit, + context: model.context_window, + }; + } + const truncationLimit = model.truncation_policy?.limit; + const truncationMode = model.truncation_policy?.mode; + if (Number.isFinite(truncationLimit) && truncationMode === "tokens") { + next.limit = { + ...next.limit, + output: truncationLimit, + }; + } + if (Array.isArray(model.input_modalities) && model.input_modalities.length > 0) { + next.modalities = { + ...next.modalities, + input: [...model.input_modalities], + }; + } + if (Array.isArray(model.output_modalities) && model.output_modalities.length > 0) { + next.modalities = { + ...next.modalities, + output: [...model.output_modalities], + }; + } + if (typeof model.description === "string" && model.description.trim()) { + next.description = model.description.trim(); + } + if (typeof model.visibility === "string" && model.visibility.trim()) { + next.visibility = model.visibility.trim(); + } + if (typeof model.priority === "number" && Number.isFinite(model.priority)) { + next.priority = model.priority; + } + if (typeof model.supported_in_api === "boolean") { + next.supportedInApi = model.supported_in_api; + } + if ( + typeof model.minimal_client_version === "string" && + model.minimal_client_version.trim() + ) { + next.minimalClientVersion = model.minimal_client_version.trim(); + } + return next; +} + +export function 
buildInternalModelDefaults(options?: { + cacheFile?: string; + moduleDir?: string; +}): Record { + const moduleDir = options?.moduleDir ?? __dirname; + const defaults = readStaticTemplateModels(moduleDir); + const cacheFile = options?.cacheFile ?? join(getOpencodeCacheDir(), "codex-models-cache.json"); + const catalogModels = readCachedCatalogModels(cacheFile); + const catalogSlugs = readCachedCatalogSlugs(cacheFile); + + for (const slug of catalogSlugs) { + if (!defaults.has(slug)) { + const templateId = pickTemplateId(slug, defaults); + if (!templateId) continue; + const template = defaults.get(templateId); + if (!template) continue; + const cloned = JSON.parse(JSON.stringify(template)) as ModelConfig; + cloned.name = formatModelDisplayName(slug); + defaults.set(slug, cloned); + } + } + + for (const model of catalogModels) { + const slug = model.slug ? normalizeBaseId(model.slug) : undefined; + if (!slug) continue; + const existing = defaults.get(slug); + if (!existing) continue; + const updated = applyCatalogMetadata(existing, model); + if (!updated.name) { + updated.name = formatModelDisplayName(slug); + } + defaults.set(slug, updated); + } + + return Object.fromEntries(defaults); +} + +export function mergeModelDefaults( + userModels: unknown, + defaults: Record, +): Record { + const merged: Record = { ...defaults }; + if (!isObjectRecord(userModels)) return merged; + for (const [modelId, override] of Object.entries(userModels)) { + const base = isObjectRecord(merged[modelId]) ? 
merged[modelId] : {}; + if (!isObjectRecord(override)) { + merged[modelId] = override as ModelConfig; + continue; + } + const next: ModelConfig = { ...base, ...override }; + if (isObjectRecord(base.limit) || isObjectRecord(override.limit)) { + next.limit = { + ...(base.limit as Record | undefined), + ...(override.limit as Record | undefined), + } as ModelConfig["limit"]; + } + if (isObjectRecord(base.options) || isObjectRecord(override.options)) { + next.options = { + ...(base.options as Record | undefined), + ...(override.options as Record | undefined), + }; + } + if (isObjectRecord(base.variants) || isObjectRecord(override.variants)) { + next.variants = { + ...(base.variants as Record | undefined), + ...(override.variants as Record | undefined), + }; + } + merged[modelId] = next; + } + return merged; +} + +export const __internal = { + readStaticTemplateModels, + resolveStaticTemplateFiles, +}; diff --git a/lib/config.ts b/lib/config.ts index a4ba686..44af662 100644 --- a/lib/config.ts +++ b/lib/config.ts @@ -43,10 +43,9 @@ function migrateLegacyConfigIfNeeded(): void { /** * Default plugin configuration - * Bridge mode is legacy and no longer affects runtime prompt behavior. + * Plugin default configuration. 
*/ const DEFAULT_CONFIG: PluginConfig = { - codexMode: false, accountSelectionStrategy: "sticky", pidOffsetEnabled: true, quietMode: false, @@ -54,6 +53,10 @@ const DEFAULT_CONFIG: PluginConfig = { retryAllAccountsRateLimited: false, retryAllAccountsMaxWaitMs: 30_000, retryAllAccountsMaxRetries: 1, + hardStopMaxWaitMs: 10_000, + hardStopOnUnknownModel: true, + hardStopOnAllAuthFailed: true, + hardStopMaxConsecutiveFailures: 5, tokenRefreshSkewMs: 60_000, proactiveTokenRefresh: false, authDebug: false, @@ -81,11 +84,14 @@ export function loadPluginConfig(): PluginConfig { const fileContent = readFileSync(CONFIG_PATH, "utf-8"); const userConfig = JSON.parse(fileContent) as Partial; + const { codexMode: _ignoredCodexMode, ...rest } = userConfig as Partial & { + codexMode?: unknown; + }; // Merge with defaults return { ...DEFAULT_CONFIG, - ...userConfig, + ...rest, }; } catch (error) { console.warn( @@ -143,13 +149,6 @@ function resolveNumberSetting( return candidate; } -/** - * Legacy no-op: bridge mode has been removed. 
- */ -export function getCodexMode(_pluginConfig: PluginConfig): boolean { - return false; -} - export function getPerProjectAccounts(pluginConfig: PluginConfig): boolean { return resolveBooleanSetting( "CODEX_AUTH_PER_PROJECT_ACCOUNTS", @@ -223,6 +222,41 @@ export function getRetryAllAccountsMaxRetries(pluginConfig: PluginConfig): numbe ); } +export function getHardStopMaxWaitMs(pluginConfig: PluginConfig): number { + return resolveNumberSetting( + "CODEX_AUTH_HARD_STOP_MAX_WAIT_MS", + pluginConfig.hardStopMaxWaitMs, + 10_000, + { min: 0 }, + ); +} + +export function getHardStopOnUnknownModel(pluginConfig: PluginConfig): boolean { + return resolveBooleanSetting( + "CODEX_AUTH_HARD_STOP_ON_UNKNOWN_MODEL", + pluginConfig.hardStopOnUnknownModel, + true, + ); +} + +export function getHardStopOnAllAuthFailed(pluginConfig: PluginConfig): boolean { + return resolveBooleanSetting( + "CODEX_AUTH_HARD_STOP_ON_ALL_AUTH_FAILED", + pluginConfig.hardStopOnAllAuthFailed, + true, + ); +} + +export function getHardStopMaxConsecutiveFailures(pluginConfig: PluginConfig): number { + return resolveNumberSetting( + "CODEX_AUTH_HARD_STOP_MAX_CONSECUTIVE_FAILURES", + pluginConfig.hardStopMaxConsecutiveFailures, + 5, + { min: 0 }, + ); +} + + export function getSchedulingMode(pluginConfig: PluginConfig): | "cache_first" | "balance" diff --git a/lib/constants.ts b/lib/constants.ts index de78e17..76f4b73 100644 --- a/lib/constants.ts +++ b/lib/constants.ts @@ -82,6 +82,7 @@ export const AUTH_LABELS = { /** Model family identifiers for account selection */ export const MODEL_FAMILIES = [ + "gpt-5.3-codex", "gpt-5.2-codex", "codex-max", "codex", diff --git a/lib/fetch-orchestrator.ts b/lib/fetch-orchestrator.ts index 6973eb0..e7ff750 100644 --- a/lib/fetch-orchestrator.ts +++ b/lib/fetch-orchestrator.ts @@ -26,6 +26,8 @@ import { type ProactiveRefreshQueue } from "./refresh-queue.js"; import { getAccountSelectionStrategy, getAuthDebugEnabled, + getHardStopMaxWaitMs, + 
getHardStopOnAllAuthFailed, getMaxCacheFirstWaitSeconds, getRateLimitDedupWindowMs, getRateLimitToastDebounceMs, @@ -49,6 +51,11 @@ import { handleErrorResponse, handleSuccessResponse, } from "./request/fetch-helpers.js"; +import { createSyntheticErrorResponse } from "./request/response-handler.js"; +import { + isModelCatalogError, + isModelCatalogUnavailableError, +} from "./request/errors.js"; import { normalizeModel } from "./request/request-transformer.js"; import { getModelFamily } from "./prompts/codex.js"; import { logDebug, logWarn } from "./logger.js"; @@ -63,6 +70,9 @@ const debugAuth = (...args: unknown[]): void => { console.debug(...args); }; +const SESSION_KEY_TTL_MS = 6 * 60 * 60 * 1000; +const MAX_SESSION_KEYS = 200; + function shouldRefreshToken(auth: OAuthAuthDetails, skewMs: number): boolean { return !auth.access || auth.expires <= Date.now() + Math.max(0, Math.floor(skewMs)); } @@ -105,11 +115,40 @@ export interface FetchOrchestratorConfig { export class FetchOrchestrator { private lastSessionKey: string | null = null; - private readonly seenSessionKeys = new Set(); + private readonly seenSessionKeys = new Map(); private lastAccountIndex: number | null = null; constructor(private config: FetchOrchestratorConfig) { } + private touchSessionKey(sessionKey: string): boolean { + const now = Date.now(); + this.pruneSessionKeys(now); + const hasSeen = this.seenSessionKeys.has(sessionKey); + if (hasSeen) { + this.seenSessionKeys.delete(sessionKey); + } + this.seenSessionKeys.set(sessionKey, now); + this.enforceSessionKeyLimit(); + return hasSeen; + } + + private pruneSessionKeys(now: number): void { + if (this.seenSessionKeys.size === 0) return; + for (const [key, lastSeen] of this.seenSessionKeys) { + if (now - lastSeen > SESSION_KEY_TTL_MS) { + this.seenSessionKeys.delete(key); + } + } + } + + private enforceSessionKeyLimit(): void { + while (this.seenSessionKeys.size > MAX_SESSION_KEYS) { + const oldest = this.seenSessionKeys.keys().next().value as 
string | undefined; + if (!oldest) break; + this.seenSessionKeys.delete(oldest); + } + } + async execute(input: Request | string | URL, init?: RequestInit): Promise { const { accountManager, @@ -147,6 +186,7 @@ export class FetchOrchestrator { let transformation: | Awaited> | undefined; + let transformationAccountId: string | null = null; let requestInit = init; let model: string | undefined = initialModel; const modelFamily: ModelFamily = model @@ -160,10 +200,9 @@ export class FetchOrchestrator { const sessionKey = resolveSessionKey(sessionBody); let sessionEvent: "new" | "switch" | null = null; if (sessionKey) { - const hasSeen = this.seenSessionKeys.has(sessionKey); + const hasSeen = this.touchSessionKey(sessionKey); if (!hasSeen) { sessionEvent = "new"; - this.seenSessionKeys.add(sessionKey); } else if (this.lastSessionKey && this.lastSessionKey !== sessionKey) { sessionEvent = "switch"; } @@ -260,14 +299,36 @@ export class FetchOrchestrator { } account.accountId = accountId; - if (!transformation) { + if (!transformation || transformationAccountId !== accountId) { + try { transformation = await transformRequestForCodex(init, url, userConfig, { accessToken: accountAuth.access, accountId, + pluginConfig, }); - requestInit = transformation?.updatedInit ?? init; - model = transformation?.body.model ?? model; + } catch (err) { + if (isModelCatalogError(err)) { + const attemptedModel = + typeof originalBody?.model === "string" && originalBody.model.trim() + ? originalBody.model + : model ?? "unknown"; + const detail = + isModelCatalogUnavailableError(err) + ? " Model catalog unavailable; run once with network access to seed /codex/models." + : ""; + return createSyntheticErrorResponse( + `Unsupported model "${attemptedModel}".${detail}`, + 400, + "unsupported_model", + "model", + ); + } + throw err; } + transformationAccountId = accountId; + requestInit = transformation?.updatedInit ?? init; + model = transformation?.body.model ?? 
model; + } const headers = createCodexHeaders(requestInit, accountId, accountAuth.access, { model, promptCacheKey: transformation?.body?.prompt_cache_key }); @@ -404,6 +465,22 @@ export class FetchOrchestrator { } const waitMs = await accountManager.getMinWaitTimeForFamilyWithHydration(modelFamily, model); + if (getHardStopOnAllAuthFailed(pluginConfig) && accountManager.allAccountsCoolingDown("auth-failure")) { + return createSyntheticErrorResponse( + "All accounts failed authentication. Run `opencode auth login` to reauthenticate.", + HTTP_STATUS.UNAUTHORIZED, + "all_accounts_auth_failed", + ); + } + + const hardStopMaxWaitMs = getHardStopMaxWaitMs(pluginConfig); + if (hardStopMaxWaitMs > 0 && waitMs > hardStopMaxWaitMs) { + return createSyntheticErrorResponse( + `All ${accountCount} account(s) rate-limited for ${formatWaitTime(waitMs)}. Try again later or raise hardStopMaxWaitMs.`, + HTTP_STATUS.TOO_MANY_REQUESTS, + "all_accounts_rate_limited", + ); + } if (getRetryAllAccountsRateLimited(pluginConfig) && accountManager.getAccountCount() > 0 && waitMs > 0 && (getRetryAllAccountsMaxWaitMs(pluginConfig) === 0 || waitMs <= getRetryAllAccountsMaxWaitMs(pluginConfig)) && allRateLimitedRetries < getRetryAllAccountsMaxRetries(pluginConfig)) { allRateLimitedRetries += 1; await sleep(waitMs); @@ -427,3 +504,8 @@ export class FetchOrchestrator { } } } + +export const __internal = { + SESSION_KEY_TTL_MS, + MAX_SESSION_KEYS, +}; diff --git a/lib/logger.ts b/lib/logger.ts index 62f091f..dd15296 100644 --- a/lib/logger.ts +++ b/lib/logger.ts @@ -8,6 +8,8 @@ import { getOpencodeLogDir, migrateLegacyLogDir } from "./paths.js"; export const LOGGING_ENABLED = process.env.ENABLE_PLUGIN_REQUEST_LOGGING === "1"; export const DEBUG_ENABLED = getAuthDebugEnabled() || LOGGING_ENABLED; const LOG_DIR = getOpencodeLogDir(); +const REDACTED_VALUE = "[redacted]"; +const REDACTED_KEYS = new Set(["prompt_cache_key"]); migrateLegacyLogDir(); @@ -21,6 +23,27 @@ if (DEBUG_ENABLED && 
!LOGGING_ENABLED) { let requestCounter = 0; +function isPlainObject(value: unknown): value is Record { + return Object.prototype.toString.call(value) === "[object Object]"; +} + +function sanitizeLogValue(value: unknown): unknown { + if (Array.isArray(value)) { + return value.map((entry) => sanitizeLogValue(entry)); + } + if (!isPlainObject(value)) return value; + + const sanitized: Record = {}; + for (const [key, entry] of Object.entries(value)) { + if (REDACTED_KEYS.has(key)) { + sanitized[key] = REDACTED_VALUE; + continue; + } + sanitized[key] = sanitizeLogValue(entry); + } + return sanitized; +} + /** * Log request data to file (only when LOGGING_ENABLED is true) * @param stage - The stage of the request (e.g., "before-transform", "after-transform") @@ -40,6 +63,7 @@ export function logRequest(stage: string, data: Record): void { const filename = join(LOG_DIR, `request-${requestId}-${stage}.json`); try { + const sanitized = sanitizeLogValue(data) as Record; writeFileSync( filename, JSON.stringify( @@ -47,7 +71,7 @@ export function logRequest(stage: string, data: Record): void { timestamp, requestId, stage, - ...data, + ...sanitized, }, null, 2, @@ -71,7 +95,8 @@ export function logRequest(stage: string, data: Record): void { * @param data - Optional data to log */ export function logDebug(message: string, data?: unknown): void { - if (!DEBUG_ENABLED) return; + const loggingEnabled = process.env.ENABLE_PLUGIN_REQUEST_LOGGING === "1"; + if (!DEBUG_ENABLED && !loggingEnabled) return; if (data !== undefined) { console.log(`[${PLUGIN_NAME}] ${message}`, data); diff --git a/lib/personalities.ts b/lib/personalities.ts new file mode 100644 index 0000000..861e94c --- /dev/null +++ b/lib/personalities.ts @@ -0,0 +1,98 @@ +import { existsSync, readFileSync, readdirSync, statSync } from "node:fs"; +import { join } from "node:path"; +import { getOpencodeConfigDir } from "./paths.js"; +import { logDebug } from "./logger.js"; + +type CachedPersonality = { + content: string; + 
mtimeMs: number; +}; + +const PERSONALITY_DIR_NAME = "Personalities"; +const PERSONALITY_CACHE = new Map(); +const PERSONALITY_CACHE_MARKER = ""; + +function resolveProjectPersonalityDir(projectRoot: string): string { + return join(projectRoot, ".opencode", PERSONALITY_DIR_NAME); +} + +function resolveGlobalPersonalityDir(): string { + return join(getOpencodeConfigDir(), PERSONALITY_DIR_NAME); +} + +function resolvePersonalityFile( + directory: string, + personality: string, +): string | null { + if (!existsSync(directory)) return null; + const normalized = personality.trim(); + if (!normalized) return null; + if (!isSafePersonalityKey(normalized)) return null; + const direct = join(directory, `${normalized}.md`); + if (existsSync(direct)) return direct; + const lowerTarget = `${normalized.toLowerCase()}.md`; + try { + const entries = readdirSync(directory); + for (const entry of entries) { + if (entry.toLowerCase() === lowerTarget) { + return join(directory, entry); + } + } + } catch { + return null; + } + return null; +} + +function isSafePersonalityKey(personality: string): boolean { + return !( + personality.includes("/") || + personality.includes("\\") || + personality.includes("..") + ); +} + +function readPersonalityFile(filePath: string): string | null { + try { + const stats = statSync(filePath); + const cached = PERSONALITY_CACHE.get(filePath); + if (cached && cached.mtimeMs === stats.mtimeMs) return cached.content; + const content = readFileSync(filePath, "utf8"); + const normalized = content.startsWith(PERSONALITY_CACHE_MARKER) + ? 
content.slice(PERSONALITY_CACHE_MARKER.length).trimStart() + : content; + PERSONALITY_CACHE.set(filePath, { content: normalized, mtimeMs: stats.mtimeMs }); + return normalized; + } catch (error) { + logDebug("Failed to read personality file", error); + return null; + } +} + +export function resolveCustomPersonalityDescription( + personality: string, + projectRoot: string = process.cwd(), +): string | null { + const localDir = resolveProjectPersonalityDir(projectRoot); + const globalDir = resolveGlobalPersonalityDir(); + + const localFile = resolvePersonalityFile(localDir, personality); + if (localFile) { + const content = readPersonalityFile(localFile); + if (content) return content; + } + + const globalFile = resolvePersonalityFile(globalDir, personality); + if (globalFile) { + const content = readPersonalityFile(globalFile); + if (content) return content; + } + + return null; +} + +export const __internal = { + resolveProjectPersonalityDir, + resolveGlobalPersonalityDir, + resolvePersonalityFile, +}; diff --git a/lib/prompts/codex-models.ts b/lib/prompts/codex-models.ts index ba77684..685a3e4 100644 --- a/lib/prompts/codex-models.ts +++ b/lib/prompts/codex-models.ts @@ -1,6 +1,16 @@ -import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs"; +import { + chmodSync, + existsSync, + mkdirSync, + readFileSync, + renameSync, + unlinkSync, + writeFileSync, +} from "node:fs"; +import { createHash, randomBytes } from "node:crypto"; import { dirname, join } from "node:path"; import { fileURLToPath } from "node:url"; +import lockfile from "proper-lockfile"; import type { ConfigOptions } from "../types.js"; import { CODEX_BASE_URL, @@ -8,17 +18,25 @@ import { OPENAI_HEADER_VALUES, URL_PATHS, } from "../constants.js"; -import { getOpencodeCacheDir } from "../paths.js"; +import { getOpencodeCacheDir, getOpencodeConfigDir } from "../paths.js"; import { logDebug, logWarn } from "../logger.js"; import { getLatestReleaseTag } from "./codex.js"; +import { + 
ModelCatalogUnavailableError, + UnknownModelError, +} from "../request/errors.js"; type PersonalityOption = "none" | "friendly" | "pragmatic"; +const PERSONALITY_DIR_NAME = "Personalities"; +const PERSONALITY_CACHE_MARKER = ""; + interface ModelInstructionsVariables { personality?: string | null; personality_default?: string | null; personality_friendly?: string | null; personality_pragmatic?: string | null; + personalities?: Record | null; } interface ModelMessages { @@ -29,6 +47,14 @@ interface ModelMessages { interface ModelInfo { slug: string; model_messages?: ModelMessages | null; + base_instructions?: string | null; + apply_patch_tool_type?: string | null; + supported_reasoning_levels?: Array<{ effort?: string }> | null; + default_reasoning_level?: string | null; + supports_reasoning_summaries?: boolean | null; + reasoning_summary_format?: string | null; + support_verbosity?: boolean | null; + default_verbosity?: string | null; } interface ModelsResponse { @@ -44,13 +70,17 @@ interface ModelsCache { export interface CodexModelRuntimeDefaults { onlineDefaultPersonality?: PersonalityOption; - personalityMessages?: { - default?: string; - friendly?: string; - pragmatic?: string; - }; + personalityMessages?: Record; instructionsTemplate?: string; + baseInstructions?: string; + applyPatchToolType?: string; staticDefaultPersonality: PersonalityOption; + defaultReasoningEffort?: string; + supportedReasoningEfforts?: string[]; + supportsReasoningSummaries?: boolean; + reasoningSummaryFormat?: string; + supportsVerbosity?: boolean; + defaultVerbosity?: string; } export interface ModelsFetchOptions { @@ -61,8 +91,13 @@ export interface ModelsFetchOptions { } const CACHE_DIR = getOpencodeCacheDir(); -const MODELS_CACHE_FILE = join(CACHE_DIR, "codex-models-cache.json"); +const MODELS_CACHE_FILE_BASE = join(CACHE_DIR, "codex-models-cache"); +const CLIENT_VERSION_CACHE_FILE = join(CACHE_DIR, "codex-client-version.json"); const MODELS_FETCH_TIMEOUT_MS = 5_000; +const 
MODELS_CACHE_TTL_MS = 15 * 60 * 1000; +const MODELS_SERVER_RETRY_BACKOFF_MS = 60 * 1000; +const MODELS_SESSION_MAX_AGE_MS = 60 * 60 * 1000; // 1 hour hard limit for session cache +const CLIENT_VERSION_TTL_MS = 60 * 60 * 1000; const STATIC_DEFAULT_PERSONALITY: PersonalityOption = "none"; const EFFORT_SUFFIX_REGEX = /-(none|minimal|low|medium|high|xhigh)$/i; const PERSONALITY_VALUES = new Set([ @@ -70,10 +105,48 @@ const PERSONALITY_VALUES = new Set([ "friendly", "pragmatic", ]); +const SUPPORTED_EFFORTS = new Set([ + "none", + "minimal", + "low", + "medium", + "high", + "xhigh", +]); const STATIC_TEMPLATE_FILES = ["opencode-modern.json", "opencode-legacy.json"]; const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); +const STATIC_TEMPLATE_DEFAULTS = new Map>(); +// In-memory cache is now scoped by accountId (null = unauthenticated/shared) +const inMemoryModelsCacheByAccount = new Map(); +const lastServerAttemptByAuth = new Map(); +let cachedClientVersion: string | null = null; +let cachedClientVersionAt: number | null = null; + +/** + * Get cache file path scoped to account identity. + * Pro/Enterprise users may have access to different models (e.g., gpt-5.2-pro). 
+ */ +function getModelsCacheFile(accountId?: string): string { + if (!accountId) return `${MODELS_CACHE_FILE_BASE}.json`; + return `${MODELS_CACHE_FILE_BASE}-${hashAccountId(accountId)}.json`; +} + +function hashAccountId(accountId: string): string { + return createHash("sha256").update(accountId).digest("hex").slice(0, 16); +} + +const LOCK_OPTIONS = { + stale: 10_000, + retries: { + retries: 5, + minTimeout: 100, + maxTimeout: 1000, + factor: 2, + }, + realpath: false, +}; function normalizeModelSlug(model: string): string { return model.toLowerCase().trim(); @@ -90,25 +163,218 @@ function normalizePersonalityValue(value: unknown): PersonalityOption | undefine return normalized as PersonalityOption; } -function readModelsCache(): ModelsCache | null { +function extractSemver(tag: string): string | null { + const match = tag.match(/(\d+\.\d+\.\d+)/); + return match?.[1] ?? null; +} + +function isObjectRecord(value: unknown): value is Record { + return typeof value === "object" && value !== null && !Array.isArray(value); +} + +function extractPersonalityMessages( + instructionsVariables?: ModelInstructionsVariables | null, +): Record { + const messages: Record = {}; + const personalities = instructionsVariables?.personalities; + if (isObjectRecord(personalities)) { + for (const [key, value] of Object.entries(personalities)) { + if (typeof value !== "string") continue; + const normalizedKey = key.trim().toLowerCase(); + if (!normalizedKey) continue; + messages[normalizedKey] = value; + } + } + + if ( + typeof instructionsVariables?.personality_default === "string" && + messages.default === undefined + ) { + messages.default = instructionsVariables.personality_default; + } + if ( + typeof instructionsVariables?.personality_friendly === "string" && + messages.friendly === undefined + ) { + messages.friendly = instructionsVariables.personality_friendly; + } + if ( + typeof instructionsVariables?.personality_pragmatic === "string" && + messages.pragmatic === undefined + 
) { + messages.pragmatic = instructionsVariables.personality_pragmatic; + } + + return messages; +} + +async function resolveCodexClientVersion(fetchImpl?: typeof fetch): Promise { + if (cachedClientVersion && cachedClientVersionAt) { + if (Date.now() - cachedClientVersionAt < CLIENT_VERSION_TTL_MS) { + return cachedClientVersion; + } + } + const cachedFile = readClientVersionCache(); + if (cachedFile?.version) { + cachedClientVersion = cachedFile.version; + cachedClientVersionAt = cachedFile.fetchedAt; + } + const impl = fetchImpl ?? fetch; try { - if (!existsSync(MODELS_CACHE_FILE)) return null; - const raw = readFileSync(MODELS_CACHE_FILE, "utf8"); + const tag = await getLatestReleaseTag(impl); + const semver = extractSemver(tag); + if (semver) { + cachedClientVersion = semver; + cachedClientVersionAt = Date.now(); + writeClientVersionCache(semver); + return semver; + } + logWarn(`Unrecognized Codex release tag for client_version: ${tag}`); + } catch (error) { + logDebug("Failed to resolve Codex client_version from GitHub", error); + } + return cachedClientVersion ?? 
"1.0.0"; +} + +function readModelsCache(accountId?: string): ModelsCache | null { + const cacheFile = getModelsCacheFile(accountId); + try { + if (!existsSync(cacheFile)) return null; + const raw = readFileSync(cacheFile, "utf8"); const parsed = JSON.parse(raw) as ModelsCache; - if (!Array.isArray(parsed.models)) return null; - if (!Number.isFinite(parsed.fetchedAt)) return null; + if (!Array.isArray(parsed.models)) { + try { + unlinkSync(cacheFile); + } catch { + // ignore + } + return null; + } + if (!Number.isFinite(parsed.fetchedAt)) { + try { + unlinkSync(cacheFile); + } catch { + // ignore + } + return null; + } return parsed; + } catch { + try { + unlinkSync(cacheFile); + } catch { + // ignore + } + return null; + } +} + +function readClientVersionCache(): { version: string; fetchedAt: number } | null { + try { + if (!existsSync(CLIENT_VERSION_CACHE_FILE)) return null; + const raw = readFileSync(CLIENT_VERSION_CACHE_FILE, "utf8"); + const parsed = JSON.parse(raw) as { version?: string; fetchedAt?: number }; + if (!parsed.version || typeof parsed.fetchedAt !== "number") return null; + return { version: parsed.version, fetchedAt: parsed.fetchedAt }; } catch { return null; } } -function writeModelsCache(cache: ModelsCache): void { +function writeClientVersionCache(version: string): void { + try { + mkdirSync(dirname(CLIENT_VERSION_CACHE_FILE), { recursive: true }); + const payload = JSON.stringify( + { version, fetchedAt: Date.now() }, + null, + 2, + ); + const tempName = `${CLIENT_VERSION_CACHE_FILE}.tmp.${randomBytes(8).toString("hex")}`; + writeFileSync(tempName, payload, "utf8"); + renameSync(tempName, CLIENT_VERSION_CACHE_FILE); + } catch { + // ignore + } +} + +function readInMemoryModelsCache(accountId?: string): ModelsCache | null { + return inMemoryModelsCacheByAccount.get(accountId ?? null) ?? null; +} + +function writeInMemoryModelsCache(cache: ModelsCache, accountId?: string): void { + inMemoryModelsCacheByAccount.set(accountId ?? 
null, cache); +} + +function readSessionModelsCache(accountId?: string): ModelsCache | null { + const cached = readInMemoryModelsCache(accountId); + if (cached) { + // Apply hard session limit (Issue 8) + if (Date.now() - cached.fetchedAt > MODELS_SESSION_MAX_AGE_MS) { + inMemoryModelsCacheByAccount.delete(accountId ?? null); + return readModelsCache(accountId); + } + return cached; + } + const disk = readModelsCache(accountId); + if (disk) writeInMemoryModelsCache(disk, accountId); + return disk; +} + +function extractVariantEfforts(models: ModelInfo[]): Map { + const efforts = new Map(); + for (const model of models) { + if (!model?.slug) continue; + const levels = model.supported_reasoning_levels ?? []; + const normalized = levels + .map((level) => level?.effort) + .filter((effort): effort is string => typeof effort === "string") + .map((effort) => effort.trim().toLowerCase()) + .filter((effort) => SUPPORTED_EFFORTS.has(effort)); + if (normalized.length === 0) continue; + const baseId = normalizeModelSlug(model.slug); + efforts.set(baseId, Array.from(new Set(normalized))); + } + return efforts; +} + +export function getCachedVariantEfforts(accountId?: string): Map { + const cached = readSessionModelsCache(accountId); + if (!cached?.models) return new Map(); + return extractVariantEfforts(cached.models); +} + +function isCacheFresh(cache: ModelsCache | null): boolean { + if (!cache) return false; + if (!Number.isFinite(cache.fetchedAt)) return false; + return Date.now() - cache.fetchedAt < MODELS_CACHE_TTL_MS; +} + +async function writeModelsCache(cache: ModelsCache, accountId?: string): Promise { + const cacheFile = getModelsCacheFile(accountId); try { if (!existsSync(CACHE_DIR)) { mkdirSync(CACHE_DIR, { recursive: true }); } - writeFileSync(MODELS_CACHE_FILE, JSON.stringify(cache, null, 2), "utf8"); + let release: (() => Promise) | null = null; + try { + release = await lockfile.lock(CACHE_DIR, LOCK_OPTIONS); + const tmpPath = 
`${cacheFile}.${randomBytes(6).toString("hex")}.tmp`; + try { + writeFileSync(tmpPath, JSON.stringify(cache, null, 2), "utf8"); + renameSync(tmpPath, cacheFile); + } catch (error) { + try { + unlinkSync(tmpPath); + } catch { + // ignore cleanup failures + } + throw error; + } + } finally { + if (release) { + await release().catch(() => undefined); + } + } } catch (error) { logWarn("Failed to write models cache", error); } @@ -145,19 +411,30 @@ function buildModelsHeaders( async function fetchModelsFromServer( options: ModelsFetchOptions, -): Promise<{ models: ModelInfo[]; etag: string | null } | null> { + cachedEtag?: string | null, +): Promise<{ models: ModelInfo[]; etag: string | null; notModified?: boolean } | null> { const fetchImpl = options.fetchImpl ?? fetch; const controller = new AbortController(); const timeout = setTimeout(() => controller.abort(), MODELS_FETCH_TIMEOUT_MS); try { const baseUrl = `${CODEX_BASE_URL}${URL_PATHS.CODEX_MODELS}`; - const url = `${baseUrl}${baseUrl.includes("?") ? "&" : "?"}client_version=opencode-openai-codex-multi-auth`; + const clientVersion = await resolveCodexClientVersion(fetchImpl); + const url = `${baseUrl}${baseUrl.includes("?") ? "&" : "?"}client_version=${encodeURIComponent(clientVersion)}`; + const headers = buildModelsHeaders(options.accessToken, options.accountId); + if (cachedEtag) headers["If-None-Match"] = cachedEtag; const response = await fetchImpl(url, { method: "GET", - headers: buildModelsHeaders(options.accessToken, options.accountId), + headers, signal: controller.signal, }); + if (response.status === 304) { + return { + models: [], + etag: response.headers.get("etag") ?? cachedEtag ?? 
null, + notModified: true, + }; + } if (!response.ok) { throw new Error(`HTTP ${response.status}`); } @@ -165,8 +442,8 @@ async function fetchModelsFromServer( if (parsed.length === 0) { throw new Error("Models payload missing models array"); } - const etag = response.headers.get("etag"); - return { models: parsed, etag }; + const responseEtag = response.headers.get("etag"); + return { models: parsed, etag: responseEtag }; } finally { clearTimeout(timeout); } @@ -225,6 +502,8 @@ function resolveStaticTemplateFiles(moduleDir: string = __dirname): string[] { } function readStaticTemplateDefaults(moduleDir: string = __dirname): Map { + const cached = STATIC_TEMPLATE_DEFAULTS.get(moduleDir); + if (cached) return cached; const defaults = new Map(); const templateFiles = resolveStaticTemplateFiles(moduleDir); @@ -246,33 +525,98 @@ function readStaticTemplateDefaults(moduleDir: string = __dirname): Map { - const cached = readModelsCache(); +): Promise<{ + serverModels?: ModelInfo[]; + cachedModels?: ModelInfo[]; + cachedSource?: ModelsCache["source"]; + serverIsCache?: boolean; +}> { + const accountId = options.accountId; + const cached = readSessionModelsCache(accountId); + const cacheIsFresh = isCacheFresh(cached); + if (cached && cacheIsFresh && !options.forceRefresh) { + return { + serverModels: cached.models, + cachedModels: cached.models, + cachedSource: cached.source, + serverIsCache: true, + }; + } + + // Build auth key for backoff tracking (per-account or shared "auth" bucket) + const authKey = options.accessToken + ? (options.accountId ?? 
"auth") + : null; + + // Apply backoff guard BEFORE any server attempt (even on cold start) + // This prevents hammering the server when it's down + if (!options.forceRefresh && authKey) { + const lastAttempt = lastServerAttemptByAuth.get(authKey); + if (lastAttempt && Date.now() - lastAttempt < MODELS_SERVER_RETRY_BACKOFF_MS) { + // Return cached if available, otherwise signal to use GitHub/static fallback + if (cached?.models) { + return { + serverModels: cached.models, + cachedModels: cached.models, + cachedSource: cached.source, + serverIsCache: true, + }; + } + // No cache - caller should use GitHub fallback without server retry + logDebug(`Server backoff active for ${authKey}; skipping /models fetch`); + return { cachedModels: undefined, cachedSource: cached?.source }; + } + } try { - const server = await fetchModelsFromServer(options); + // Record attempt timestamp BEFORE the call (gate future calls immediately) + if (authKey) { + lastServerAttemptByAuth.set(authKey, Date.now()); + } + const server = await fetchModelsFromServer(options, cached?.etag ?? null); + if (server?.notModified && cached?.models?.length) { + const updated = { + ...cached, + etag: server.etag ?? cached.etag ?? 
null, + fetchedAt: Date.now(), + }; + writeInMemoryModelsCache(updated, accountId); + await writeModelsCache(updated, accountId); + return { + serverModels: cached.models, + cachedModels: cached.models, + cachedSource: cached?.source, + serverIsCache: true, + }; + } if (server) { - writeModelsCache({ + const updated = { fetchedAt: Date.now(), - source: "server", + source: "server" as const, models: server.models, etag: server.etag, - }); + }; + writeInMemoryModelsCache(updated, accountId); + await writeModelsCache(updated, accountId); return { serverModels: server.models, cachedModels: cached?.models, + cachedSource: cached?.source, + serverIsCache: false, }; } } catch (error) { + // Backoff is already set before the call, so future calls will be gated logDebug("Server /models fetch failed; attempting fallbacks", error); } - return { cachedModels: cached?.models }; + return { cachedModels: cached?.models, cachedSource: cached?.source }; } function resolveModelInfo( @@ -284,32 +628,47 @@ function resolveModelInfo( return bySlug.get(target) ?? bySlug.get(stripEffortSuffix(target)); } +function resolveServerCatalog( + serverModels?: ModelInfo[], + cachedModels?: ModelInfo[], + cachedSource?: ModelsCache["source"], + serverIsCache?: boolean, +): ModelInfo[] | null { + if (serverModels?.length) { + if (serverIsCache && cachedSource === "github") return null; + return serverModels; + } + if (cachedModels?.length && cachedSource !== "github") return cachedModels; + return null; +} + export async function getCodexModelRuntimeDefaults( normalizedModel: string, options: ModelsFetchOptions = {}, ): Promise { - const { serverModels, cachedModels } = await loadServerAndCacheCatalog(options); - let model = resolveModelInfo(serverModels ?? 
[], normalizedModel); - - if (!model && cachedModels) { - model = resolveModelInfo(cachedModels, normalizedModel); + const accountId = options.accountId; + const { serverModels, cachedModels, cachedSource, serverIsCache } = + await loadServerAndCacheCatalog(options); + const serverCatalog = resolveServerCatalog( + serverModels, + cachedModels, + cachedSource, + serverIsCache, + ); + if (!serverCatalog || serverCatalog.length === 0) { + throw new ModelCatalogUnavailableError(); } - + const availableModels = Array.from( + new Set(serverCatalog.map((modelInfo) => modelInfo.slug)), + ).sort(); + if (!availableModels.includes(normalizedModel)) { + throw new UnknownModelError(normalizedModel, availableModels); + } + const modelSource: "server" | "cache" = + serverIsCache && cachedSource !== "github" ? "cache" : "server"; + const model = resolveModelInfo(serverCatalog, normalizedModel); if (!model) { - try { - const githubModels = await fetchModelsFromGitHub(options); - if (githubModels) { - writeModelsCache({ - fetchedAt: Date.now(), - source: "github", - models: githubModels, - etag: null, - }); - model = resolveModelInfo(githubModels, normalizedModel); - } - } catch (error) { - logDebug("GitHub models fallback failed; using static template defaults", error); - } + throw new UnknownModelError(normalizedModel, availableModels); } const staticDefaults = readStaticTemplateDefaults(); @@ -322,30 +681,117 @@ export async function getCodexModelRuntimeDefaults( const explicitOnlineDefault = normalizePersonalityValue( instructionsVariables?.personality, ); + const personalityMessages = extractPersonalityMessages(instructionsVariables); + seedPersonalityCache(personalityMessages, modelSource); + const supportedReasoningEfforts = (model?.supported_reasoning_levels ?? 
[]) + .map((level) => level?.effort) + .filter((effort): effort is string => typeof effort === "string") + .map((effort) => effort.trim().toLowerCase()) + .filter((effort) => SUPPORTED_EFFORTS.has(effort)); + const defaultReasoningEffort = + typeof model?.default_reasoning_level === "string" + ? model.default_reasoning_level.trim().toLowerCase() + : undefined; + const defaultVerbosity = + typeof model?.default_verbosity === "string" + ? model.default_verbosity.trim().toLowerCase() + : undefined; return { onlineDefaultPersonality: explicitOnlineDefault, instructionsTemplate: instructionsTemplate ?? undefined, - personalityMessages: { - default: - typeof instructionsVariables?.personality_default === "string" - ? instructionsVariables.personality_default - : undefined, - friendly: - typeof instructionsVariables?.personality_friendly === "string" - ? instructionsVariables.personality_friendly - : undefined, - pragmatic: - typeof instructionsVariables?.personality_pragmatic === "string" - ? instructionsVariables.personality_pragmatic - : undefined, - }, + baseInstructions: + typeof model?.base_instructions === "string" ? model.base_instructions : undefined, + applyPatchToolType: + typeof model?.apply_patch_tool_type === "string" ? model.apply_patch_tool_type : undefined, + personalityMessages, staticDefaultPersonality, + defaultReasoningEffort, + supportedReasoningEfforts: + supportedReasoningEfforts.length > 0 ? supportedReasoningEfforts : undefined, + supportsReasoningSummaries: + typeof model?.supports_reasoning_summaries === "boolean" + ? model.supports_reasoning_summaries + : undefined, + reasoningSummaryFormat: + typeof model?.reasoning_summary_format === "string" + ? model.reasoning_summary_format + : undefined, + supportsVerbosity: + typeof model?.support_verbosity === "boolean" + ? 
model.support_verbosity + : undefined, + defaultVerbosity, }; } +function seedPersonalityCache( + personalityMessages: Record | undefined, + modelSource: "server" | "cache" | "github" | "static", +): void { + if (!personalityMessages) return; + if (modelSource !== "server" && modelSource !== "cache") return; + + const personalityDir = join(getOpencodeConfigDir(), PERSONALITY_DIR_NAME); + const entries = [ + { key: "friendly", fileName: "Friendly.md" }, + { key: "pragmatic", fileName: "Pragmatic.md" }, + ]; + + for (const entry of entries) { + const message = personalityMessages[entry.key]; + if (typeof message !== "string") continue; + const content = message.trim(); + if (!content) continue; + const filePath = join(personalityDir, entry.fileName); + try { + mkdirSync(personalityDir, { recursive: true }); + if (existsSync(filePath)) { + const existing = readFileSync(filePath, "utf8"); + if (!existing.startsWith(PERSONALITY_CACHE_MARKER)) { + continue; + } + const trimmed = existing + .slice(PERSONALITY_CACHE_MARKER.length) + .trimStart(); + if (trimmed === content) continue; + } + const payload = `${PERSONALITY_CACHE_MARKER}\n${content}`; + writeFileSync(filePath, payload, "utf8"); + try { + chmodSync(filePath, 0o600); + } catch { + // Best-effort permissions. + } + } catch (error) { + logWarn("Failed to update personality cache file"); + logDebug("Personality cache update failed", { filePath, error }); + } + } +} + +export async function warmCodexModelCatalog( + options: ModelsFetchOptions = {}, +): Promise { + const accountId = options.accountId; + try { + if (readInMemoryModelsCache(accountId)) return; + const cached = readModelsCache(accountId); + if (!cached) return; + writeInMemoryModelsCache(cached, accountId); + if (isCacheFresh(cached)) return; + if (!options.fetchImpl && !options.accessToken && !options.accountId) return; + await loadServerAndCacheCatalog(options); + } catch { + // Warm failures should not block startup. 
+ } +} + export const __internal = { - MODELS_CACHE_FILE, + MODELS_CACHE_FILE_BASE, + getModelsCacheFile, + hashAccountId, + CLIENT_VERSION_CACHE_FILE, readStaticTemplateDefaults, resolveStaticTemplateFiles, readModelsCache, diff --git a/lib/prompts/codex.ts b/lib/prompts/codex.ts index 228dac7..5124a2e 100644 --- a/lib/prompts/codex.ts +++ b/lib/prompts/codex.ts @@ -1,9 +1,19 @@ -import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs"; +import { randomBytes } from "node:crypto"; +import { + existsSync, + mkdirSync, + readFileSync, + renameSync, + unlinkSync, + writeFileSync, +} from "node:fs"; import { dirname, join } from "node:path"; import { fileURLToPath } from "node:url"; +import lockfile from "proper-lockfile"; import type { CacheMetadata, GitHubRelease } from "../types.js"; import { getOpencodeCacheDir, migrateLegacyCacheFiles } from "../paths.js"; import { MODEL_FAMILIES, type ModelFamily } from "../constants.js"; +import { logDebug, logWarn } from "../logger.js"; export { MODEL_FAMILIES, type ModelFamily }; @@ -11,16 +21,20 @@ const GITHUB_API_RELEASES = "https://api.github.com/repos/openai/codex/releases/latest"; const GITHUB_HTML_RELEASES = "https://github.com/openai/codex/releases/latest"; +const GITHUB_CORE_PATH = "codex-rs/core"; const CACHE_DIR = getOpencodeCacheDir(); +const CACHE_TTL_MS = 15 * 60 * 1000; // 15 minutes +const PROMPT_FILE_CACHE_TTL_MS = 60 * 60 * 1000; // 1 hour const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); /** - * Prompt file mapping for each model family - * Based on codex-rs/core/src/model_family.rs logic + * Static fallback prompt file mapping for each model family. + * Used when dynamic discovery fails or for immediate startup. 
  */
-const PROMPT_FILES: Record<ModelFamily, string> = {
+const FALLBACK_PROMPT_FILES: Record<ModelFamily, string> = {
+  "gpt-5.3-codex": "gpt-5.2-codex_prompt.md",
   "gpt-5.2-codex": "gpt-5.2-codex_prompt.md",
   "codex-max": "gpt-5.1-codex-max_prompt.md",
   codex: "gpt_5_codex_prompt.md",
@@ -28,10 +42,143 @@ const PROMPT_FILES: Record<ModelFamily, string> = {
   "gpt-5.1": "gpt_5_1_prompt.md",
 };
 
+// Dynamic prompt file mapping cache (discovered from GitHub)
+let discoveredPromptFiles: Map<string, string> | null = null;
+let promptFilesDiscoveredAt: number | null = null;
+
+/**
+ * Prompt file name patterns to search for in codex-rs/core.
+ * Patterns: *_prompt.md, prompt*.md, prompt.md
+ */
+const PROMPT_FILE_PATTERNS = [
+  /_prompt\.md$/i, // e.g., gpt_5_3_codex_prompt.md
+  /^prompt.*\.md$/i, // e.g., prompt_gpt5.md
+  /^prompt\.md$/i, // fallback prompt.md
+];
+
+/**
+ * Normalize a model family to a prompt file search pattern.
+ * Converts "gpt-5.3-codex" → ["gpt_5_3_codex", "gpt-5_3-codex", "gpt-5.3-codex"]
+ */
+function modelFamilyToPromptPatterns(family: ModelFamily): string[] {
+  const patterns: string[] = [];
+  const normalized = family.toLowerCase();
+
+  // Add underscored version (gpt-5.3-codex → gpt_5_3_codex)
+  patterns.push(normalized.replace(/[-.]/g, "_"));
+  // Add dots-to-underscores version, hyphens kept (gpt-5.3-codex → gpt-5_3-codex)
+  patterns.push(normalized.replace(/\./g, "_"));
+  // Add the original, unmodified form
+  patterns.push(normalized);
+
+  return patterns;
+}
+
+/**
+ * Match a prompt filename to a model family.
+ * Returns the model family if matched, otherwise null. 
+ */ +function matchPromptFileToFamily(filename: string): ModelFamily | null { + const lower = filename.toLowerCase(); + if (!lower.endsWith("_prompt.md") && !lower.startsWith("prompt")) { + return null; + } + + // Extract the model identifier from the filename + const base = lower.replace(/_prompt\.md$/, "").replace(/^prompt_?/, "").replace(/\.md$/, ""); + if (!base) return null; + + // Try to match against known model families + for (const family of MODEL_FAMILIES) { + const patterns = modelFamilyToPromptPatterns(family); + for (const pattern of patterns) { + if (base === pattern || base.includes(pattern) || pattern.includes(base)) { + return family; + } + } + } + + return null; +} + +/** + * Discover prompt files from GitHub repository. + * Fetches the file listing from codex-rs/core and maps them to model families. + */ +async function discoverPromptFilesFromGitHub( + tag: string, + fetchImpl: typeof fetch = fetch, +): Promise> { + const discovered = new Map(); + + try { + // Use GitHub API to list files in the directory + const apiUrl = `https://api.github.com/repos/openai/codex/contents/${GITHUB_CORE_PATH}?ref=${tag}`; + const response = await fetchImpl(apiUrl, { + headers: { + Accept: "application/vnd.github.v3+json", + "User-Agent": "opencode-openai-codex-plugin", + }, + }); + + if (!response.ok) { + logDebug(`Failed to list GitHub directory: HTTP ${response.status}`); + return discovered; + } + + const files = (await response.json()) as Array<{ name: string; type: string }>; + const promptFiles = files + .filter((f) => f.type === "file") + .filter((f) => PROMPT_FILE_PATTERNS.some((p) => p.test(f.name))) + .map((f) => f.name); + + logDebug(`Discovered prompt files from GitHub: ${promptFiles.join(", ")}`); + + // Map discovered files to model families + for (const file of promptFiles) { + const family = matchPromptFileToFamily(file); + if (family && !discovered.has(family)) { + discovered.set(family, file); + } + } + + // Also store raw filenames for direct 
lookup + for (const file of promptFiles) { + if (!discovered.has(file)) { + discovered.set(file, file); + } + } + } catch (error) { + logDebug("Failed to discover prompt files from GitHub", error); + } + + return discovered; +} + +/** + * Get the prompt file for a model family. + * Uses dynamic discovery with fallback to static mapping. + */ +function getPromptFileForFamily(modelFamily: ModelFamily): string { + // Check dynamic cache first (if fresh) + if ( + discoveredPromptFiles && + promptFilesDiscoveredAt && + Date.now() - promptFilesDiscoveredAt < PROMPT_FILE_CACHE_TTL_MS + ) { + const discovered = discoveredPromptFiles.get(modelFamily); + if (discovered) return discovered; + } + + // Fall back to static mapping + return FALLBACK_PROMPT_FILES[modelFamily]; +} + /** * Cache file mapping for each model family */ const CACHE_FILES: Record = { + "gpt-5.3-codex": "gpt-5.3-codex-instructions.md", "gpt-5.2-codex": "gpt-5.2-codex-instructions.md", "codex-max": "codex-max-instructions.md", codex: "codex-instructions.md", @@ -44,6 +191,18 @@ const CACHE_META_FILES = Object.values(CACHE_FILES).map((file) => ); const LEGACY_CACHE_FILES = [...Object.values(CACHE_FILES), ...CACHE_META_FILES]; let cacheMigrated = false; +const IN_MEMORY_INSTRUCTIONS = new Map(); + +const LOCK_OPTIONS = { + stale: 10_000, + retries: { + retries: 5, + minTimeout: 100, + maxTimeout: 1000, + factor: 2, + }, + realpath: false, +}; function ensureCacheMigrated(): void { if (cacheMigrated) return; @@ -51,6 +210,63 @@ function ensureCacheMigrated(): void { cacheMigrated = true; } +function readInMemoryInstructions(modelFamily: ModelFamily): string | null { + const entry = IN_MEMORY_INSTRUCTIONS.get(modelFamily); + return entry?.value ?? 
null; +} + +function writeInMemoryInstructions(modelFamily: ModelFamily, value: string): void { + IN_MEMORY_INSTRUCTIONS.set(modelFamily, { value }); +} + +function readCacheMetadata(cacheMetaFile: string): CacheMetadata | null { + try { + if (!existsSync(cacheMetaFile)) return null; + const parsed = JSON.parse(readFileSync(cacheMetaFile, "utf8")) as CacheMetadata; + if (!parsed || typeof parsed.tag !== "string") return null; + if (!Number.isFinite(parsed.lastChecked)) return null; + if (typeof parsed.url !== "string") return null; + return parsed; + } catch { + return null; + } +} + +async function writeCacheAtomically( + cacheFile: string, + cacheMetaFile: string, + instructions: string, + metadata: CacheMetadata, +): Promise { + if (!existsSync(CACHE_DIR)) { + mkdirSync(CACHE_DIR, { recursive: true }); + } + let release: (() => Promise) | null = null; + try { + release = await lockfile.lock(CACHE_DIR, LOCK_OPTIONS); + const tmpCachePath = `${cacheFile}.${randomBytes(6).toString("hex")}.tmp`; + const tmpMetaPath = `${cacheMetaFile}.${randomBytes(6).toString("hex")}.tmp`; + try { + writeFileSync(tmpCachePath, instructions, "utf8"); + writeFileSync(tmpMetaPath, JSON.stringify(metadata), "utf8"); + renameSync(tmpCachePath, cacheFile); + renameSync(tmpMetaPath, cacheMetaFile); + } catch (error) { + try { + unlinkSync(tmpCachePath); + } catch { } + try { + unlinkSync(tmpMetaPath); + } catch { } + throw error; + } + } finally { + if (release) { + await release().catch(() => undefined); + } + } +} + /** * Determine the model family based on the normalized model name * @param normalizedModel - The normalized model name (e.g., "gpt-5.2-codex", "gpt-5.1-codex-max", "gpt-5.1-codex", "gpt-5.1") @@ -58,6 +274,12 @@ function ensureCacheMigrated(): void { */ export function getModelFamily(normalizedModel: string): ModelFamily { // Order matters - check more specific patterns first + if ( + normalizedModel.includes("gpt-5.3-codex") || + normalizedModel.includes("gpt 5.3 codex") 
+ ) { + return "gpt-5.3-codex"; + } if ( normalizedModel.includes("gpt-5.2-codex") || normalizedModel.includes("gpt 5.2 codex") @@ -137,7 +359,9 @@ export async function getCodexInstructions( ): Promise { ensureCacheMigrated(); const modelFamily = getModelFamily(normalizedModel); - const promptFile = PROMPT_FILES[modelFamily]; + const inMemory = readInMemoryInstructions(modelFamily); + if (inMemory) return inMemory; + const promptFile = getPromptFileForFamily(modelFamily); const cacheFile = join(CACHE_DIR, CACHE_FILES[modelFamily]); const cacheMetaFile = join( CACHE_DIR, @@ -146,32 +370,39 @@ export async function getCodexInstructions( try { // Load cached metadata (includes ETag, tag, and lastChecked timestamp) - let cachedETag: string | null = null; - let cachedTag: string | null = null; - let cachedTimestamp: number | null = null; - - if (existsSync(cacheMetaFile)) { - const metadata = JSON.parse( - readFileSync(cacheMetaFile, "utf8"), - ) as CacheMetadata; - cachedETag = metadata.etag; - cachedTag = metadata.tag; - cachedTimestamp = metadata.lastChecked; - } + const metadata = readCacheMetadata(cacheMetaFile); + let cachedETag: string | null = metadata?.etag ?? null; + let cachedTag: string | null = metadata?.tag ?? null; + let cachedTimestamp: number | null = metadata?.lastChecked ?? 
null; // Rate limit protection: If cache is less than 15 minutes old, use it - const CACHE_TTL_MS = 15 * 60 * 1000; // 15 minutes if ( cachedTimestamp && Date.now() - cachedTimestamp < CACHE_TTL_MS && existsSync(cacheFile) ) { - return readFileSync(cacheFile, "utf8"); + const instructions = readFileSync(cacheFile, "utf8"); + writeInMemoryInstructions(modelFamily, instructions); + return instructions; } // Get the latest release tag (only if cache is stale or missing) const latestTag = await getLatestReleaseTag(fetch); - const CODEX_INSTRUCTIONS_URL = `https://raw.githubusercontent.com/openai/codex/${latestTag}/codex-rs/core/${promptFile}`; + + // Try to discover prompt files dynamically (updates cache for future calls) + if (!discoveredPromptFiles || !promptFilesDiscoveredAt || + Date.now() - promptFilesDiscoveredAt > PROMPT_FILE_CACHE_TTL_MS) { + try { + discoveredPromptFiles = await discoverPromptFilesFromGitHub(latestTag, fetch); + promptFilesDiscoveredAt = Date.now(); + } catch { + // Discovery failure is non-fatal; continue with fallback + } + } + + // Re-resolve prompt file after discovery (may have found a new one) + const resolvedPromptFile = getPromptFileForFamily(modelFamily); + const CODEX_INSTRUCTIONS_URL = `https://raw.githubusercontent.com/openai/codex/${latestTag}/${GITHUB_CORE_PATH}/${resolvedPromptFile}`; // If tag changed, we need to fetch new instructions if (cachedTag !== latestTag) { @@ -184,62 +415,93 @@ export async function getCodexInstructions( headers["If-None-Match"] = cachedETag; } - const response = await fetch(CODEX_INSTRUCTIONS_URL, { headers }); + let response = await fetch(CODEX_INSTRUCTIONS_URL, { headers }); // 304 Not Modified - our cached version is still current if (response.status === 304) { if (existsSync(cacheFile)) { - return readFileSync(cacheFile, "utf8"); + const instructions = readFileSync(cacheFile, "utf8"); + writeInMemoryInstructions(modelFamily, instructions); + return instructions; } - // Cache file missing but 
GitHub says not modified - fall through to re-fetch + response = await fetch(CODEX_INSTRUCTIONS_URL); + } + + // 404 Not Found - try fallback to generic prompt.md + if (response.status === 404 && resolvedPromptFile !== "prompt.md") { + logDebug(`Prompt file ${resolvedPromptFile} not found; trying prompt.md fallback`); + const fallbackUrl = `https://raw.githubusercontent.com/openai/codex/${latestTag}/${GITHUB_CORE_PATH}/prompt.md`; + response = await fetch(fallbackUrl); } // 200 OK - new content or first fetch if (response.ok) { const instructions = await response.text(); const newETag = response.headers.get("etag"); - - // Create cache directory if it doesn't exist - if (!existsSync(CACHE_DIR)) { - mkdirSync(CACHE_DIR, { recursive: true }); - } - - // Cache the instructions with ETag and tag (verbatim from GitHub) - writeFileSync(cacheFile, instructions, "utf8"); - writeFileSync( - cacheMetaFile, - JSON.stringify({ - etag: newETag, - tag: latestTag, - lastChecked: Date.now(), - url: CODEX_INSTRUCTIONS_URL, - } satisfies CacheMetadata), - "utf8", - ); - + await writeCacheAtomically(cacheFile, cacheMetaFile, instructions, { + etag: newETag, + tag: latestTag, + lastChecked: Date.now(), + url: CODEX_INSTRUCTIONS_URL, + }); + writeInMemoryInstructions(modelFamily, instructions); return instructions; } throw new Error(`HTTP ${response.status}`); } catch (error) { const err = error as Error; - console.error( + logWarn( `[openai-codex-plugin] Failed to fetch ${modelFamily} instructions from GitHub:`, err.message, ); // Try to use cached version even if stale if (existsSync(cacheFile)) { - console.error( + logWarn( `[openai-codex-plugin] Using cached ${modelFamily} instructions`, ); - return readFileSync(cacheFile, "utf8"); + const instructions = readFileSync(cacheFile, "utf8"); + writeInMemoryInstructions(modelFamily, instructions); + return instructions; } // Fall back to bundled version (use codex-instructions.md as default) - console.error( + logWarn( 
`[openai-codex-plugin] Falling back to bundled instructions for ${modelFamily}`, ); - return readFileSync(join(__dirname, "codex-instructions.md"), "utf8"); + const bundled = readFileSync(join(__dirname, "codex-instructions.md"), "utf8"); + writeInMemoryInstructions(modelFamily, bundled); + return bundled; } } + +export async function warmCodexInstructions(): Promise { + ensureCacheMigrated(); + const tasks = MODEL_FAMILIES.map(async (modelFamily) => { + try { + if (readInMemoryInstructions(modelFamily)) return; + const cacheFile = join(CACHE_DIR, CACHE_FILES[modelFamily]); + const cacheMetaFile = join( + CACHE_DIR, + `${CACHE_FILES[modelFamily].replace(".md", "-meta.json")}`, + ); + const metadata = readCacheMetadata(cacheMetaFile); + const hasCacheFile = existsSync(cacheFile); + const isFresh = + metadata?.lastChecked && + Date.now() - metadata.lastChecked < CACHE_TTL_MS; + + if (isFresh && hasCacheFile) { + const instructions = readFileSync(cacheFile, "utf8"); + writeInMemoryInstructions(modelFamily, instructions); + return; + } + if (!metadata && !hasCacheFile) return; + await getCodexInstructions(modelFamily); + } catch { + // Warm failures should not block startup. + } + }); + await Promise.allSettled(tasks); +} diff --git a/lib/request/errors.ts b/lib/request/errors.ts new file mode 100644 index 0000000..9d2ee11 --- /dev/null +++ b/lib/request/errors.ts @@ -0,0 +1,46 @@ +export class UnknownModelError extends Error { + readonly availableModels?: string[]; + + constructor(modelId: string, availableModels?: string[]) { + const suffix = + availableModels && availableModels.length > 0 + ? ` Available models: ${availableModels.join(", ")}.` + : ""; + super( + `Unknown model "${modelId}". Update your config to a supported model ID.${suffix}`, + ); + this.name = "UnknownModelError"; + this.availableModels = availableModels; + } +} + +export class ModelCatalogUnavailableError extends Error { + constructor() { + super( + "Model catalog unavailable. 
Run once with network access to seed the /codex/models cache.", + ); + this.name = "ModelCatalogUnavailableError"; + } +} + +function isErrorLike(err: unknown): err is { name?: unknown } { + return typeof err === "object" && err !== null; +} + +export function isUnknownModelError(err: unknown): err is UnknownModelError { + if (err instanceof UnknownModelError) return true; + return isErrorLike(err) && err.name === "UnknownModelError"; +} + +export function isModelCatalogUnavailableError( + err: unknown, +): err is ModelCatalogUnavailableError { + if (err instanceof ModelCatalogUnavailableError) return true; + return isErrorLike(err) && err.name === "ModelCatalogUnavailableError"; +} + +export function isModelCatalogError( + err: unknown, +): err is UnknownModelError | ModelCatalogUnavailableError { + return isUnknownModelError(err) || isModelCatalogUnavailableError(err); +} diff --git a/lib/request/fetch-helpers.ts b/lib/request/fetch-helpers.ts index 934f38f..680e83a 100644 --- a/lib/request/fetch-helpers.ts +++ b/lib/request/fetch-helpers.ts @@ -10,8 +10,9 @@ import { logRequest, logWarn } from "../logger.js"; import { getCodexInstructions, getModelFamily } from "../prompts/codex.js"; import { getCodexModelRuntimeDefaults } from "../prompts/codex-models.js"; import { transformRequestBody, normalizeModel } from "./request-transformer.js"; +import { isModelCatalogError } from "./errors.js"; import { convertSseToJson, ensureContentType } from "./response-handler.js"; -import type { UserConfig, RequestBody } from "../types.js"; +import type { UserConfig, RequestBody, PluginConfig } from "../types.js"; import { PLUGIN_NAME, HTTP_STATUS, @@ -103,7 +104,11 @@ export async function transformRequestForCodex( init: RequestInit | undefined, url: string, userConfig: UserConfig, - runtimeContext?: { accessToken?: string; accountId?: string }, + runtimeContext?: { + accessToken?: string; + accountId?: string; + pluginConfig?: PluginConfig; + }, ): Promise<{ body: RequestBody; 
updatedInit: RequestInit } | undefined> { if (!init?.body) return undefined; @@ -140,6 +145,7 @@ export async function transformRequestForCodex( codexInstructions, userConfig, runtimeDefaults, + runtimeContext?.pluginConfig, ); // Log transformed request @@ -162,6 +168,9 @@ export async function transformRequestForCodex( updatedInit: { ...init, body: JSON.stringify(transformedBody) }, }; } catch (e) { + if (isModelCatalogError(e)) { + throw e; + } logWarn(ERROR_MESSAGES.REQUEST_PARSE_ERROR, e); return undefined; } diff --git a/lib/request/helpers/model-map.ts b/lib/request/helpers/model-map.ts index 1eab203..06ce570 100644 --- a/lib/request/helpers/model-map.ts +++ b/lib/request/helpers/model-map.ts @@ -75,37 +75,47 @@ export const MODEL_MAP: Record = { "gpt-5.1-chat-latest": "gpt-5.1", // ============================================================================ - // GPT-5 Codex Models (LEGACY - maps to gpt-5.1-codex as gpt-5 is being phased out) + // GPT-5 lightweight aliases // ============================================================================ - "gpt-5-codex": "gpt-5.1-codex", + "gpt-5-mini": "gpt-5-mini", + "gpt-5-nano": "gpt-5-nano", // ============================================================================ - // GPT-5 Codex Mini Models (LEGACY - maps to gpt-5.1-codex-mini) - // ============================================================================ - "codex-mini-latest": "gpt-5.1-codex-mini", - "gpt-5-codex-mini": "gpt-5.1-codex-mini", - "gpt-5-codex-mini-medium": "gpt-5.1-codex-mini", - "gpt-5-codex-mini-high": "gpt-5.1-codex-mini", - - // ============================================================================ - // GPT-5 General Purpose Models (LEGACY - maps to gpt-5.1 as gpt-5 is being phased out) + // Legacy GPT-5 aliases (map to modern equivalents) // ============================================================================ "gpt-5": "gpt-5.1", - "gpt-5-mini": "gpt-5.1", - "gpt-5-nano": "gpt-5.1", + "gpt-5-none": "gpt-5.1", + 
"gpt-5-minimal": "gpt-5.1", + "gpt-5-low": "gpt-5.1", + "gpt-5-medium": "gpt-5.1", + "gpt-5-high": "gpt-5.1", + "gpt-5-xhigh": "gpt-5.1", + + "gpt-5-codex": "gpt-5.1-codex", + "gpt-5-codex-none": "gpt-5.1-codex", + "gpt-5-codex-minimal": "gpt-5.1-codex", + "gpt-5-codex-low": "gpt-5.1-codex", + "gpt-5-codex-medium": "gpt-5.1-codex", + "gpt-5-codex-high": "gpt-5.1-codex", + "gpt-5-codex-xhigh": "gpt-5.1-codex", + + "codex-mini-latest": "gpt-5.1-codex-mini", + "codex-mini-latest-none": "gpt-5.1-codex-mini", + "codex-mini-latest-minimal": "gpt-5.1-codex-mini", + "codex-mini-latest-low": "gpt-5.1-codex-mini", + "codex-mini-latest-medium": "gpt-5.1-codex-mini", + "codex-mini-latest-high": "gpt-5.1-codex-mini", + "codex-mini-latest-xhigh": "gpt-5.1-codex-mini", }; const EFFORT_SUFFIX_REGEX = /-(none|minimal|low|medium|high|xhigh)$/i; const GPT_CODEX_DYNAMIC_REGEX = - /^(gpt-\d+(?:\.\d+)*-codex(?:-(?:max|mini))?)(?:-(?:none|minimal|low|medium|high|xhigh))?$/i; + /^(gpt-5\.\d+(?:\.\d+)*-codex(?:-(?:max|mini))?)(?:-(?:none|minimal|low|medium|high|xhigh))?$/i; +const GPT_GENERAL_PRO_DYNAMIC_REGEX = + /^(gpt-5\.\d+(?:\.\d+)*-pro)(?:-(?:none|low|medium|high|xhigh))?$/i; const GPT_GENERAL_DYNAMIC_REGEX = - /^(gpt-\d+(?:\.\d+)*)(?:-(?:none|minimal|low|medium|high|xhigh))$/i; -const LEGACY_DYNAMIC_ALIASES: Record = { - "gpt-5": "gpt-5.1", - "gpt-5-codex": "gpt-5.1-codex", - "gpt-5-codex-max": "gpt-5.1-codex-max", - "gpt-5-codex-mini": "gpt-5.1-codex-mini", -}; + /^(gpt-5\.\d+(?:\.\d+)*)(?:-(?:none|low|medium|high|xhigh))?$/i; +const LEGACY_DYNAMIC_ALIASES: Record = {}; function applyDynamicAlias(baseModel: string): string { return LEGACY_DYNAMIC_ALIASES[baseModel] ?? 
baseModel; @@ -119,13 +129,20 @@ function getDynamicNormalizedModel(modelId: string): string | undefined { return applyDynamicAlias(codexMatch[1]); } + const proMatch = normalized.match(GPT_GENERAL_PRO_DYNAMIC_REGEX); + if (proMatch?.[1]) { + return applyDynamicAlias(proMatch[1]); + } + const generalMatch = normalized.match(GPT_GENERAL_DYNAMIC_REGEX); if (generalMatch?.[1]) { return applyDynamicAlias(generalMatch[1]); } // Fallback for odd casing/formatting where only effort suffix needs stripping. - if (EFFORT_SUFFIX_REGEX.test(normalized) && normalized.startsWith("gpt-")) { + if (EFFORT_SUFFIX_REGEX.test(normalized) && normalized.startsWith("gpt-5.")) { + const effort = normalized.match(EFFORT_SUFFIX_REGEX)?.[1]?.toLowerCase(); + if (effort === "minimal") return undefined; return applyDynamicAlias(normalized.replace(EFFORT_SUFFIX_REGEX, "")); } diff --git a/lib/request/request-transformer.ts b/lib/request/request-transformer.ts index 6c85dcc..2004ecb 100644 --- a/lib/request/request-transformer.ts +++ b/lib/request/request-transformer.ts @@ -5,15 +5,22 @@ import { normalizeOrphanedToolOutputs } from "./helpers/input-utils.js"; import type { ConfigOptions, InputItem, + PluginConfig, ReasoningConfig, RequestBody, UserConfig, } from "../types.js"; +import { resolveCustomPersonalityDescription } from "../personalities.js"; -type PersonalityOption = NonNullable; - -const PERSONALITY_VALUES = new Set([ +type PersonalityOption = string; +type ResolvedPersonality = { + value: PersonalityOption; + raw: string; +}; +const DEFAULT_PERSONALITY = "pragmatic"; +const PERSONALITY_VALUES = new Set([ "none", + "default", "friendly", "pragmatic", ]); @@ -24,54 +31,72 @@ const PERSONALITY_FALLBACK_TEXT: Record, stri pragmatic: "Adopt a pragmatic, concise, execution-focused tone with direct guidance.", }; +const VERBOSITY_VALUES = new Set(["low", "medium", "high"]); let didLogInvalidPersonality = false; -type PersonalityParseResult = - | { kind: "unset" } - | { kind: "valid"; value: 
PersonalityOption } - | { kind: "invalid" }; - -function parsePersonalityValue( - value: unknown, - source: "model" | "global", -): PersonalityParseResult { - if (typeof value !== "string") return { kind: "unset" }; +function normalizePersonalityKey(value: unknown): string | undefined { + if (typeof value !== "string") return undefined; const normalized = value.trim().toLowerCase(); - if (!normalized) return { kind: "unset" }; - if (!PERSONALITY_VALUES.has(normalized as PersonalityOption)) { - if (!didLogInvalidPersonality) { - logDebug( - `Invalid ${source} personality "${value}" detected; coercing to "none"`, - ); - didLogInvalidPersonality = true; - } - return { kind: "invalid" }; - } - return { kind: "valid", value: normalized as PersonalityOption }; + return normalized ? normalized : undefined; } -function resolvePersonality( - modelOptions: ConfigOptions, - globalOptions: ConfigOptions, - runtimeDefaults?: CodexModelRuntimeDefaults, -): PersonalityOption { - const modelValue = parsePersonalityValue(modelOptions.personality, "model"); - if (modelValue.kind === "valid") return modelValue.value; - if (modelValue.kind === "invalid") return "none"; - - // Online model default is preferred over global backup when available. - if (runtimeDefaults?.onlineDefaultPersonality) { - return runtimeDefaults.onlineDefaultPersonality; +function applyCustomSettings( + userConfig: UserConfig, + pluginConfig?: PluginConfig, +): UserConfig { + const custom = pluginConfig?.custom_settings; + if (!custom) return userConfig; + + const merged: UserConfig = { + global: { ...userConfig.global, ...(custom.options ?? {}) }, + models: { ...userConfig.models }, + }; + + if (custom.models) { + for (const [modelId, override] of Object.entries(custom.models)) { + const existing = merged.models[modelId] ?? {}; + const mergedOptions = { + ...(existing.options ?? {}), + ...(override.options ?? {}), + }; + const mergedVariants = { + ...(existing.variants ?? {}), + ...(override.variants ?? 
{}), + }; + merged.models[modelId] = { + ...existing, + ...override, + options: mergedOptions, + variants: mergedVariants, + }; + } } - const globalValue = parsePersonalityValue( - globalOptions.personality, - "global", - ); - if (globalValue.kind === "valid") return globalValue.value; - if (globalValue.kind === "invalid") return "none"; + return merged; +} - return runtimeDefaults?.staticDefaultPersonality ?? "none"; +function normalizeVerbosity( + value: unknown, +): "low" | "medium" | "high" | undefined { + if (typeof value !== "string") return undefined; + const normalized = value.trim().toLowerCase(); + if (!VERBOSITY_VALUES.has(normalized)) return undefined; + return normalized as "low" | "medium" | "high"; +} + +function resolvePersonality( + modelLookupKey: string, + pluginConfig?: PluginConfig, +): ResolvedPersonality { + const custom = pluginConfig?.custom_settings; + const modelOverride = custom?.models?.[modelLookupKey]?.options?.personality; + const globalOverride = custom?.options?.personality; + const rawCandidate = + (typeof modelOverride === "string" && modelOverride.trim()) || + (typeof globalOverride === "string" && globalOverride.trim()) || + DEFAULT_PERSONALITY; + const normalized = normalizePersonalityKey(rawCandidate) ?? DEFAULT_PERSONALITY; + return { value: normalized, raw: rawCandidate }; } function getModelLookupCandidates( @@ -98,31 +123,71 @@ function getModelLookupCandidates( } function resolvePersonalityMessage( - personality: PersonalityOption, + personality: ResolvedPersonality, runtimeDefaults?: CodexModelRuntimeDefaults, ): string { - if (personality === "none") { - return runtimeDefaults?.personalityMessages?.default ?? ""; + const fileDescription = resolveCustomPersonalityDescription(personality.value); + if (fileDescription && fileDescription.trim()) { + return fileDescription; } - return ( - runtimeDefaults?.personalityMessages?.[personality] ?? 
- PERSONALITY_FALLBACK_TEXT[personality] - ); + + const runtimeMessages = runtimeDefaults?.personalityMessages ?? {}; + if (typeof runtimeMessages[personality.value] === "string") { + return runtimeMessages[personality.value]; + } + if (personality.value === "default") { + if (typeof runtimeMessages.default === "string") { + const directDefault = runtimeMessages.default.trim(); + if (directDefault) return directDefault; + } + const defaultKey = + runtimeDefaults?.onlineDefaultPersonality ?? + runtimeDefaults?.staticDefaultPersonality ?? + DEFAULT_PERSONALITY; + if (defaultKey === "none") return ""; + if (typeof runtimeMessages[defaultKey] === "string") { + return runtimeMessages[defaultKey]; + } + if (defaultKey === "friendly") { + return runtimeMessages.friendly ?? PERSONALITY_FALLBACK_TEXT.friendly; + } + return runtimeMessages.pragmatic ?? PERSONALITY_FALLBACK_TEXT.pragmatic; + } + if (personality.value === "none") return ""; + + if (personality.value === "friendly") { + return runtimeMessages.friendly ?? PERSONALITY_FALLBACK_TEXT.friendly; + } + if (personality.value === "pragmatic") { + return runtimeMessages.pragmatic ?? PERSONALITY_FALLBACK_TEXT.pragmatic; + } + + if (!didLogInvalidPersonality) { + const invalidLabel = personality.raw || personality.value; + logDebug( + `Invalid personality "${invalidLabel}" detected; coercing to "${DEFAULT_PERSONALITY}"`, + ); + didLogInvalidPersonality = true; + } + return runtimeMessages.pragmatic ?? PERSONALITY_FALLBACK_TEXT.pragmatic; } function renderCodexInstructions( baseInstructions: string, - personality: PersonalityOption, + personality: ResolvedPersonality, runtimeDefaults?: CodexModelRuntimeDefaults, ): string { const instructions = runtimeDefaults?.instructionsTemplate ?? 
baseInstructions; - const personalityMessage = resolvePersonalityMessage(personality, runtimeDefaults); + const personalityMessage = resolvePersonalityMessage( + personality, + runtimeDefaults, + ); if (instructions.includes(PERSONALITY_PLACEHOLDER)) { return instructions.replaceAll(PERSONALITY_PLACEHOLDER, personalityMessage); } - if (personality === "none") return instructions; + if (personality.value === "none") return instructions; const appended = personalityMessage.trim(); if (!appended) return instructions; @@ -142,84 +207,19 @@ function renderCodexInstructions( export function normalizeModel(model: string | undefined): string { if (!model) return "gpt-5.1"; - // Strip provider prefix if present (e.g., "openai/gpt-5-codex" β†’ "gpt-5-codex") + // Strip provider prefix if present (e.g., "openai/gpt-5.3-codex" β†’ "gpt-5.3-codex") const modelId = model.includes("/") ? model.split("/").pop()! : model; + const trimmed = modelId.trim(); + if (!trimmed) return "gpt-5.1"; // Try explicit model map first (handles all known model variants) - const mappedModel = getNormalizedModel(modelId); + const mappedModel = getNormalizedModel(trimmed); if (mappedModel) { return mappedModel; } - // Fallback: Pattern-based matching for unknown/custom model names - // This preserves backwards compatibility with old verbose names - // like "GPT 5 Codex Low (ChatGPT Subscription)" - const normalized = modelId.toLowerCase(); - - // Priority order for pattern matching (most specific first): - // 1. GPT-5.2 Codex (newest codex model) - if ( - normalized.includes("gpt-5.2-codex") || - normalized.includes("gpt 5.2 codex") - ) { - return "gpt-5.2-codex"; - } - - // 2. GPT-5.2 (general purpose) - if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) { - return "gpt-5.2"; - } - - // 3. GPT-5.1 Codex Max - if ( - normalized.includes("gpt-5.1-codex-max") || - normalized.includes("gpt 5.1 codex max") - ) { - return "gpt-5.1-codex-max"; - } - - // 4. 
GPT-5.1 Codex Mini - if ( - normalized.includes("gpt-5.1-codex-mini") || - normalized.includes("gpt 5.1 codex mini") - ) { - return "gpt-5.1-codex-mini"; - } - - // 5. Legacy Codex Mini - if ( - normalized.includes("codex-mini-latest") || - normalized.includes("gpt-5-codex-mini") || - normalized.includes("gpt 5 codex mini") - ) { - return "codex-mini-latest"; - } - - // 6. GPT-5.1 Codex - if ( - normalized.includes("gpt-5.1-codex") || - normalized.includes("gpt 5.1 codex") - ) { - return "gpt-5.1-codex"; - } - - // 7. GPT-5.1 (general-purpose) - if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) { - return "gpt-5.1"; - } - - // 8. GPT-5 Codex family (any variant with "codex") - if (normalized.includes("codex")) { - return "gpt-5.1-codex"; - } - - // 9. GPT-5 family (any variant) - default to 5.1 as 5 is being phased out - if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) { - return "gpt-5.1"; - } - - // Default fallback - use gpt-5.1 as gpt-5 is being phased out - return "gpt-5.1"; + // Leave unknown/legacy models untouched to avoid false positives. + return trimmed.toLowerCase(); } /** @@ -243,12 +243,47 @@ function resolveReasoningConfig( modelName: string, modelConfig: ConfigOptions, body: RequestBody, + runtimeDefaults?: CodexModelRuntimeDefaults, ): ReasoningConfig { const providerOpenAI = body.providerOptions?.openai; const existingEffort = body.reasoning?.effort ?? providerOpenAI?.reasoningEffort; const existingSummary = body.reasoning?.summary ?? 
providerOpenAI?.reasoningSummary; + const supportedEfforts = runtimeDefaults?.supportedReasoningEfforts; + const defaultEffort = runtimeDefaults?.defaultReasoningEffort; + const summaryUnsupported = + runtimeDefaults?.supportsReasoningSummaries === false || + runtimeDefaults?.reasoningSummaryFormat === "none"; + + if (supportedEfforts && supportedEfforts.length > 0) { + const normalizedEfforts = supportedEfforts.map((effort) => + effort.toLowerCase(), + ); + const effortSet = new Set(normalizedEfforts); + const requested = existingEffort ?? modelConfig.reasoningEffort; + let effort = + requested && effortSet.has(String(requested).toLowerCase()) + ? String(requested).toLowerCase() + : undefined; + if (!effort) { + const defaultCandidate = defaultEffort + ? defaultEffort.toLowerCase() + : undefined; + if (defaultCandidate && effortSet.has(defaultCandidate)) { + effort = defaultCandidate; + } else { + effort = normalizedEfforts[0]; + } + } + let summary = + existingSummary ?? modelConfig.reasoningSummary ?? "auto"; + if (summaryUnsupported) summary = "off"; + return { + effort: effort as ReasoningConfig["effort"], + summary: summary as ReasoningConfig["summary"], + }; + } const mergedConfig: ConfigOptions = { ...modelConfig, @@ -262,14 +297,17 @@ function resolveReasoningConfig( function resolveTextVerbosity( modelConfig: ConfigOptions, body: RequestBody, -): "low" | "medium" | "high" { + runtimeDefaults?: CodexModelRuntimeDefaults, +): "low" | "medium" | "high" | undefined { const providerOpenAI = body.providerOptions?.openai; - return ( + const runtimeVerbosity = normalizeVerbosity(runtimeDefaults?.defaultVerbosity); + const explicit = body.text?.verbosity ?? providerOpenAI?.textVerbosity ?? - modelConfig.textVerbosity ?? - "medium" - ); + modelConfig.textVerbosity; + if (explicit) return explicit; + if (runtimeDefaults?.supportsVerbosity === false) return undefined; + return runtimeVerbosity ?? 
"medium"; } function resolveInclude(modelConfig: ConfigOptions, body: RequestBody): string[] { @@ -304,10 +342,12 @@ export function getReasoningConfig( ): ReasoningConfig { const normalizedName = modelName?.toLowerCase() ?? ""; - // GPT-5.2 Codex is the newest codex model (supports xhigh, but not "none") + // GPT-5.3/5.2 Codex are the newest codex models (support xhigh, but not "none") const isGpt52Codex = normalizedName.includes("gpt-5.2-codex") || - normalizedName.includes("gpt 5.2 codex"); + normalizedName.includes("gpt 5.2 codex") || + normalizedName.includes("gpt-5.3-codex") || + normalizedName.includes("gpt 5.3 codex"); // GPT-5.2 general purpose (not codex variant) const isGpt52General = @@ -455,25 +495,23 @@ export async function transformRequestBody( codexInstructions: string, userConfig: UserConfig = { global: {}, models: {} }, runtimeDefaults?: CodexModelRuntimeDefaults, + pluginConfig?: PluginConfig, ): Promise { const originalModel = body.model; const normalizedModel = normalizeModel(body.model); - const globalOptions = userConfig.global || {}; + const effectiveConfig = applyCustomSettings(userConfig, pluginConfig); + const globalOptions = effectiveConfig.global || {}; const lookupCandidates = getModelLookupCandidates(originalModel, normalizedModel); const resolvedModelKey = lookupCandidates.find( - (candidate) => !!userConfig.models?.[candidate], + (candidate) => !!effectiveConfig.models?.[candidate], ); const modelLookupKey = resolvedModelKey ?? 
normalizedModel; - const modelOptions = userConfig.models?.[modelLookupKey]?.options || {}; + const modelOptions = effectiveConfig.models?.[modelLookupKey]?.options || {}; // Get model-specific configuration using ORIGINAL model name (config key) // with fallbacks for provider-prefixed and normalized aliases - const modelConfig = getModelConfig(modelLookupKey, userConfig); - const personality = resolvePersonality( - modelOptions, - globalOptions, - runtimeDefaults, - ); + const modelConfig = getModelConfig(modelLookupKey, effectiveConfig); + const personality = resolvePersonality(modelLookupKey, pluginConfig); logDebug( `Model config lookup: "${modelLookupKey}" β†’ normalized to "${normalizedModel}" for API`, @@ -481,8 +519,8 @@ export async function transformRequestBody( lookupCandidates, hasModelSpecificConfig: !!resolvedModelKey, resolvedConfig: modelConfig, - personality, - }, + personality: personality.value, + }, ); // Normalize model name for API call @@ -537,15 +575,17 @@ export async function transformRequestBody( normalizedModel, modelConfig, body, + runtimeDefaults, ); body.reasoning = { ...body.reasoning, ...reasoningConfig, }; + const verbosity = resolveTextVerbosity(modelConfig, body, runtimeDefaults); body.text = { ...body.text, - verbosity: resolveTextVerbosity(modelConfig, body), + ...(verbosity ? 
{ verbosity } : {}), }; body.include = resolveInclude(modelConfig, body); diff --git a/lib/request/response-handler.ts b/lib/request/response-handler.ts index 866589a..7785573 100644 --- a/lib/request/response-handler.ts +++ b/lib/request/response-handler.ts @@ -108,3 +108,26 @@ export function ensureContentType(headers: Headers): Headers { return responseHeaders; } + +export function createSyntheticErrorResponse( + message: string, + status = 400, + type = "hard_stop", + param?: string, +): Response { + const errorPayload: { error: { message: string; type: string; param?: string } } = { + error: { + message, + type, + }, + }; + + if (param) { + errorPayload.error.param = param; + } + + return new Response(JSON.stringify(errorPayload), { + status, + headers: { "content-type": "application/json; charset=utf-8" }, + }); +} diff --git a/lib/types.ts b/lib/types.ts index db3cb7c..b6a828a 100644 --- a/lib/types.ts +++ b/lib/types.ts @@ -4,13 +4,6 @@ import type { Auth, Provider, Model } from "@opencode-ai/sdk"; * Plugin configuration from ~/.config/opencode/openai-codex-auth-config.json */ export interface PluginConfig { - /** - * Legacy toggle for bridge mode. - * Deprecated: bridge injection has been removed and this flag no longer changes runtime behavior. - * @default false - */ - codexMode?: boolean; - /** * Account selection strategy * - sticky: keep same account until rate-limited (best for caching) @@ -111,6 +104,20 @@ export interface PluginConfig { */ requestJitterMaxMs?: number; + /** + * Custom personality configuration (global). + */ + custom_settings?: { + options?: ConfigOptions; + models?: { + [modelName: string]: { + options?: ConfigOptions; + variants?: Record; + [key: string]: unknown; + }; + }; + }; + /** * Retry when all accounts rate-limited. * @default false @@ -129,6 +136,30 @@ export interface PluginConfig { * @default 1 */ retryAllAccountsMaxRetries?: number; + + /** + * Hard-stop max wait before returning synthetic error (ms). 
+ * @default 10000 + */ + hardStopMaxWaitMs?: number; + + /** + * Hard-stop when model is not in the server catalog. + * @default true + */ + hardStopOnUnknownModel?: boolean; + + /** + * Hard-stop when all accounts are in auth-failure cooldown. + * @default true + */ + hardStopOnAllAuthFailed?: boolean; + + /** + * Max consecutive failures before hard-stop. + * @default 5 + */ + hardStopMaxConsecutiveFailures?: number; } export type AccountSelectionStrategy = "sticky" | "round-robin" | "hybrid"; @@ -183,7 +214,7 @@ export interface ConfigOptions { reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh"; reasoningSummary?: "auto" | "concise" | "detailed" | "off" | "on"; textVerbosity?: "low" | "medium" | "high"; - personality?: "none" | "friendly" | "pragmatic"; + personality?: string; include?: string[]; } diff --git a/package-lock.json b/package-lock.json index 8863a80..11a72a8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,7 +10,6 @@ "license": "MIT", "dependencies": { "@openauthjs/openauth": "^0.4.3", - "hono": "^4.10.4", "jsonc-parser": "^3.3.1", "proper-lockfile": "^4.1.2" }, @@ -23,6 +22,7 @@ "@opencode-ai/sdk": "^1.0.150", "@types/node": "^24.6.2", "@types/proper-lockfile": "^4.1.2", + "@vitest/coverage-v8": "^3.2.4", "@vitest/ui": "^3.2.4", "typescript": "^5.9.3", "vitest": "^3.2.4" @@ -34,6 +34,80 @@ "@opencode-ai/plugin": "^1.0.150" } }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-1.0.2.tgz", + "integrity": "sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/@esbuild/aix-ppc64": { "version": "0.25.10", "resolved": 
"https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.10.tgz", @@ -476,6 +550,55 @@ "node": ">=18" } }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.5", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", @@ -483,6 +606,17 @@ "dev": true, "license": "MIT" }, 
+ "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, "node_modules/@openauthjs/openauth": { "version": "0.4.3", "resolved": "https://registry.npmjs.org/@openauthjs/openauth/-/openauth-0.4.3.tgz", @@ -565,6 +699,17 @@ "license": "MIT", "peer": true }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@polka/url": { "version": "1.0.0-next.29", "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.29.tgz", @@ -937,6 +1082,40 @@ "dev": true, "license": "MIT" }, + "node_modules/@vitest/coverage-v8": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-3.2.4.tgz", + "integrity": "sha512-EyF9SXU6kS5Ku/U82E259WSnvg6c8KTjppUncuNdm5QHpe17mwREHnjDzozC8x9MZ0xfBUFSaLkRv4TMA75ALQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.3.0", + "@bcoe/v8-coverage": "^1.0.2", + "ast-v8-to-istanbul": "^0.3.3", + "debug": "^4.4.1", + "istanbul-lib-coverage": "^3.2.2", + "istanbul-lib-report": "^3.0.1", + "istanbul-lib-source-maps": "^5.0.6", + "istanbul-reports": "^3.1.7", + "magic-string": "^0.30.17", + "magicast": "^0.3.5", + "std-env": "^3.9.0", + "test-exclude": "^7.0.1", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + 
"@vitest/browser": "3.2.4", + "vitest": "3.2.4" + }, + "peerDependenciesMeta": { + "@vitest/browser": { + "optional": true + } + } + }, "node_modules/@vitest/expect": { "version": "3.2.4", "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", @@ -1095,6 +1274,32 @@ "url": "https://opencollective.com/vitest" } }, + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, "node_modules/arctic": { "version": "2.3.4", "resolved": "https://registry.npmjs.org/arctic/-/arctic-2.3.4.tgz", @@ -1117,12 +1322,48 @@ "node": ">=12" } }, + "node_modules/ast-v8-to-istanbul": { + "version": "0.3.11", + "resolved": "https://registry.npmjs.org/ast-v8-to-istanbul/-/ast-v8-to-istanbul-0.3.11.tgz", + "integrity": "sha512-Qya9fkoofMjCBNVdWINMjB5KZvkYfaO9/anwkWnjxibpWUxo5iHl2sOdP7/uAqaRuUYuoo8rDwnbaaKVFxoUvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.31", + "estree-walker": "^3.0.3", + "js-tokens": "^10.0.0" + } + }, + "node_modules/ast-v8-to-istanbul/node_modules/js-tokens": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-10.0.0.tgz", + "integrity": "sha512-lM/UBzQmfJRo9ABXbPWemivdCW8V2G8FHaHdypQaIy523snUjog0W71ayWXTjiR+ixeMyVHN2XcpnTd/liPg/Q==", 
+ "dev": true, + "license": "MIT" + }, "node_modules/aws4fetch": { "version": "1.0.20", "resolved": "https://registry.npmjs.org/aws4fetch/-/aws4fetch-1.0.20.tgz", "integrity": "sha512-/djoAN709iY65ETD6LKCtyyEI04XIBP5xVvfmNxsEP0uJB5tyaGBztSryRr4HqMStr9R06PisQE7m9zDTXKu6g==", "license": "MIT" }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, "node_modules/cac": { "version": "6.7.14", "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", @@ -1160,6 +1401,41 @@ "node": ">= 16" } }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": 
"sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/debug": { "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", @@ -1188,6 +1464,20 @@ "node": ">=6" } }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, "node_modules/es-module-lexer": { "version": "1.7.0", "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", @@ -1289,6 +1579,36 @@ "dev": true, "license": "ISC" }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": 
"ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -1304,21 +1624,148 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "deprecated": "Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/graceful-fs": { "version": "4.2.11", "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", "license": "ISC" }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/hono": { - "version": "4.11.5", - "resolved": "https://registry.npmjs.org/hono/-/hono-4.11.5.tgz", - "integrity": "sha512-WemPi9/WfyMwZs+ZUXdiwcCh9Y+m7L+8vki9MzDw3jJ+W9Lc+12HGsd368Qc1vZi1xwW8BWMMsnK5efYKPdt4g==", + "version": "4.11.8", + "resolved": 
"https://registry.npmjs.org/hono/-/hono-4.11.8.tgz", + "integrity": "sha512-eVkB/CYCCei7K2WElZW9yYQFWssG0DhaDhVvr7wy5jJ22K+ck8fWW0EsLpB0sITUTvPnc97+rrbQqIr5iqiy9Q==", "license": "MIT", + "peer": true, "engines": { "node": ">=16.9.0" } }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + 
"node_modules/istanbul-lib-source-maps": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz", + "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.23", + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, "node_modules/jose": { "version": "5.9.6", "resolved": "https://registry.npmjs.org/jose/-/jose-5.9.6.tgz", @@ -1348,6 +1795,13 @@ "dev": true, "license": "MIT" }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, "node_modules/magic-string": { "version": "0.30.19", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.19.tgz", @@ -1358,6 
+1812,60 @@ "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "node_modules/magicast": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz", + "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.25.4", + "@babel/types": "^7.25.4", + "source-map-js": "^1.2.0" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/mrmime": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.1.tgz", @@ -1394,6 +1902,40 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/pathval": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", @@ -1515,6 +2057,42 @@ "fsevents": "~2.3.2" } }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/siginfo": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", @@ -1567,6 +2145,110 @@ "dev": true, "license": "MIT" }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + 
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/strip-literal": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.1.0.tgz", @@ -1580,6 +2262,34 @@ "url": "https://github.com/sponsors/antfu" } }, + "node_modules/supports-color": { + "version": 
"7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.1.tgz", + "integrity": "sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^10.4.1", + "minimatch": "^9.0.4" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/tinybench": { "version": "2.9.0", "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", @@ -1857,6 +2567,22 @@ "dev": true, "license": "MIT" }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/why-is-node-running": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", @@ -1874,6 +2600,104 @@ "node": ">=8" } }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/zod": { "version": "4.1.8", "resolved": "https://registry.npmjs.org/zod/-/zod-4.1.8.tgz", diff --git a/package.json b/package.json index fe07337..f2633eb 100644 --- a/package.json +++ b/package.json @@ -71,18 +71,18 @@ "@opencode-ai/sdk": "^1.0.150", "@types/node": "^24.6.2", "@types/proper-lockfile": "^4.1.2", + "@vitest/coverage-v8": "^3.2.4", "@vitest/ui": "^3.2.4", "typescript": "^5.9.3", "vitest": "^3.2.4" }, "dependencies": { "@openauthjs/openauth": "^0.4.3", - "hono": "^4.10.4", "jsonc-parser": "^3.3.1", "proper-lockfile": "^4.1.2" }, "overrides": { - "hono": "^4.10.4", + "hono": "^4.11.8", "vite": "^7.1.12" } } diff --git a/test/README.md b/test/README.md index 93c3ecc..1098b31 100644 --- a/test/README.md +++ b/test/README.md @@ -51,7 +51,7 @@ Tests configuration parsing and merging: ### request-transformer.test.ts (30 tests) Tests request body transformations: -- Model name normalization (all variants β†’ gpt-5 or gpt-5-codex) +- Model name normalization (known variants collapse to gpt-5.x bases; unknown/legacy slugs are preserved) - Input filtering (removing stored conversation history) - Tool remap message injection - Reasoning configuration application diff --git a/test/catalog-defaults.test.ts b/test/catalog-defaults.test.ts new file mode 100644 index 
0000000..035ed8e --- /dev/null +++ b/test/catalog-defaults.test.ts @@ -0,0 +1,138 @@ +import { afterEach, describe, expect, it } from "vitest"; +import { mkdtempSync, mkdirSync, rmSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; + +import { + buildInternalModelDefaults, + mergeModelDefaults, +} from "../lib/catalog-defaults.js"; + +describe("catalog internal defaults", () => { + const originalXdg = process.env.XDG_CONFIG_HOME; + + afterEach(() => { + if (originalXdg === undefined) { + delete process.env.XDG_CONFIG_HOME; + } else { + process.env.XDG_CONFIG_HOME = originalXdg; + } + }); + + it("adds catalog models using template defaults", () => { + const root = mkdtempSync(join(tmpdir(), "catalog-defaults-")); + process.env.XDG_CONFIG_HOME = root; + try { + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + models: [{ slug: "gpt-5.3-codex" }], + }), + "utf8", + ); + + const defaults = buildInternalModelDefaults(); + + expect(defaults["gpt-5.3-codex"]).toBeDefined(); + expect(defaults["gpt-5.3-codex"].name).toBe( + "GPT 5.3 Codex (OAuth)", + ); + expect(defaults["gpt-5.3-codex"].limit?.context).toBe( + defaults["gpt-5.2-codex"].limit?.context, + ); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("uses gpt-5.3-codex as template for unknown codex models", () => { + const root = mkdtempSync(join(tmpdir(), "catalog-defaults-unknown-")); + process.env.XDG_CONFIG_HOME = root; + try { + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + models: [{ slug: "gpt-5.9-codex" }], + }), + "utf8", + ); + + const defaults = buildInternalModelDefaults(); + + 
expect(defaults["gpt-5.9-codex"]).toBeDefined(); + expect(defaults["gpt-5.9-codex"].name).toBe( + "GPT 5.9 Codex (OAuth)", + ); + // Should have variants from gpt-5.3-codex, not gpt-5.2-codex + // (They are currently identical in opencode-modern.json, but 5.3 is the better template) + expect(defaults["gpt-5.9-codex"].limit?.context).toBe( + defaults["gpt-5.3-codex"].limit?.context, + ); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("overrides template defaults with live metadata", () => { + const root = mkdtempSync(join(tmpdir(), "catalog-defaults-live-")); + process.env.XDG_CONFIG_HOME = root; + try { + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + models: [ + { + slug: "gpt-5.3-codex", + display_name: "Codex 5.3", + context_window: 123456, + truncation_policy: { + mode: "tokens", + limit: 4242, + }, + input_modalities: ["text"], + }, + ], + }), + "utf8", + ); + + const defaults = buildInternalModelDefaults(); + + expect(defaults["gpt-5.3-codex"].name).toBe("Codex 5.3"); + expect(defaults["gpt-5.3-codex"].limit?.context).toBe(123456); + expect(defaults["gpt-5.3-codex"].limit?.output).toBe(4242); + expect(defaults["gpt-5.3-codex"].modalities?.input).toEqual(["text"]); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("merges config overrides above internal defaults", () => { + const defaults = { + "gpt-5.2-codex": { name: "Default" }, + "gpt-5.1": { name: "Default 5.1" }, + }; + const overrides = { + "gpt-5.2-codex": { name: "Custom" }, + "custom-model": { name: "Custom" }, + }; + + const merged = mergeModelDefaults(overrides, defaults); + + expect(merged["gpt-5.2-codex"].name).toBe("Custom"); + expect(merged["gpt-5.1"].name).toBe("Default 5.1"); + expect(merged["custom-model"].name).toBe("Custom"); + }); +}); diff --git 
a/test/codex-instructions.test.ts b/test/codex-instructions.test.ts new file mode 100644 index 0000000..58d52c6 --- /dev/null +++ b/test/codex-instructions.test.ts @@ -0,0 +1,192 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; +import { mkdtempSync, mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; + +const originalXdg = process.env.XDG_CONFIG_HOME; + +async function loadModule() { + vi.resetModules(); + return import("../lib/prompts/codex.js"); +} + +describe("codex instructions cache", () => { + afterEach(() => { + if (originalXdg === undefined) { + delete process.env.XDG_CONFIG_HOME; + } else { + process.env.XDG_CONFIG_HOME = originalXdg; + } + vi.restoreAllMocks(); + vi.unstubAllGlobals(); + }); + + it("warms instructions cache on startup and reuses in-session", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-instructions-warm-")); + process.env.XDG_CONFIG_HOME = root; + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + + const cacheFile = join(cacheDir, "codex-instructions.md"); + const metaFile = join(cacheDir, "codex-instructions-meta.json"); + const cachedValue = "cached instructions"; + writeFileSync(cacheFile, cachedValue, "utf8"); + writeFileSync( + metaFile, + JSON.stringify({ + etag: null, + tag: "rust-v1.0.0", + lastChecked: Date.now(), + url: "https://example.test", + }), + "utf8", + ); + + const fetchSpy = vi.fn(async () => { + throw new Error("network"); + }); + vi.stubGlobal("fetch", fetchSpy); + + const { warmCodexInstructions, getCodexInstructions } = await loadModule(); + + await warmCodexInstructions(); + + expect(fetchSpy).not.toHaveBeenCalled(); + + const first = await getCodexInstructions("codex"); + expect(first).toBe(cachedValue); + + writeFileSync(cacheFile, "tampered", "utf8"); + + const second = await getCodexInstructions("codex"); + expect(second).toBe(cachedValue); + + 
rmSync(root, { recursive: true, force: true }); + }); + + it("refreshes stale cache on warm and overwrites cache atomically", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-instructions-stale-")); + process.env.XDG_CONFIG_HOME = root; + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + + const cacheFile = join(cacheDir, "codex-instructions.md"); + const metaFile = join(cacheDir, "codex-instructions-meta.json"); + const staleChecked = Date.now() - 16 * 60 * 1000; + writeFileSync(cacheFile, "stale instructions", "utf8"); + writeFileSync( + metaFile, + JSON.stringify({ + etag: '"old"', + tag: "rust-v1.0.0", + lastChecked: staleChecked, + url: "https://example.test", + }), + "utf8", + ); + + const newInstructions = "fresh instructions"; + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + if (url.includes("api.github.com/repos/openai/codex/releases/latest")) { + return new Response(JSON.stringify({ tag_name: "rust-v9.9.9" }), { + status: 200, + }); + } + if (url.includes("raw.githubusercontent.com/openai/codex/rust-v9.9.9")) { + return new Response(newInstructions, { + status: 200, + headers: { etag: '"next"' }, + }); + } + throw new Error(`Unexpected URL: ${url}`); + }); + vi.stubGlobal("fetch", fetchMock); + + const { warmCodexInstructions, getCodexInstructions } = await loadModule(); + + await warmCodexInstructions(); + + expect(fetchMock).toHaveBeenCalled(); + expect(readFileSync(cacheFile, "utf8")).toBe(newInstructions); + const meta = JSON.parse(readFileSync(metaFile, "utf8")) as { + tag?: string; + etag?: string; + lastChecked?: number; + }; + expect(meta.tag).toBe("rust-v9.9.9"); + expect(meta.etag).toBe('"next"'); + expect(meta.lastChecked).toBeGreaterThan(staleChecked); + + const warmed = await getCodexInstructions("codex"); + expect(warmed).toBe(newInstructions); + + rmSync(root, { recursive: true, force: true }); + }); + + it("re-fetches when GitHub 
returns 304 but cache file is missing", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-instructions-304-missing-")); + process.env.XDG_CONFIG_HOME = root; + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + + const cacheFile = join(cacheDir, "codex-instructions.md"); + const metaFile = join(cacheDir, "codex-instructions-meta.json"); + const staleChecked = Date.now() - 16 * 60 * 1000; + writeFileSync( + metaFile, + JSON.stringify({ + etag: '"stale"', + tag: "rust-v9.9.9", + lastChecked: staleChecked, + url: "https://example.test", + }), + "utf8", + ); + + const newInstructions = "fresh instructions after refetch"; + let rawFetchCount = 0; + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + if (url.includes("api.github.com/repos/openai/codex/releases/latest")) { + return new Response(JSON.stringify({ tag_name: "rust-v9.9.9" }), { + status: 200, + }); + } + if (url.includes("raw.githubusercontent.com/openai/codex/rust-v9.9.9")) { + rawFetchCount += 1; + if (rawFetchCount === 1) { + return { + status: 304, + ok: false, + text: async () => "", + headers: new Headers(), + } as Response; + } + return new Response(newInstructions, { + status: 200, + headers: { etag: '"next"' }, + }); + } + throw new Error(`Unexpected URL: ${url}`); + }); + vi.stubGlobal("fetch", fetchMock); + + const { getCodexInstructions } = await loadModule(); + const instructions = await getCodexInstructions("codex"); + + expect(instructions).toBe(newInstructions); + expect(rawFetchCount).toBe(2); + expect(readFileSync(cacheFile, "utf8")).toBe(newInstructions); + const meta = JSON.parse(readFileSync(metaFile, "utf8")) as { + tag?: string; + etag?: string; + lastChecked?: number; + }; + expect(meta.tag).toBe("rust-v9.9.9"); + expect(meta.etag).toBe('"next"'); + expect(meta.lastChecked).toBeGreaterThan(staleChecked); + + rmSync(root, { recursive: true, force: true }); + }); +}); diff --git 
a/test/codex-models.test.ts b/test/codex-models.test.ts index bd1a8c8..8e3568a 100644 --- a/test/codex-models.test.ts +++ b/test/codex-models.test.ts @@ -1,13 +1,33 @@ import { afterEach, describe, expect, it, vi } from "vitest"; -import { mkdtempSync, mkdirSync, rmSync, writeFileSync } from "node:fs"; +import { + existsSync, + mkdtempSync, + mkdirSync, + readFileSync, + rmSync, + writeFileSync, +} from "node:fs"; import { tmpdir } from "node:os"; -import { join } from "node:path"; +import { dirname, join } from "node:path"; const originalXdg = process.env.XDG_CONFIG_HOME; +function maybeReleaseTagResponse(url: string): Response | null { + if (!url.includes("api.github.com/repos/openai/codex/releases/latest")) { + return null; + } + return new Response(JSON.stringify({ tag_name: "rust-v9.8.7" }), { + status: 200, + }); +} + async function loadModule() { vi.resetModules(); - return import("../lib/prompts/codex-models.js"); + const [models, errors] = await Promise.all([ + import("../lib/prompts/codex-models.js"), + import("../lib/request/errors.js"), + ]); + return { ...models, ...errors }; } describe("codex model metadata resolver", () => { @@ -24,15 +44,21 @@ describe("codex model metadata resolver", () => { const root = mkdtempSync(join(tmpdir(), "codex-models-server-")); process.env.XDG_CONFIG_HOME = root; const { getCodexModelRuntimeDefaults } = await loadModule(); + let capturedUrl = ""; const mockFetch = vi.fn(async (input: RequestInfo | URL) => { const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; if (url.includes("/codex/models")) { + capturedUrl = url; return new Response( JSON.stringify({ models: [ { slug: "gpt-5.3-codex", + base_instructions: "Server base instructions", + apply_patch_tool_type: "freeform", model_messages: { instructions_template: "Base {{ personality }}", instructions_variables: { @@ -57,10 +83,437 @@ describe("codex model metadata resolver", () => { }); 
expect(mockFetch).toHaveBeenCalled(); - expect(mockFetch.mock.calls[0]?.[0]?.toString()).toContain("/codex/models"); + expect(capturedUrl).toContain("/codex/models"); expect(defaults.onlineDefaultPersonality).toBeUndefined(); expect(defaults.personalityMessages?.friendly).toBe("Friendly from server"); + expect(defaults.baseInstructions).toBe("Server base instructions"); + expect(defaults.applyPatchToolType).toBe("freeform"); + + rmSync(root, { recursive: true, force: true }); + }); + + it("seeds friendly/pragmatic personality cache from server data", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-personality-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults } = await loadModule(); + + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_variables: { + personality_friendly: "Friendly from server", + personality_pragmatic: "Pragmatic from server", + }, + }, + }, + ], + }), + { status: 200 }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + + const personalityDir = join(root, "opencode", "Personalities"); + const friendly = readFileSync(join(personalityDir, "Friendly.md"), "utf8"); + const pragmatic = readFileSync(join(personalityDir, "Pragmatic.md"), "utf8"); + expect(friendly).toContain("Friendly from server"); + expect(pragmatic).toContain("Pragmatic from server"); + + rmSync(root, { recursive: true, force: true }); + }); + + it("does not overwrite personality cache when catalog is unavailable", async () => { + const root = mkdtempSync(join(tmpdir(), 
"codex-models-personality-static-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults, ModelCatalogUnavailableError } = await loadModule(); + + const personalityDir = join(root, "opencode", "Personalities"); + mkdirSync(personalityDir, { recursive: true }); + writeFileSync(join(personalityDir, "Friendly.md"), "Old friendly", "utf8"); + writeFileSync( + join(personalityDir, "Pragmatic.md"), + "Old pragmatic", + "utf8", + ); + + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + throw new Error("offline"); + } + if (url.includes("raw.githubusercontent.com/openai/codex/")) { + throw new Error("github offline"); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + await expect( + getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }), + ).rejects.toThrow(ModelCatalogUnavailableError); + + expect(readFileSync(join(personalityDir, "Friendly.md"), "utf8")).toBe( + "Old friendly", + ); + expect(readFileSync(join(personalityDir, "Pragmatic.md"), "utf8")).toBe( + "Old pragmatic", + ); + + rmSync(root, { recursive: true, force: true }); + }); + + it("does not overwrite user-managed personality files", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-personality-user-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults } = await loadModule(); + + const personalityDir = join(root, "opencode", "Personalities"); + mkdirSync(personalityDir, { recursive: true }); + writeFileSync(join(personalityDir, "Friendly.md"), "User friendly", "utf8"); + writeFileSync( + join(personalityDir, "Pragmatic.md"), + "User pragmatic", + "utf8", + ); + + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release 
= maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_variables: { + personality_friendly: "Friendly from server", + personality_pragmatic: "Pragmatic from server", + }, + }, + }, + ], + }), + { status: 200 }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + + expect(readFileSync(join(personalityDir, "Friendly.md"), "utf8")).toBe( + "User friendly", + ); + expect(readFileSync(join(personalityDir, "Pragmatic.md"), "utf8")).toBe( + "User pragmatic", + ); + + rmSync(root, { recursive: true, force: true }); + }); + + it("does not seed personalities from GitHub-sourced cache", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-personality-github-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, getCodexModelRuntimeDefaults, ModelCatalogUnavailableError } = + await loadModule(); + + mkdirSync(dirname(__internal.getModelsCacheFile("account")), { recursive: true }); + writeFileSync( + __internal.getModelsCacheFile("account"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "github", + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_variables: { + personality_friendly: "Friendly from github", + personality_pragmatic: "Pragmatic from github", + }, + }, + }, + ], + }), + "utf8", + ); + + await expect( + getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accountId: "account", + }), + ).rejects.toThrow(ModelCatalogUnavailableError); + + const personalityDir = join(root, "opencode", "Personalities"); + expect(existsSync(personalityDir)).toBe(false); + + rmSync(root, { recursive: true, force: true }); + }); + + it("seeds personalities from legacy cache without source", async () => 
{ + const root = mkdtempSync(join(tmpdir(), "codex-models-personality-legacy-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, getCodexModelRuntimeDefaults } = await loadModule(); + + mkdirSync(dirname(__internal.getModelsCacheFile("account")), { recursive: true }); + writeFileSync( + __internal.getModelsCacheFile("account"), + JSON.stringify({ + fetchedAt: Date.now(), + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_variables: { + personality_friendly: "Friendly from legacy cache", + personality_pragmatic: "Pragmatic from legacy cache", + }, + }, + }, + ], + }), + "utf8", + ); + + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accountId: "account", + }); + + const personalityDir = join(root, "opencode", "Personalities"); + const friendly = readFileSync(join(personalityDir, "Friendly.md"), "utf8"); + const pragmatic = readFileSync(join(personalityDir, "Pragmatic.md"), "utf8"); + expect(friendly).toContain("Friendly from legacy cache"); + expect(pragmatic).toContain("Pragmatic from legacy cache"); + + rmSync(root, { recursive: true, force: true }); + }); + + it("warns on personality cache write failures but returns defaults", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-personality-warn-")); + const previousLogging = process.env.ENABLE_PLUGIN_REQUEST_LOGGING; + process.env.ENABLE_PLUGIN_REQUEST_LOGGING = "1"; + process.env.XDG_CONFIG_HOME = root; + const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {}); + try { + const { getCodexModelRuntimeDefaults } = await loadModule(); + const personalityDir = join(root, "opencode", "Personalities"); + mkdirSync(join(root, "opencode"), { recursive: true }); + writeFileSync(personalityDir, "not-a-directory", "utf8"); + + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + return 
new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_variables: { + personality_friendly: "Friendly from server", + personality_pragmatic: "Pragmatic from server", + }, + }, + }, + ], + }), + { status: 200 }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + + expect(defaults.personalityMessages?.friendly).toBe( + "Friendly from server", + ); + expect(warnSpy).toHaveBeenCalled(); + } finally { + if (previousLogging === undefined) { + delete process.env.ENABLE_PLUGIN_REQUEST_LOGGING; + } else { + process.env.ENABLE_PLUGIN_REQUEST_LOGGING = previousLogging; + } + warnSpy.mockRestore(); + rmSync(root, { recursive: true, force: true }); + } + }); + + it("reads personalities map from instructions_variables", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-personality-map-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults } = await loadModule(); + + const mockFetch = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personalities: { + default: "", + friendly: "Friendly from map", + pragmatic: "Pragmatic from map", + }, + }, + }, + }, + ], + }), + { status: 200, headers: { etag: '"abc"' } }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: mockFetch as unknown as typeof fetch, + }); + + 
expect(defaults.personalityMessages?.friendly).toBe("Friendly from map"); + expect(defaults.personalityMessages?.pragmatic).toBe("Pragmatic from map"); + rmSync(root, { recursive: true, force: true }); + }); + + it("uses codex release semver for client_version", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-client-version-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults } = await loadModule(); + let capturedUrl = ""; + + const mockFetch = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + if (url.includes("api.github.com/repos/openai/codex/releases/latest")) { + return new Response(JSON.stringify({ tag_name: "rust-v9.8.7" }), { + status: 200, + }); + } + if (url.includes("/codex/models")) { + capturedUrl = url; + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personality_default: "", + personality_friendly: "Friendly from server", + personality_pragmatic: "Pragmatic from server", + }, + }, + }, + ], + }), + { status: 200, headers: { etag: '"abc"' } }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: mockFetch as unknown as typeof fetch, + }); + + expect(capturedUrl).toContain("client_version=9.8.7"); + rmSync(root, { recursive: true, force: true }); + }); + + it("falls back to cached client_version when release lookup fails", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-client-version-cache-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, getCodexModelRuntimeDefaults } = await loadModule(); + let capturedUrl = ""; + + const cacheDir = dirname(__internal.getModelsCacheFile("account")); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + 
__internal.CLIENT_VERSION_CACHE_FILE, + JSON.stringify({ version: "2.3.4", fetchedAt: Date.now() - 1000 }), + "utf8", + ); + + const mockFetch = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + if (url.includes("api.github.com/repos/openai/codex/releases/latest")) { + throw new Error("offline"); + } + if (url.includes("/codex/models")) { + capturedUrl = url; + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personality_default: "", + personality_friendly: "Friendly from server", + personality_pragmatic: "Pragmatic from server", + }, + }, + }, + ], + }), + { status: 200, headers: { etag: '"abc"' } }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: mockFetch as unknown as typeof fetch, + }); + expect(capturedUrl).toContain("client_version=2.3.4"); rmSync(root, { recursive: true, force: true }); }); @@ -96,90 +549,417 @@ describe("codex model metadata resolver", () => { fetchImpl: seedFetch as unknown as typeof fetch, }); - const failingFetch = vi.fn(async (input: RequestInfo | URL) => { + const failingFetch = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + throw new Error("offline"); + } + throw new Error(`unexpected URL: ${url}`); + }); + + const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: failingFetch as unknown as typeof fetch, + forceRefresh: true, + }); + + expect(defaults.personalityMessages?.friendly).toBe("Friendly from cache seed"); + rmSync(root, { recursive: true, force: true }); + }); + + it("removes invalid models cache files", async () => 
{ + const root = mkdtempSync(join(tmpdir(), "codex-models-cache-invalid-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal } = await loadModule(); + + const cacheFile = __internal.getModelsCacheFile("account"); + mkdirSync(dirname(cacheFile), { recursive: true }); + writeFileSync( + cacheFile, + JSON.stringify({ fetchedAt: Date.now(), source: "server" }), + "utf8", + ); + + const cached = __internal.readModelsCache("account"); + expect(cached).toBeNull(); + expect(existsSync(cacheFile)).toBe(false); + + rmSync(root, { recursive: true, force: true }); + }); + + it("refreshes stale cache with ETag and uses cached models on 304", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-online-first-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, getCodexModelRuntimeDefaults } = await loadModule(); + // Use account-scoped cache file + const accountId = "account"; + const cacheDir = dirname(__internal.getModelsCacheFile(accountId)); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + __internal.getModelsCacheFile(accountId), + JSON.stringify({ + fetchedAt: 0, + source: "server", + etag: '"etag-123"', + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personality_default: "", + personality_friendly: "Friendly from stale cache", + personality_pragmatic: "Pragmatic from stale cache", + }, + }, + }, + ], + }), + "utf8", + ); + + const refreshFetch = vi.fn(async (input: RequestInfo | URL, init?: RequestInit) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + const headers = init?.headers as Record<string, string> | undefined; + expect(headers?.["If-None-Match"]).toBe('"etag-123"'); + return new Response(null, { status: 304, headers: { etag: '"etag-123"' } }); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + const defaults = await 
getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: accountId, + fetchImpl: refreshFetch as unknown as typeof fetch, + }); + + expect(refreshFetch).toHaveBeenCalled(); + expect(defaults.personalityMessages?.friendly).toBe("Friendly from stale cache"); + rmSync(root, { recursive: true, force: true }); + }); + + it("warms model catalog into memory from cache", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-warm-cache-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, warmCodexModelCatalog, getCodexModelRuntimeDefaults } = await loadModule(); + + // Seed unauthenticated cache (no accountId) + const cacheDir = dirname(__internal.getModelsCacheFile()); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + __internal.getModelsCacheFile(), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + etag: '"warm-etag"', + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personality_default: "", + personality_friendly: "Friendly from warm cache", + personality_pragmatic: "Pragmatic from warm cache", + }, + }, + }, + ], + }), + "utf8", + ); + + const mockFetch = vi.fn(async () => { + throw new Error("unexpected fetch"); + }); + + // Warm unauthenticated cache + await warmCodexModelCatalog(); + + // Call without accountId to use the same unauthenticated cache + const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + fetchImpl: mockFetch as unknown as typeof fetch, + }); + + expect(mockFetch).not.toHaveBeenCalled(); + expect(defaults.personalityMessages?.friendly).toBe("Friendly from warm cache"); + rmSync(root, { recursive: true, force: true }); + }); + + it("reuses in-memory catalog for repeated calls", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-memoized-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults } = await loadModule(); 
+ + const mockFetch = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personality_default: "", + personality_friendly: "Friendly from first fetch", + personality_pragmatic: "Pragmatic from first fetch", + }, + }, + }, + ], + }), + { status: 200, headers: { etag: '"fresh"' } }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: mockFetch as unknown as typeof fetch, + }); + const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: mockFetch as unknown as typeof fetch, + }); + + const serverCalls = mockFetch.mock.calls.filter((call) => + call[0]?.toString().includes("/codex/models"), + ).length; + expect(serverCalls).toBe(1); + expect(defaults.personalityMessages?.friendly).toBe("Friendly from first fetch"); + rmSync(root, { recursive: true, force: true }); + }); + + it("avoids repeated server fetches when stale cache exists", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-stale-retry-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, getCodexModelRuntimeDefaults } = await loadModule(); + + const cacheDir = dirname(__internal.getModelsCacheFile("account")); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + __internal.getModelsCacheFile("account"), + JSON.stringify({ + fetchedAt: 0, + source: "server", + etag: "etag-stale", + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personality_default: "", + 
personality_friendly: "Friendly from stale cache", + personality_pragmatic: "Pragmatic from stale cache", + }, + }, + }, + ], + }), + "utf8", + ); + + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + throw new Error("offline"); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + const defaultsFirst = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + const defaultsSecond = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + + const serverCalls = fetchMock.mock.calls.filter((call) => + call[0]?.toString().includes("/codex/models"), + ).length; + expect(serverCalls).toBe(1); + expect(defaultsFirst.personalityMessages?.friendly).toBe("Friendly from stale cache"); + expect(defaultsSecond.personalityMessages?.friendly).toBe("Friendly from stale cache"); + rmSync(root, { recursive: true, force: true }); + }); + + it("allows authenticated refresh after unauthenticated failure", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-auth-guard-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, getCodexModelRuntimeDefaults } = await loadModule(); + + const cacheDir = dirname(__internal.getModelsCacheFile("account")); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + __internal.getModelsCacheFile("account"), + JSON.stringify({ + fetchedAt: 0, + source: "server", + etag: "etag-stale", + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personality_default: "", + personality_friendly: "Friendly from stale cache", + personality_pragmatic: "Pragmatic from stale 
cache", + }, + }, + }, + ], + }), + "utf8", + ); + + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; if (url.includes("/codex/models")) { throw new Error("offline"); } - throw new Error(`unexpected URL: ${url}`); + throw new Error(`Unexpected URL: ${url}`); }); - const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { accessToken: "token", accountId: "account", - fetchImpl: failingFetch as unknown as typeof fetch, - forceRefresh: true, + fetchImpl: fetchMock as unknown as typeof fetch, }); - expect(defaults.personalityMessages?.friendly).toBe("Friendly from cache seed"); + const serverCalls = fetchMock.mock.calls.filter((call) => + call[0]?.toString().includes("/codex/models"), + ).length; + expect(serverCalls).toBe(2); rmSync(root, { recursive: true, force: true }); }); - it("attempts server refresh before using fresh cache", async () => { - const root = mkdtempSync(join(tmpdir(), "codex-models-online-first-")); + it("retries server fetch after short backoff", async () => { + vi.useFakeTimers(); + const root = mkdtempSync(join(tmpdir(), "codex-models-backoff-")); process.env.XDG_CONFIG_HOME = root; - const { getCodexModelRuntimeDefaults } = await loadModule(); + const { __internal, getCodexModelRuntimeDefaults } = await loadModule(); - const seedFetch = vi.fn(async () => { - return new Response( - JSON.stringify({ - models: [ - { - slug: "gpt-5.3-codex", - model_messages: { - instructions_template: "Base {{ personality }}", - instructions_variables: { - personality_default: "", - personality_friendly: "Friendly from stale cache", - personality_pragmatic: "Pragmatic from stale cache", - }, + const cacheDir = 
dirname(__internal.getModelsCacheFile("account")); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + __internal.getModelsCacheFile("account"), + JSON.stringify({ + fetchedAt: 0, + source: "server", + etag: "etag-stale", + models: [ + { + slug: "gpt-5.3-codex", + model_messages: { + instructions_template: "Base {{ personality }}", + instructions_variables: { + personality_default: "", + personality_friendly: "Friendly from stale cache", + personality_pragmatic: "Pragmatic from stale cache", }, }, - ], - }), - { status: 200 }, - ); + }, + ], + }), + "utf8", + ); + + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + throw new Error("offline"); + } + throw new Error(`Unexpected URL: ${url}`); }); + const start = new Date("2026-02-06T00:00:00.000Z"); + vi.setSystemTime(start); + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { accessToken: "token", accountId: "account", - fetchImpl: seedFetch as unknown as typeof fetch, + fetchImpl: fetchMock as unknown as typeof fetch, + }); + vi.setSystemTime(new Date(start.getTime() + 2 * 60 * 1000)); + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, }); - const refreshFetch = vi.fn(async (input: RequestInfo | URL) => { + const serverCalls = fetchMock.mock.calls.filter((call) => + call[0]?.toString().includes("/codex/models"), + ).length; + expect(serverCalls).toBe(2); + vi.useRealTimers(); + rmSync(root, { recursive: true, force: true }); + }); + + it("suppresses repeated server fetches without cache during backoff", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-backoff-nocache-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults, ModelCatalogUnavailableError } = await loadModule(); + + let 
serverCalls = 0; + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; if (url.includes("/codex/models")) { - return new Response( - JSON.stringify({ - models: [ - { - slug: "gpt-5.3-codex", - model_messages: { - instructions_template: "Base {{ personality }}", - instructions_variables: { - personality_default: "", - personality_friendly: "Friendly from server refresh", - personality_pragmatic: "Pragmatic from server refresh", - }, - }, - }, - ], - }), - { status: 200 }, - ); + serverCalls += 1; + throw new Error("offline"); + } + if (url.includes("raw.githubusercontent.com/openai/codex/")) { + throw new Error("github offline"); } throw new Error(`Unexpected URL: ${url}`); }); - const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { - accessToken: "token", - accountId: "account", - fetchImpl: refreshFetch as unknown as typeof fetch, - }); + await expect( + getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }), + ).rejects.toThrow(ModelCatalogUnavailableError); + await expect( + getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }), + ).rejects.toThrow(ModelCatalogUnavailableError); - expect(refreshFetch).toHaveBeenCalled(); - expect(defaults.personalityMessages?.friendly).toBe("Friendly from server refresh"); + expect(serverCalls).toBe(1); rmSync(root, { recursive: true, force: true }); }); @@ -253,7 +1033,7 @@ describe("codex model metadata resolver", () => { it("falls back to GitHub when server catalog succeeds but lacks requested model", async () => { const root = mkdtempSync(join(tmpdir(), "codex-models-server-miss-github-hit-")); process.env.XDG_CONFIG_HOME = root; - const { getCodexModelRuntimeDefaults } = await loadModule(); + const 
{ getCodexModelRuntimeDefaults, UnknownModelError } = await loadModule(); const mockFetch = vi.fn(async (input: RequestInfo | URL) => { const url = input.toString(); @@ -305,22 +1085,20 @@ describe("codex model metadata resolver", () => { throw new Error(`Unexpected URL: ${url}`); }); - const defaults = await getCodexModelRuntimeDefaults("gpt-5.4-codex", { - accessToken: "token", - accountId: "account", - fetchImpl: mockFetch as unknown as typeof fetch, - }); - - expect(defaults.personalityMessages?.friendly).toBe( - "Friendly from GitHub targeted fallback", - ); + await expect( + getCodexModelRuntimeDefaults("gpt-5.4-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: mockFetch as unknown as typeof fetch, + }), + ).rejects.toThrow(UnknownModelError); rmSync(root, { recursive: true, force: true }); }); it("falls back to GitHub models when cache is missing and server fails", async () => { const root = mkdtempSync(join(tmpdir(), "codex-models-github-fallback-")); process.env.XDG_CONFIG_HOME = root; - const { getCodexModelRuntimeDefaults } = await loadModule(); + const { getCodexModelRuntimeDefaults, ModelCatalogUnavailableError } = await loadModule(); const mockFetch = vi.fn(async (input: RequestInfo | URL) => { const url = input.toString(); @@ -355,19 +1133,18 @@ describe("codex model metadata resolver", () => { throw new Error(`Unexpected URL: ${url}`); }); - const defaults = await getCodexModelRuntimeDefaults("gpt-5.4-codex", { - fetchImpl: mockFetch as unknown as typeof fetch, - }); - - expect(defaults.onlineDefaultPersonality).toBeUndefined(); - expect(defaults.personalityMessages?.friendly).toBe("Friendly from GitHub"); + await expect( + getCodexModelRuntimeDefaults("gpt-5.4-codex", { + fetchImpl: mockFetch as unknown as typeof fetch, + }), + ).rejects.toThrow(ModelCatalogUnavailableError); rmSync(root, { recursive: true, force: true }); }); it("falls back to GitHub main when release tag lookup fails", async () => { const root = 
mkdtempSync(join(tmpdir(), "codex-models-github-main-fallback-")); process.env.XDG_CONFIG_HOME = root; - const { getCodexModelRuntimeDefaults } = await loadModule(); + const { getCodexModelRuntimeDefaults, ModelCatalogUnavailableError } = await loadModule(); const mockFetch = vi.fn(async (input: RequestInfo | URL) => { const url = input.toString(); @@ -400,11 +1177,11 @@ describe("codex model metadata resolver", () => { throw new Error(`Unexpected URL: ${url}`); }); - const defaults = await getCodexModelRuntimeDefaults("gpt-5.4-codex", { - fetchImpl: mockFetch as unknown as typeof fetch, - }); - - expect(defaults.personalityMessages?.friendly).toBe("Friendly from GitHub main"); + await expect( + getCodexModelRuntimeDefaults("gpt-5.4-codex", { + fetchImpl: mockFetch as unknown as typeof fetch, + }), + ).rejects.toThrow(ModelCatalogUnavailableError); rmSync(root, { recursive: true, force: true }); }); @@ -448,18 +1225,17 @@ describe("codex model metadata resolver", () => { it("falls back to static template defaults when server/cache/GitHub are unavailable", async () => { const root = mkdtempSync(join(tmpdir(), "codex-models-static-fallback-")); process.env.XDG_CONFIG_HOME = root; - const { getCodexModelRuntimeDefaults } = await loadModule(); + const { getCodexModelRuntimeDefaults, ModelCatalogUnavailableError } = await loadModule(); const failingFetch = vi.fn(async () => { throw new Error("offline"); }); - const defaults = await getCodexModelRuntimeDefaults("gpt-5.9-codex", { - fetchImpl: failingFetch as unknown as typeof fetch, - }); - - expect(defaults.onlineDefaultPersonality).toBeUndefined(); - expect(defaults.staticDefaultPersonality).toBe("none"); + await expect( + getCodexModelRuntimeDefaults("gpt-5.9-codex", { + fetchImpl: failingFetch as unknown as typeof fetch, + }), + ).rejects.toThrow(ModelCatalogUnavailableError); rmSync(root, { recursive: true, force: true }); }); @@ -489,7 +1265,314 @@ describe("codex model metadata resolver", () => { const { 
__internal } = await loadModule(); const defaults = __internal.readStaticTemplateDefaults(moduleDir); expect(defaults.get("gpt-5.9-codex")?.personality).toBe("friendly"); + expect(__internal.readStaticTemplateDefaults(moduleDir)).toBe(defaults); + + rmSync(root, { recursive: true, force: true }); + }); + + it("applies backoff guard even on cold start with no cache", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-backoff-coldstart-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults, ModelCatalogUnavailableError } = await loadModule(); + let serverCallCount = 0; + + const mockFetch = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + serverCallCount++; + throw new Error("Server unavailable"); + } + if (url.includes("github.com")) { + throw new Error("GitHub also down"); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + // First call - should attempt server and fail + await expect( + getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account1", + fetchImpl: mockFetch as unknown as typeof fetch, + }), + ).rejects.toThrow(ModelCatalogUnavailableError); + expect(serverCallCount).toBe(1); + + // Second call - should be gated by backoff (no server call) + await expect( + getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account1", + fetchImpl: mockFetch as unknown as typeof fetch, + }), + ).rejects.toThrow(ModelCatalogUnavailableError); + expect(serverCallCount).toBe(1); // Still 1 - backoff prevented call + + rmSync(root, { recursive: true, force: true }); + }); + + it("scopes model cache by account identity", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-account-scope-")); + process.env.XDG_CONFIG_HOME = root; + const { getCodexModelRuntimeDefaults, __internal } = await 
loadModule(); + + // Account 1 gets one set of models + const mockFetchAccount1 = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + base_instructions: "Account1 instructions", + }, + ], + }), + { status: 200, headers: { etag: '"acc1"' } }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + // Account 2 gets different models (e.g., pro tier) + const mockFetchAccount2 = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + base_instructions: "Account2 PRO instructions", + }, + { + slug: "gpt-5.2-pro", + base_instructions: "Pro-only model", + }, + ], + }), + { status: 200, headers: { etag: '"acc2"' } }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + // Fetch for account1 + const defaults1 = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token1", + accountId: "account1", + fetchImpl: mockFetchAccount1 as unknown as typeof fetch, + forceRefresh: true, + }); + expect(defaults1.baseInstructions).toBe("Account1 instructions"); + + // Fetch for account2 (should get different cache) + const defaults2 = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token2", + accountId: "account2", + fetchImpl: mockFetchAccount2 as unknown as typeof fetch, + forceRefresh: true, + }); + expect(defaults2.baseInstructions).toBe("Account2 PRO instructions"); + + // Verify cache files are separate + const cacheFile1 = __internal.getModelsCacheFile("account1"); + const cacheFile2 = __internal.getModelsCacheFile("account2"); + const hash1 = __internal.hashAccountId("account1"); 
+ const hash2 = __internal.hashAccountId("account2"); + expect(cacheFile1).not.toBe(cacheFile2); + expect(hash1).not.toBe(hash2); + expect(cacheFile1).toContain(hash1); + expect(cacheFile2).toContain(hash2); + expect(cacheFile1).not.toContain("account1"); + expect(cacheFile2).not.toContain("account2"); + + rmSync(root, { recursive: true, force: true }); + }); + + it("scopes cached variant efforts by account", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-efforts-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, getCachedVariantEfforts } = await loadModule(); + + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + + const account1Cache = { + fetchedAt: Date.now(), + source: "server", + models: [ + { + slug: "gpt-5.3-codex", + supported_reasoning_levels: [{ effort: "high" }, { effort: "medium" }], + }, + ], + }; + const account2Cache = { + fetchedAt: Date.now(), + source: "server", + models: [ + { + slug: "gpt-5.2-codex", + supported_reasoning_levels: [{ effort: "low" }], + }, + ], + }; + + writeFileSync( + __internal.getModelsCacheFile("account1"), + JSON.stringify(account1Cache), + "utf8", + ); + writeFileSync( + __internal.getModelsCacheFile("account2"), + JSON.stringify(account2Cache), + "utf8", + ); + + const efforts1 = getCachedVariantEfforts("account1"); + const efforts2 = getCachedVariantEfforts("account2"); + + expect(efforts1.get("gpt-5.3-codex")).toEqual(["high", "medium"]); + expect(efforts2.get("gpt-5.2-codex")).toEqual(["low"]); + + rmSync(root, { recursive: true, force: true }); + }); + + it("applies session TTL hard limit to in-memory cache", async () => { + const root = mkdtempSync(join(tmpdir(), "codex-models-session-ttl-")); + process.env.XDG_CONFIG_HOME = root; + + // Create stale cache on disk (older than session max age) + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + const staleCache = { + fetchedAt: Date.now() - 
(61 * 60 * 1000), // 61 minutes ago (past 1hr limit) + source: "server", + models: [{ slug: "gpt-5.3-codex", base_instructions: "Stale instructions" }], + etag: '"stale"', + }; + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify(staleCache), + "utf8", + ); + + const { getCodexModelRuntimeDefaults } = await loadModule(); + let serverCalled = false; + + const mockFetch = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + serverCalled = true; + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + base_instructions: "Fresh instructions", + }, + ], + }), + { status: 200, headers: { etag: '"fresh"' } }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + fetchImpl: mockFetch as unknown as typeof fetch, + }); + + // Should have fetched fresh data (cache was beyond session TTL) + expect(serverCalled).toBe(true); + expect(defaults.baseInstructions).toBe("Fresh instructions"); + + rmSync(root, { recursive: true, force: true }); + }); + + it("evicts stale in-memory cache and reuses fresh disk cache", async () => { + vi.useFakeTimers(); + const root = mkdtempSync(join(tmpdir(), "codex-models-session-evict-")); + process.env.XDG_CONFIG_HOME = root; + const { __internal, getCodexModelRuntimeDefaults } = await loadModule(); + + const cacheDir = dirname(__internal.getModelsCacheFile("account")); + mkdirSync(cacheDir, { recursive: true }); + + const fetchMock = vi.fn(async (input: RequestInfo | URL) => { + const url = input.toString(); + const release = maybeReleaseTagResponse(url); + if (release) return release; + if (url.includes("/codex/models")) { + return new Response( + JSON.stringify({ + models: [ + { + slug: "gpt-5.3-codex", + base_instructions: "Memory instructions", 
+ }, + ], + }), + { status: 200, headers: { etag: '"mem"' } }, + ); + } + throw new Error(`Unexpected URL: ${url}`); + }); + + const start = new Date("2026-02-06T00:00:00.000Z"); + vi.setSystemTime(start); + + await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + const initialServerCalls = fetchMock.mock.calls.filter((call) => + call[0]?.toString().includes("/codex/models"), + ).length; + expect(initialServerCalls).toBe(1); + + const expired = new Date(start.getTime() + 61 * 60 * 1000); + vi.setSystemTime(expired); + writeFileSync( + __internal.getModelsCacheFile("account"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + etag: '"disk"', + models: [ + { + slug: "gpt-5.3-codex", + base_instructions: "Disk instructions", + }, + ], + }), + "utf8", + ); + + const defaults = await getCodexModelRuntimeDefaults("gpt-5.3-codex", { + accessToken: "token", + accountId: "account", + fetchImpl: fetchMock as unknown as typeof fetch, + }); + expect(defaults.baseInstructions).toBe("Disk instructions"); + const finalServerCalls = fetchMock.mock.calls.filter((call) => + call[0]?.toString().includes("/codex/models"), + ).length; + expect(finalServerCalls).toBe(1); + vi.useRealTimers(); rmSync(root, { recursive: true, force: true }); }); }); diff --git a/test/codex.test.ts b/test/codex.test.ts index d05b14e..9142f2b 100644 --- a/test/codex.test.ts +++ b/test/codex.test.ts @@ -3,6 +3,20 @@ import { getModelFamily } from "../lib/prompts/codex.js"; describe("Codex Module", () => { describe("getModelFamily", () => { + describe("GPT-5.3 Codex family", () => { + it("should return gpt-5.3-codex for gpt-5.3-codex", () => { + expect(getModelFamily("gpt-5.3-codex")).toBe("gpt-5.3-codex"); + }); + + it("should return gpt-5.3-codex for gpt-5.3-codex-low", () => { + expect(getModelFamily("gpt-5.3-codex-low")).toBe("gpt-5.3-codex"); + }); + + it("should return gpt-5.3-codex 
for gpt-5.3-codex-xhigh", () => { + expect(getModelFamily("gpt-5.3-codex-xhigh")).toBe("gpt-5.3-codex"); + }); + }); + describe("GPT-5.2 Codex family", () => { it("should return gpt-5.2-codex for gpt-5.2-codex", () => { expect(getModelFamily("gpt-5.2-codex")).toBe("gpt-5.2-codex"); diff --git a/test/config.test.ts b/test/config.test.ts index 026c58a..4dbf31d 100644 --- a/test/config.test.ts +++ b/test/config.test.ts @@ -147,5 +147,11 @@ describe('Configuration Parsing', () => { const gpt5Reasoning = getReasoningConfig('gpt-5', {}); expect(gpt5Reasoning.effort).toBe('medium'); }); + + it('should treat gpt-5.3-codex as a first-class codex model supporting xhigh', () => { + const config = { reasoningEffort: 'xhigh' as const }; + const reasoning = getReasoningConfig('gpt-5.3-codex', config); + expect(reasoning.effort).toBe('xhigh'); + }); }); }); diff --git a/test/fetch-orchestrator.test.ts b/test/fetch-orchestrator.test.ts index 9ace90c..d61c5b0 100644 --- a/test/fetch-orchestrator.test.ts +++ b/test/fetch-orchestrator.test.ts @@ -1,16 +1,55 @@ import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest'; -import { FetchOrchestrator, FetchOrchestratorConfig } from '../lib/fetch-orchestrator.js'; +import { mkdtempSync, mkdirSync, readFileSync, rmSync, writeFileSync } from 'node:fs'; +import { tmpdir } from 'node:os'; +import { dirname, join } from 'node:path'; +import { FetchOrchestrator, FetchOrchestratorConfig, __internal } from '../lib/fetch-orchestrator.js'; import { AccountManager, formatAccountLabel } from '../lib/accounts.js'; import { RateLimitTracker } from '../lib/rate-limit.js'; import { CodexStatusManager } from '../lib/codex-status.js'; import { TokenBucketTracker, HealthScoreTracker } from '../lib/rotation.js'; import { PluginConfig } from '../lib/types.js'; +import { __internal as modelsInternal } from '../lib/prompts/codex-models.js'; +import { getOpencodeCacheDir } from '../lib/paths.js'; vi.mock('../lib/storage.js', () => ({ 
quarantineAccountsByRefreshToken: vi.fn(), replaceAccountsFile: vi.fn(), })); +const MODELS_CACHE_FIXTURE = JSON.parse( + readFileSync(new URL('./fixtures/codex-models-cache.json', import.meta.url), 'utf-8'), +); +const DEFAULT_CACHE_DIR = getOpencodeCacheDir(); + +function seedModelsCache(accountId: string, fetchedAt = Date.now()): void { + const cacheFile = modelsInternal.getModelsCacheFile(accountId); + mkdirSync(dirname(cacheFile), { recursive: true }); + const payload = { ...MODELS_CACHE_FIXTURE, fetchedAt }; + writeFileSync(cacheFile, JSON.stringify(payload), 'utf8'); +} + +function seedInstructionsCache(modelFamily: 'gpt-5.3-codex' | 'gpt-5.1'): void { + const cacheDir = DEFAULT_CACHE_DIR; + mkdirSync(cacheDir, { recursive: true }); + const fileName = + modelFamily === 'gpt-5.3-codex' + ? 'gpt-5.3-codex-instructions.md' + : 'gpt-5.1-instructions.md'; + const cacheFile = join(cacheDir, fileName); + const metaFile = join(cacheDir, fileName.replace('.md', '-meta.json')); + writeFileSync(cacheFile, `# ${modelFamily} instructions`, 'utf8'); + writeFileSync( + metaFile, + JSON.stringify({ + etag: null, + tag: 'test', + lastChecked: Date.now(), + url: 'https://example.com', + }), + 'utf8', + ); +} + describe('FetchOrchestrator', () => { let config: FetchOrchestratorConfig; let orchestrator: FetchOrchestrator; @@ -21,12 +60,30 @@ describe('FetchOrchestrator', () => { let codexStatus: any; let pluginConfig: PluginConfig; let quarantineAccountsByRefreshToken: any; + let configRoot: string; + let primaryAccountId: string; + let secondaryAccountId: string; + let previousXdgConfigHome: string | undefined; + let previousOpencodeHome: string | undefined; + let accountCounter = 0; const mockFetch = vi.fn(); beforeEach(async () => { vi.useFakeTimers(); vi.resetAllMocks(); + accountCounter += 1; + primaryAccountId = `acc-${accountCounter}`; + secondaryAccountId = `acc-${accountCounter}-2`; + configRoot = mkdtempSync(join(tmpdir(), 'fetch-orchestrator-config-')); + 
previousXdgConfigHome = process.env.XDG_CONFIG_HOME; + previousOpencodeHome = process.env.OPENCODE_HOME; + process.env.XDG_CONFIG_HOME = configRoot; + delete process.env.OPENCODE_HOME; + seedModelsCache(primaryAccountId); + seedModelsCache(secondaryAccountId); + seedInstructionsCache('gpt-5.1'); + seedInstructionsCache('gpt-5.3-codex'); const storageModule = (await import('../lib/storage.js')) as any; quarantineAccountsByRefreshToken = vi.mocked( storageModule.quarantineAccountsByRefreshToken, @@ -59,6 +116,7 @@ describe('FetchOrchestrator', () => { getMinTokenWaitMsForFamily: vi.fn().mockReturnValue(0), getAccountsSnapshot: vi.fn().mockReturnValue([]), getActiveIndexForFamily: vi.fn(), + allAccountsCoolingDown: vi.fn().mockReturnValue(false), }; rateLimitTracker = { @@ -111,11 +169,22 @@ describe('FetchOrchestrator', () => { afterEach(() => { vi.clearAllMocks(); vi.useRealTimers(); + if (previousXdgConfigHome === undefined) { + delete process.env.XDG_CONFIG_HOME; + } else { + process.env.XDG_CONFIG_HOME = previousXdgConfigHome; + } + if (previousOpencodeHome === undefined) { + delete process.env.OPENCODE_HOME; + } else { + process.env.OPENCODE_HOME = previousOpencodeHome; + } + rmSync(configRoot, { recursive: true, force: true }); }); it('should execute a successful request', async () => { accountManager.getAccountCount.mockReturnValue(1); - accountManager.getCurrentOrNextForFamily.mockReturnValue({ index: 0, accountId: 'acc1', email: 'test@example.com' }); + accountManager.getCurrentOrNextForFamily.mockReturnValue({ index: 0, accountId: primaryAccountId, email: 'test@example.com' }); accountManager.toAuthDetails.mockReturnValue({ access: 'valid-token', expires: Date.now() + 100000, @@ -136,7 +205,7 @@ describe('FetchOrchestrator', () => { it('should handle 401 Unauthorized and recover', async () => { accountManager.getAccountCount.mockReturnValue(1); - accountManager.getCurrentOrNextForFamily.mockReturnValue({ index: 0, accountId: 'acc1', email: 
'test@example.com' }); + accountManager.getCurrentOrNextForFamily.mockReturnValue({ index: 0, accountId: primaryAccountId, email: 'test@example.com' }); accountManager.toAuthDetails.mockReturnValue({ access: 'expired-token', expires: Date.now() + 100000, @@ -167,8 +236,8 @@ describe('FetchOrchestrator', () => { // First account accountManager.getCurrentOrNextForFamily - .mockReturnValueOnce({ index: 0, accountId: 'acc1', email: 'acc1@example.com' }) - .mockReturnValueOnce({ index: 1, accountId: 'acc2', email: 'acc2@example.com' }); + .mockReturnValueOnce({ index: 0, accountId: primaryAccountId, email: 'acc1@example.com' }) + .mockReturnValueOnce({ index: 1, accountId: secondaryAccountId, email: 'acc2@example.com' }); accountManager.toAuthDetails.mockReturnValue({ access: 'valid-token', @@ -209,13 +278,60 @@ describe('FetchOrchestrator', () => { expect(mockFetch).toHaveBeenCalledTimes(2); }); + it('recomputes request transform when account switches', async () => { + accountManager.getAccountCount.mockReturnValue(2); + accountManager.getCurrentOrNextForFamily + .mockReturnValueOnce({ index: 0, accountId: primaryAccountId, email: 'acc1@example.com' }) + .mockReturnValueOnce({ index: 1, accountId: secondaryAccountId, email: 'acc2@example.com' }); + accountManager.toAuthDetails.mockReturnValue({ + access: 'valid-token', + expires: Date.now() + 100000, + }); + + const fetchHelpers = await import('../lib/request/fetch-helpers.js'); + const transformSpy = vi + .spyOn(fetchHelpers, 'transformRequestForCodex') + .mockResolvedValue({ + body: { model: 'gpt-5.3-codex', prompt_cache_key: 'session' }, + updatedInit: { method: 'POST', body: '{}' }, + } as any); + + mockFetch.mockResolvedValueOnce(new Response('Rate limit', { + status: 429, + headers: { 'retry-after': '60' } + })); + rateLimitTracker.getBackoff.mockReturnValue({ + delayMs: 60000, + attempt: 1, + isDuplicate: false, + }); + mockFetch.mockResolvedValueOnce(new Response('{"success":true}', { + status: 200, + headers: 
{ 'Content-Type': 'application/json' } + })); + + const response = await orchestrator.execute('https://api.openai.com/v1/chat/completions', { + method: 'POST', + body: JSON.stringify({ model: 'gpt-5.3-codex' }), + }); + + expect(response.status).toBe(200); + expect(transformSpy).toHaveBeenCalledTimes(2); + expect(transformSpy.mock.calls[0]?.[3]).toEqual( + expect.objectContaining({ accountId: primaryAccountId }), + ); + expect(transformSpy.mock.calls[1]?.[3]).toEqual( + expect.objectContaining({ accountId: secondaryAccountId }), + ); + }); + it('passes quiet mode to rate-limit toast', async () => { accountManager.getAccountCount.mockReturnValue(2); accountManager.shouldShowAccountToast.mockReturnValue(true); accountManager.getCurrentOrNextForFamily - .mockReturnValueOnce({ index: 0, accountId: 'acc1', email: 'acc1@example.com' }) - .mockReturnValueOnce({ index: 1, accountId: 'acc2', email: 'acc2@example.com' }); + .mockReturnValueOnce({ index: 0, accountId: primaryAccountId, email: 'acc1@example.com' }) + .mockReturnValueOnce({ index: 1, accountId: secondaryAccountId, email: 'acc2@example.com' }); accountManager.toAuthDetails.mockReturnValue({ access: 'valid-token', @@ -243,9 +359,54 @@ describe('FetchOrchestrator', () => { expect(config.showToast).toHaveBeenCalledWith('Rate limited - switching account', 'warning', true); }); + it('caps seen session keys by max size and TTL', async () => { + accountManager.getAccountCount.mockReturnValue(1); + accountManager.getCurrentOrNextForFamily.mockReturnValue({ + index: 0, + accountId: primaryAccountId, + email: 'test@example.com', + }); + accountManager.toAuthDetails.mockReturnValue({ + access: 'valid-token', + expires: Date.now() + 100000, + }); + + mockFetch.mockImplementation(() => + Promise.resolve( + new Response('{"success":true}', { + status: 200, + headers: { 'Content-Type': 'application/json' }, + }), + ), + ); + + const start = new Date('2026-02-06T00:00:00.000Z'); + vi.setSystemTime(start); + + for (let i = 0; i 
< __internal.MAX_SESSION_KEYS + 1; i += 1) { + await orchestrator.execute('https://api.openai.com/v1/chat/completions', { + method: 'POST', + body: JSON.stringify({ prompt_cache_key: `session-${i}` }), + }); + } + + const seenSize = (orchestrator as any).seenSessionKeys.size; + expect(seenSize).toBeLessThanOrEqual(__internal.MAX_SESSION_KEYS); + + vi.setSystemTime( + new Date(start.getTime() + __internal.SESSION_KEY_TTL_MS + 1), + ); + await orchestrator.execute('https://api.openai.com/v1/chat/completions', { + method: 'POST', + body: JSON.stringify({ prompt_cache_key: 'session-new' }), + }); + const postTtlSize = (orchestrator as any).seenSessionKeys.size; + expect(postTtlSize).toBe(1); + }); + it('shows a toast when a new chat starts', async () => { accountManager.getAccountCount.mockReturnValue(1); - const account = { index: 0, accountId: 'acc1', email: 'test@example.com', plan: 'Pro' }; + const account = { index: 0, accountId: primaryAccountId, email: 'test@example.com', plan: 'Pro' }; accountManager.getCurrentOrNextForFamily.mockReturnValue(account); accountManager.toAuthDetails.mockReturnValue({ access: 'token', expires: Date.now() + 100000 }); mockFetch.mockImplementation(() => Promise.resolve(new Response('{"success":true}', { @@ -264,7 +425,7 @@ describe('FetchOrchestrator', () => { it('shows a toast when switching to an existing session', async () => { accountManager.getAccountCount.mockReturnValue(1); - const account = { index: 0, accountId: 'acc1', email: 'test@example.com', plan: 'Pro' }; + const account = { index: 0, accountId: primaryAccountId, email: 'test@example.com', plan: 'Pro' }; accountManager.getCurrentOrNextForFamily.mockReturnValue(account); accountManager.toAuthDetails.mockReturnValue({ access: 'token', expires: Date.now() + 100000 }); mockFetch.mockImplementation(() => Promise.resolve(new Response('{"success":true}', { @@ -291,8 +452,8 @@ describe('FetchOrchestrator', () => { it('shows a toast when the account changes', async () => { 
accountManager.getAccountCount.mockReturnValue(2); - const first = { index: 0, accountId: 'acc1', email: 'one@example.com', plan: 'Pro' }; - const second = { index: 1, accountId: 'acc2', email: 'two@example.com', plan: 'Pro' }; + const first = { index: 0, accountId: primaryAccountId, email: 'one@example.com', plan: 'Pro' }; + const second = { index: 1, accountId: secondaryAccountId, email: 'two@example.com', plan: 'Pro' }; accountManager.getCurrentOrNextForFamily .mockReturnValueOnce(first) .mockReturnValueOnce(second); @@ -316,15 +477,16 @@ describe('FetchOrchestrator', () => { }); it('should return 429 if all accounts are exhausted', async () => { + pluginConfig.hardStopMaxWaitMs = 0; // Use 2 accounts to force "switch" action instead of "wait" (infinite loop for 1 account) accountManager.getAccountCount.mockReturnValue(2); accountManager.getCurrentOrNextForFamily - .mockReturnValueOnce({ index: 0, accountId: 'acc1', email: 'acc1@example.com' }) - .mockReturnValueOnce({ index: 1, accountId: 'acc2', email: 'acc2@example.com' }); + .mockReturnValueOnce({ index: 0, accountId: primaryAccountId, email: 'acc1@example.com' }) + .mockReturnValueOnce({ index: 1, accountId: secondaryAccountId, email: 'acc2@example.com' }); accountManager.getAccountsSnapshot.mockReturnValue([ - { index: 0, accountId: 'acc1', email: 'acc1@example.com', enabled: true }, - { index: 1, accountId: 'acc2', email: 'acc2@example.com', enabled: true } + { index: 0, accountId: primaryAccountId, email: 'acc1@example.com', enabled: true }, + { index: 1, accountId: secondaryAccountId, email: 'acc2@example.com', enabled: true } ]); accountManager.toAuthDetails.mockReturnValue({ access: 'valid-token', @@ -346,9 +508,105 @@ describe('FetchOrchestrator', () => { expect(body.error.message).toContain('All 2 account(s) unavailable'); }); + it('hard-stops when all accounts rate-limited beyond max wait', async () => { + accountManager.getAccountCount.mockReturnValue(2); + accountManager.getCurrentOrNextForFamily 
+ .mockReturnValueOnce({ index: 0, accountId: primaryAccountId, email: 'acc1@example.com' }) + .mockReturnValueOnce({ index: 1, accountId: secondaryAccountId, email: 'acc2@example.com' }); + accountManager.getAccountsSnapshot.mockReturnValue([ + { index: 0, accountId: primaryAccountId, email: 'acc1@example.com', enabled: true }, + { index: 1, accountId: secondaryAccountId, email: 'acc2@example.com', enabled: true } + ]); + accountManager.toAuthDetails.mockReturnValue({ + access: 'valid-token', + expires: Date.now() + 100000, + }); + mockFetch.mockImplementation(() => Promise.resolve(new Response('Rate limit', { + status: 429, + headers: { 'retry-after': '60' } + }))); + rateLimitTracker.getBackoff.mockReturnValue({ delayMs: 60000, attempt: 1 }); + accountManager.getMinWaitTimeForFamilyWithHydration.mockResolvedValue(60000); + + const response = await orchestrator.execute('https://api.openai.com/v1/chat/completions', { method: 'POST' }); + + expect(response.status).toBe(429); + const body = await response.json(); + expect(body.error.type).toBe('all_accounts_rate_limited'); + expect(body.error.message).toContain('rate-limited'); + }); + + it('hard-stops when all accounts auth-failed', async () => { + accountManager.getAccountCount.mockReturnValue(1); + accountManager.getCurrentOrNextForFamily.mockReturnValue(null); + accountManager.getAccountsSnapshot.mockReturnValue([ + { index: 0, accountId: primaryAccountId, email: 'acc1@example.com', enabled: true, cooldownReason: 'auth-failure' } + ]); + accountManager.getMinWaitTimeForFamilyWithHydration.mockResolvedValue(60000); + accountManager.allAccountsCoolingDown.mockReturnValue(true); + + const response = await orchestrator.execute('https://api.openai.com/v1/chat/completions', { method: 'POST' }); + + expect(response.status).toBe(401); + const body = await response.json(); + expect(body.error.type).toBe('all_accounts_auth_failed'); + expect(body.error.message).toContain('auth'); + }); + + it('hard-stops on unsupported 
model errors', async () => { + accountManager.getAccountCount.mockReturnValue(1); + accountManager.getCurrentOrNextForFamily.mockReturnValue({ + index: 0, + accountId: primaryAccountId, + email: 'acc1@example.com', + }); + accountManager.toAuthDetails.mockReturnValue({ + access: 'valid-token', + expires: Date.now() + 100000, + }); + + const response = await orchestrator.execute('https://api.openai.com/v1/chat/completions', { + method: 'POST', + body: JSON.stringify({ model: 'gpt-0.0-bad' }), + }); + + expect(response.status).toBe(400); + const body = await response.json(); + expect(body.error.type).toBe('unsupported_model'); + expect(body.error.param).toBe('model'); + expect(body.error.message).toContain('gpt-0.0-bad'); + }); + + it('hard-stops when model catalog is unavailable', async () => { + const missingAccountId = `${primaryAccountId}-missing`; + accountManager.getAccountCount.mockReturnValue(1); + accountManager.getCurrentOrNextForFamily.mockReturnValue({ + index: 0, + accountId: missingAccountId, + email: 'acc1@example.com', + }); + accountManager.toAuthDetails.mockReturnValue({ + access: 'valid-token', + expires: Date.now() + 100000, + }); + mockFetch.mockRejectedValue(new Error('offline')); + + const response = await orchestrator.execute('https://api.openai.com/v1/chat/completions', { + method: 'POST', + body: JSON.stringify({ model: 'gpt-5.1' }), + }); + + expect(response.status).toBe(400); + const body = await response.json(); + expect(body.error.type).toBe('unsupported_model'); + expect(body.error.param).toBe('model'); + expect(body.error.message).toContain('gpt-5.1'); + expect(body.error.message).toContain('catalog'); + }); + it('should not loop infinitely on persistent 401', async () => { accountManager.getAccountCount.mockReturnValue(1); - accountManager.getCurrentOrNextForFamily.mockReturnValue({ index: 0, accountId: 'acc1', email: 'test@example.com' }); + accountManager.getCurrentOrNextForFamily.mockReturnValue({ index: 0, accountId: 
primaryAccountId, email: 'test@example.com' }); accountManager.toAuthDetails.mockReturnValue({ access: 'bad-token', expires: Date.now() + 100000 }); // Use mockImplementation to return a NEW Response each time @@ -370,7 +628,7 @@ describe('FetchOrchestrator', () => { it('shows a toast when auth fails', async () => { accountManager.getAccountCount.mockReturnValue(1); - const account = { index: 0, accountId: 'acc1', email: 'fail@example.com', plan: 'Pro' }; + const account = { index: 0, accountId: primaryAccountId, email: 'fail@example.com', plan: 'Pro' }; accountManager.getCurrentOrNextForFamily.mockReturnValue(account); accountManager.toAuthDetails.mockReturnValue({ access: 'bad-token', expires: Date.now() + 100000 }); accountManager.getAccountsSnapshot.mockReturnValue([account]); @@ -401,7 +659,7 @@ describe('FetchOrchestrator', () => { .mockReturnValueOnce(1) .mockReturnValueOnce(0); accountManager.getCurrentOrNextForFamily - .mockReturnValueOnce({ index: 0, accountId: 'acc1', email: 'acc1@example.com' }) + .mockReturnValueOnce({ index: 0, accountId: primaryAccountId, email: 'acc1@example.com' }) .mockReturnValueOnce(null); accountManager.toAuthDetails.mockReturnValue({ access: 'token', expires: Date.now() + 100000 }); accountManager.getAccountsSnapshot.mockReturnValue([]); @@ -480,7 +738,7 @@ describe('FetchOrchestrator', () => { it('should handle non-JSON or non-string bodies gracefully', async () => { accountManager.getAccountCount.mockReturnValue(1); - accountManager.getCurrentOrNextForFamily.mockReturnValue({ index: 0, accountId: 'acc1', email: 'test@example.com' }); + accountManager.getCurrentOrNextForFamily.mockReturnValue({ index: 0, accountId: primaryAccountId, email: 'test@example.com' }); accountManager.toAuthDetails.mockReturnValue({ access: 'token', expires: Date.now() + 100000 }); mockFetch.mockImplementation(() => Promise.resolve(new Response('{}', { status: 200 }))); diff --git a/test/fixtures/codex-models-cache.json 
b/test/fixtures/codex-models-cache.json new file mode 100644 index 0000000..b176301 --- /dev/null +++ b/test/fixtures/codex-models-cache.json @@ -0,0 +1,11 @@ +{ + "source": "server", + "models": [ + { + "slug": "gpt-5.1" + }, + { + "slug": "gpt-5.3-codex" + } + ] +} diff --git a/test/logger.test.ts b/test/logger.test.ts index 416d64a..086b9e2 100644 --- a/test/logger.test.ts +++ b/test/logger.test.ts @@ -1,4 +1,7 @@ import { describe, expect, it, vi } from 'vitest'; +import { mkdtempSync, readFileSync, readdirSync, rmSync } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; import { LOGGING_ENABLED, logRequest } from '../lib/logger.js'; @@ -65,6 +68,29 @@ describe('Logger Module', () => { }); }).not.toThrow(); }); + + it('redacts prompt_cache_key in request logs', async () => { + const root = mkdtempSync(join(tmpdir(), 'opencode-logs-')); + await withEnv({ ENABLE_PLUGIN_REQUEST_LOGGING: '1', XDG_CONFIG_HOME: root }, async () => { + vi.resetModules(); + const { logRequest: logRequestWithEnv } = await import('../lib/logger.js'); + logRequestWithEnv('after-transform', { + body: { + prompt_cache_key: 'sess_123', + kept: 'ok', + }, + }); + + const logDir = join(root, 'opencode', 'logs', 'codex-plugin'); + const files = readdirSync(logDir); + expect(files.length).toBe(1); + const payload = JSON.parse( + readFileSync(join(logDir, files[0]!), 'utf8'), + ) as { body?: { prompt_cache_key?: string } }; + expect(payload.body?.prompt_cache_key).toBe('[redacted]'); + }); + rmSync(root, { recursive: true, force: true }); + }); }); describe('debug env flags', () => { diff --git a/test/models-gpt-5.3-codex.test.ts b/test/models-gpt-5.3-codex.test.ts index cf919c8..badc2a5 100644 --- a/test/models-gpt-5.3-codex.test.ts +++ b/test/models-gpt-5.3-codex.test.ts @@ -4,6 +4,7 @@ import { tmpdir } from "node:os"; import { join } from "node:path"; import type { Auth } from "@opencode-ai/sdk"; import { DEFAULT_MODEL_FAMILY } from "../lib/constants.js"; 
+import { AccountManager } from "../lib/accounts.js"; vi.mock("@opencode-ai/plugin", () => { const describe = () => ({ @@ -236,13 +237,18 @@ describe("gpt-5.3-codex model metadata", () => { ); expect(provider.models["gpt-5.3-codex"]).toBeDefined(); - expect(provider.models["gpt-5.3-codex-xhigh"]).toBeUndefined(); expect(provider.models["gpt-5.3-codex"]?.variants?.xhigh).toBeDefined(); } finally { rmSync(root, { recursive: true, force: true }); } }); + it("initializes gpt-5.3-codex family in AccountManager", async () => { + const manager = new AccountManager(); + expect(manager.getActiveIndexForFamily("gpt-5.3-codex")).toBeDefined(); + // If it's not initialized, it might throw or return -1 depending on how it's handled. + }); + it("does not synthesize gpt-5.3-codex from gpt-5.2-codex", async () => { const root = mkdtempSync(join(tmpdir(), "opencode-gpt53-no52clone-")); process.env.XDG_CONFIG_HOME = root; diff --git a/test/plugin-config-hook.test.ts b/test/plugin-config-hook.test.ts index c3f8a11..4c2c2e9 100644 --- a/test/plugin-config-hook.test.ts +++ b/test/plugin-config-hook.test.ts @@ -1,5 +1,5 @@ import { afterEach, describe, expect, it, vi } from "vitest"; -import { mkdtempSync, rmSync } from "node:fs"; +import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs"; import { tmpdir } from "node:os"; import { join } from "node:path"; @@ -37,7 +37,14 @@ describe("OpenAIAuthPlugin config hook", () => { process.env.XDG_CONFIG_HOME = root; try { - const plugin = await OpenAIAuthPlugin({ + vi.resetModules(); + const { + OpenAIAuthPlugin: FreshPlugin, + } = await import("../index.js"); + const { getCachedVariantEfforts } = await import( + "../lib/prompts/codex-models.js" + ); + const plugin = await FreshPlugin({ client: { tui: { showToast: vi.fn() }, auth: { set: vi.fn() }, @@ -91,7 +98,12 @@ describe("OpenAIAuthPlugin config hook", () => { process.env.XDG_CONFIG_HOME = root; try { - const plugin = await OpenAIAuthPlugin({ + vi.resetModules(); + const { 
OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const { getCachedVariantEfforts } = await import( + "../lib/prompts/codex-models.js" + ); + const plugin = await FreshPlugin({ client: { tui: { showToast: vi.fn() }, auth: { set: vi.fn() }, @@ -124,7 +136,12 @@ describe("OpenAIAuthPlugin config hook", () => { process.env.XDG_CONFIG_HOME = root; try { - const plugin = await OpenAIAuthPlugin({ + vi.resetModules(); + const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const { getCachedVariantEfforts } = await import( + "../lib/prompts/codex-models.js" + ); + const plugin = await FreshPlugin({ client: { tui: { showToast: vi.fn() }, auth: { set: vi.fn() }, @@ -147,7 +164,102 @@ describe("OpenAIAuthPlugin config hook", () => { await (plugin as any).config(cfg); - expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeUndefined(); + expect(cfg.provider.openai.models["gpt-5.3-codex"]).toBeDefined(); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("preserves effort-suffixed models when base entry is missing", async () => { + const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-legacy-")); + process.env.XDG_CONFIG_HOME = root; + + try { + vi.resetModules(); + const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const plugin = await FreshPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.3-codex-low": { id: "gpt-5.3-codex-low" }, + "gpt-5.3-codex-high": { id: "gpt-5.3-codex-high" }, + }, + }, + }, + experimental: {}, + }; + + await (plugin as any).config(cfg); + + expect(cfg.provider.openai.models["gpt-5.3-codex-low"]).toBeDefined(); + expect(cfg.provider.openai.models["gpt-5.3-codex-high"]).toBeDefined(); + } finally { + rmSync(root, { recursive: true, force: true }); + } + }); + + it("uses cached supported_reasoning_levels for codex variants", async () => { 
+ const root = mkdtempSync(join(tmpdir(), "opencode-config-hook-cache-")); + process.env.XDG_CONFIG_HOME = root; + + try { + const cacheDir = join(root, "opencode", "cache"); + mkdirSync(cacheDir, { recursive: true }); + writeFileSync( + join(cacheDir, "codex-models-cache.json"), + JSON.stringify({ + fetchedAt: Date.now(), + source: "server", + models: [ + { + slug: "gpt-5.3-codex", + supported_reasoning_levels: [ + { effort: "low" }, + { effort: "medium" }, + ], + }, + ], + }), + "utf8", + ); + + vi.resetModules(); + const { OpenAIAuthPlugin: FreshPlugin } = await import("../index.js"); + const { getCachedVariantEfforts } = await import( + "../lib/prompts/codex-models.js" + ); + const plugin = await FreshPlugin({ + client: { + tui: { showToast: vi.fn() }, + auth: { set: vi.fn() }, + } as any, + } as any); + + const cfg: any = { + provider: { + openai: { + models: { + "gpt-5.3-codex": { id: "gpt-5.3-codex" }, + }, + }, + }, + experimental: {}, + }; + + const efforts = getCachedVariantEfforts(); + expect(efforts.get("gpt-5.3-codex")).toEqual(["low", "medium"]); + + await (plugin as any).config(cfg); + + const variants = cfg.provider.openai.models["gpt-5.3-codex"].variants; + expect(Object.keys(variants)).toEqual(["low", "medium"]); } finally { rmSync(root, { recursive: true, force: true }); } @@ -253,7 +365,7 @@ describe("OpenAIAuthPlugin config hook", () => { }); expect(cfg.provider.openai.models["gpt-5.3-codex"].variants.high).toMatchObject({ reasoningEffort: "high", - textVerbosity: "high", + textVerbosity: "medium", reasoningSummary: "detailed", disabled: true, }); diff --git a/test/plugin-config-schema.test.ts b/test/plugin-config-schema.test.ts index d9527ee..a47193c 100644 --- a/test/plugin-config-schema.test.ts +++ b/test/plugin-config-schema.test.ts @@ -15,7 +15,6 @@ describe("plugin config schema parity", () => { const keys = new Set(Object.keys(schema.properties ?? 
{})); const expectedKeys = [ - "codexMode", "accountSelectionStrategy", "pidOffsetEnabled", "quietMode", diff --git a/test/plugin-config.test.ts b/test/plugin-config.test.ts index 5f2c436..6af1a58 100644 --- a/test/plugin-config.test.ts +++ b/test/plugin-config.test.ts @@ -1,10 +1,13 @@ import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; import { loadPluginConfig, - getCodexMode, getPerProjectAccounts, getSchedulingMode, getMaxCacheFirstWaitSeconds, + getHardStopMaxWaitMs, + getHardStopOnUnknownModel, + getHardStopOnAllAuthFailed, + getHardStopMaxConsecutiveFailures, getAuthDebugEnabled, getNoBrowser, } from '../lib/config.js'; @@ -32,6 +35,10 @@ describe('Plugin Configuration', () => { 'CODEX_AUTH_PER_PROJECT_ACCOUNTS', 'CODEX_AUTH_SCHEDULING_MODE', 'CODEX_AUTH_MAX_CACHE_FIRST_WAIT_SECONDS', + 'CODEX_AUTH_HARD_STOP_MAX_WAIT_MS', + 'CODEX_AUTH_HARD_STOP_ON_UNKNOWN_MODEL', + 'CODEX_AUTH_HARD_STOP_ON_ALL_AUTH_FAILED', + 'CODEX_AUTH_HARD_STOP_MAX_CONSECUTIVE_FAILURES', 'CODEX_AUTH_DEBUG', 'OPENCODE_OPENAI_AUTH_DEBUG', 'DEBUG_CODEX_PLUGIN', @@ -59,20 +66,23 @@ describe('Plugin Configuration', () => { } }); - describe('loadPluginConfig', () => { - const expectedDefault = { - codexMode: false, - accountSelectionStrategy: 'sticky', - pidOffsetEnabled: true, - quietMode: false, - perProjectAccounts: false, - retryAllAccountsRateLimited: false, - retryAllAccountsMaxWaitMs: 30_000, - retryAllAccountsMaxRetries: 1, - tokenRefreshSkewMs: 60_000, - proactiveTokenRefresh: false, - authDebug: false, - rateLimitToastDebounceMs: 60_000, + describe('loadPluginConfig', () => { + const expectedDefault = { + accountSelectionStrategy: 'sticky', + pidOffsetEnabled: true, + quietMode: false, + perProjectAccounts: false, + retryAllAccountsRateLimited: false, + retryAllAccountsMaxWaitMs: 30_000, + retryAllAccountsMaxRetries: 1, + hardStopMaxWaitMs: 10_000, + hardStopOnUnknownModel: true, + hardStopOnAllAuthFailed: true, + hardStopMaxConsecutiveFailures: 5, + 
tokenRefreshSkewMs: 60_000, + proactiveTokenRefresh: false, + authDebug: false, + rateLimitToastDebounceMs: 60_000, schedulingMode: 'cache_first', maxCacheFirstWaitSeconds: 60, switchOnFirstRateLimit: true, @@ -94,15 +104,6 @@ describe('Plugin Configuration', () => { ); }); - it('should load config from file when it exists', () => { - mockExistsSync.mockReturnValue(true); - mockReadFileSync.mockReturnValue(JSON.stringify({ codexMode: true })); - - const config = loadPluginConfig(); - - expect(config).toEqual({ ...expectedDefault, codexMode: true }); - }); - it('should merge user config with defaults', () => { mockExistsSync.mockReturnValue(true); mockReadFileSync.mockReturnValue(JSON.stringify({})); @@ -168,19 +169,6 @@ describe('Plugin Configuration', () => { }); }); - describe('getCodexMode', () => { - it('should always return false by default', () => { - expect(getCodexMode({})).toBe(false); - }); - - it('should ignore legacy env vars and config values', () => { - process.env.CODEX_AUTH_MODE = '1'; - process.env.CODEX_MODE = '1'; - expect(getCodexMode({ codexMode: true })).toBe(false); - expect(getCodexMode({ codexMode: false })).toBe(false); - }); - }); - describe('getSchedulingMode', () => { it('should prioritize env var when valid', () => { process.env.CODEX_AUTH_SCHEDULING_MODE = 'balance'; @@ -238,6 +226,57 @@ describe('Plugin Configuration', () => { }); }); + describe('hard-stop settings', () => { + it('should default hard-stop max wait to 10 seconds', () => { + delete process.env.CODEX_AUTH_HARD_STOP_MAX_WAIT_MS; + const config: PluginConfig = {}; + const result = getHardStopMaxWaitMs(config); + expect(result).toBe(10_000); + }); + + it('should clamp negative hard-stop max wait to zero', () => { + process.env.CODEX_AUTH_HARD_STOP_MAX_WAIT_MS = '-1'; + const result = getHardStopMaxWaitMs({}); + expect(result).toBe(0); + }); + + it('should default hard-stop unknown-model to true', () => { + delete process.env.CODEX_AUTH_HARD_STOP_ON_UNKNOWN_MODEL; + const 
result = getHardStopOnUnknownModel({}); + expect(result).toBe(true); + }); + + it('should allow env override for hard-stop unknown-model', () => { + process.env.CODEX_AUTH_HARD_STOP_ON_UNKNOWN_MODEL = '0'; + const result = getHardStopOnUnknownModel({ hardStopOnUnknownModel: true }); + expect(result).toBe(false); + }); + + it('should default hard-stop all-auth-failed to true', () => { + delete process.env.CODEX_AUTH_HARD_STOP_ON_ALL_AUTH_FAILED; + const result = getHardStopOnAllAuthFailed({}); + expect(result).toBe(true); + }); + + it('should allow env override for hard-stop all-auth-failed', () => { + process.env.CODEX_AUTH_HARD_STOP_ON_ALL_AUTH_FAILED = '0'; + const result = getHardStopOnAllAuthFailed({ hardStopOnAllAuthFailed: true }); + expect(result).toBe(false); + }); + + it('should default hard-stop max consecutive failures to 5', () => { + delete process.env.CODEX_AUTH_HARD_STOP_MAX_CONSECUTIVE_FAILURES; + const result = getHardStopMaxConsecutiveFailures({}); + expect(result).toBe(5); + }); + + it('should clamp negative hard-stop max consecutive failures to zero', () => { + process.env.CODEX_AUTH_HARD_STOP_MAX_CONSECUTIVE_FAILURES = '-2'; + const result = getHardStopMaxConsecutiveFailures({}); + expect(result).toBe(0); + }); + }); + describe('getAuthDebugEnabled', () => { it('should return false by default', () => { delete process.env.CODEX_AUTH_DEBUG; @@ -296,13 +335,5 @@ describe('Plugin Configuration', () => { }); }); - describe('Priority order', () => { - it('keeps codexMode as a legacy no-op', () => { - process.env.CODEX_AUTH_MODE = '1'; - process.env.CODEX_MODE = '1'; - expect(getCodexMode({ codexMode: true })).toBe(false); - expect(getCodexMode({ codexMode: false })).toBe(false); - expect(getCodexMode({})).toBe(false); - }); - }); + }); diff --git a/test/request-transformer.test.ts b/test/request-transformer.test.ts index 0ba3c79..5793cf3 100644 --- a/test/request-transformer.test.ts +++ b/test/request-transformer.test.ts @@ -1,71 +1,60 @@ import { 
describe, it, expect, vi } from 'vitest'; import { - normalizeModel, - getModelConfig, - filterInput, - transformRequestBody, + normalizeModel, + getModelConfig, + filterInput, + transformRequestBody, } from '../lib/request/request-transformer.js'; +import { createSyntheticErrorResponse } from '../lib/request/response-handler.js'; import type { RequestBody, UserConfig, InputItem } from '../lib/types.js'; +import { mkdtempSync, mkdirSync, rmSync, writeFileSync } from 'node:fs'; +import { tmpdir } from 'node:os'; +import { join } from 'node:path'; describe('Request Transformer Module', () => { - describe('normalizeModel', () => { - // NOTE: All gpt-5 models now normalize to gpt-5.1 as gpt-5 is being phased out - it('should normalize gpt-5-codex to gpt-5.1-codex', async () => { - expect(normalizeModel('gpt-5-codex')).toBe('gpt-5.1-codex'); + describe('synthetic error responses', () => { + it('creates JSON error payloads', async () => { + const response = createSyntheticErrorResponse('Bad model', 400, 'unsupported_model'); + const payload = await response.json(); + expect(payload.error.message).toContain('Bad model'); + expect(payload.error.type).toBe('unsupported_model'); }); + }); - it('should normalize gpt-5 to gpt-5.1', async () => { - expect(normalizeModel('gpt-5')).toBe('gpt-5.1'); + describe('normalizeModel', () => { + it('should normalize known gpt-5.x codex models', async () => { + expect(normalizeModel('gpt-5.3-codex')).toBe('gpt-5.3-codex'); + expect(normalizeModel('openai/gpt-5.2-codex-high')).toBe('gpt-5.2-codex'); }); - it('should normalize variants containing "codex" to gpt-5.1-codex', async () => { - expect(normalizeModel('openai/gpt-5-codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('custom-gpt-5-codex-variant')).toBe('gpt-5.1-codex'); + it('should normalize known gpt-5.x general models', async () => { + expect(normalizeModel('gpt-5.2')).toBe('gpt-5.2'); + expect(normalizeModel('openai/gpt-5.1-high')).toBe('gpt-5.1'); }); - it('should normalize 
variants containing "gpt-5" to gpt-5.1', async () => { - expect(normalizeModel('gpt-5-mini')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-nano')).toBe('gpt-5.1'); + it('should normalize legacy gpt-5 aliases', async () => { + expect(normalizeModel('gpt-5')).toBe('gpt-5.1'); + expect(normalizeModel('gpt-5-codex')).toBe('gpt-5.1-codex'); + expect(normalizeModel('gpt-5-codex-low')).toBe('gpt-5.1-codex'); + expect(normalizeModel('codex-mini-latest')).toBe('gpt-5.1-codex-mini'); }); - it('should return gpt-5.1 as default for unknown models', async () => { - expect(normalizeModel('unknown-model')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-4')).toBe('gpt-5.1'); + it('should leave unknown models untouched', async () => { + expect(normalizeModel('unknown-model')).toBe('unknown-model'); + expect(normalizeModel('gpt-4')).toBe('gpt-4'); }); - it('should return gpt-5.1 for undefined', async () => { + it('should default to gpt-5.1 when model is missing', async () => { expect(normalizeModel(undefined)).toBe('gpt-5.1'); + expect(normalizeModel('')).toBe('gpt-5.1'); }); - // Codex CLI preset name tests - legacy gpt-5 models now map to gpt-5.1 + // Codex CLI preset name tests - gpt-5.x only describe('Codex CLI preset names', () => { - it('should normalize all gpt-5-codex presets to gpt-5.1-codex', async () => { - expect(normalizeModel('gpt-5-codex-low')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt-5-codex-medium')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt-5-codex-high')).toBe('gpt-5.1-codex'); - }); - - it('should normalize all gpt-5 presets to gpt-5.1', async () => { - expect(normalizeModel('gpt-5-minimal')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-low')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-medium')).toBe('gpt-5.1'); - expect(normalizeModel('gpt-5-high')).toBe('gpt-5.1'); - }); - - it('should prioritize codex over gpt-5 in model name', async () => { - // Model name contains BOTH "codex" and "gpt-5" - // Should return "gpt-5.1-codex" 
(codex checked first, maps to 5.1) - expect(normalizeModel('gpt-5-codex-low')).toBe('gpt-5.1-codex'); - expect(normalizeModel('my-gpt-5-codex-model')).toBe('gpt-5.1-codex'); - }); - - it('should normalize codex mini presets to gpt-5.1-codex-mini', async () => { - expect(normalizeModel('gpt-5-codex-mini')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('gpt-5-codex-mini-medium')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('gpt-5-codex-mini-high')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('openai/gpt-5-codex-mini-high')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('codex-mini-latest')).toBe('gpt-5.1-codex-mini'); - expect(normalizeModel('openai/codex-mini-latest')).toBe('gpt-5.1-codex-mini'); + it('should normalize gpt-5.1 codex mini presets', async () => { + expect(normalizeModel('gpt-5.1-codex-mini')).toBe('gpt-5.1-codex-mini'); + expect(normalizeModel('gpt-5.1-codex-mini-high')).toBe('gpt-5.1-codex-mini'); + expect(normalizeModel('openai/gpt-5.1-codex-mini-medium')).toBe('gpt-5.1-codex-mini'); }); it('should normalize gpt-5.1 codex max presets', async () => { @@ -75,7 +64,7 @@ describe('Request Transformer Module', () => { expect(normalizeModel('openai/gpt-5.1-codex-max-medium')).toBe('gpt-5.1-codex-max'); }); - it('should normalize gpt-5.3 and gpt-5.2 codex presets', async () => { + it('should normalize gpt-5.3 and gpt-5.2 codex presets', async () => { expect(normalizeModel('gpt-5.2-codex')).toBe('gpt-5.2-codex'); expect(normalizeModel('gpt-5.2-codex-low')).toBe('gpt-5.2-codex'); expect(normalizeModel('gpt-5.2-codex-medium')).toBe('gpt-5.2-codex'); @@ -99,10 +88,16 @@ describe('Request Transformer Module', () => { expect(normalizeModel('openai/gpt-5.1-codex-mini-medium')).toBe('gpt-5.1-codex-mini'); }); + it('should normalize gpt-5.2 pro presets', async () => { + expect(normalizeModel('gpt-5.2-pro')).toBe('gpt-5.2-pro'); + expect(normalizeModel('gpt-5.2-pro-low')).toBe('gpt-5.2-pro'); + 
expect(normalizeModel('openai/gpt-5.2-pro-high')).toBe('gpt-5.2-pro'); + }); + it('should normalize gpt-5.1 general-purpose slugs', async () => { expect(normalizeModel('gpt-5.1')).toBe('gpt-5.1'); expect(normalizeModel('openai/gpt-5.1')).toBe('gpt-5.1'); - expect(normalizeModel('GPT 5.1 High')).toBe('gpt-5.1'); + expect(normalizeModel('GPT 5.1 High')).toBe('gpt 5.1 high'); }); it('should normalize future codex model variants without explicit map entries', async () => { @@ -115,31 +110,19 @@ describe('Request Transformer Module', () => { }); }); - // Edge case tests - legacy gpt-5 models now map to gpt-5.1 + // Edge case tests - avoid legacy or nonstandard coercion describe('Edge cases', () => { - it('should handle uppercase model names', async () => { - expect(normalizeModel('GPT-5-CODEX')).toBe('gpt-5.1-codex'); - expect(normalizeModel('GPT-5-HIGH')).toBe('gpt-5.1'); - expect(normalizeModel('CODEx-MINI-LATEST')).toBe('gpt-5.1-codex-mini'); - }); - - it('should handle mixed case', async () => { - expect(normalizeModel('Gpt-5-Codex-Low')).toBe('gpt-5.1-codex'); - expect(normalizeModel('GpT-5-MeDiUm')).toBe('gpt-5.1'); - }); - - it('should handle special characters', async () => { - expect(normalizeModel('my_gpt-5_codex')).toBe('gpt-5.1-codex'); - expect(normalizeModel('gpt.5.high')).toBe('gpt-5.1'); - }); - - it('should handle old verbose names', async () => { - expect(normalizeModel('GPT 5 Codex Low (ChatGPT Subscription)')).toBe('gpt-5.1-codex'); - expect(normalizeModel('GPT 5 High (ChatGPT Subscription)')).toBe('gpt-5.1'); + it('should handle uppercase and mixed case for known models', async () => { + expect(normalizeModel('GPT-5.3-CODEX')).toBe('gpt-5.3-codex'); + expect(normalizeModel('GpT-5.1-HiGh')).toBe('gpt-5.1'); }); - it('should handle empty string', async () => { - expect(normalizeModel('')).toBe('gpt-5.1'); + it('should not coerce legacy or verbose names', async () => { + expect(normalizeModel('GPT 5 Codex Low (ChatGPT Subscription)')).toBe( + 'gpt 5 
codex low (chatgpt subscription)', + ); + expect(normalizeModel('my_gpt-5_codex')).toBe('my_gpt-5_codex'); + expect(normalizeModel('gpt.5.high')).toBe('gpt.5.high'); }); }); }); @@ -360,7 +343,6 @@ describe('Request Transformer Module', () => { model: 'gpt-5-codex', input: [], // Host-provided key (OpenCode session id) - // @ts-expect-error extra field allowed prompt_cache_key: 'ses_host_key_123', }; const result: any = await transformRequestBody(body, codexInstructions); @@ -385,7 +367,7 @@ describe('Request Transformer Module', () => { expect(result.store).toBe(false); expect(result.stream).toBe(true); - expect(result.instructions).toBe(codexInstructions); + expect(result.instructions).toContain(codexInstructions); }); it('should normalize model name', async () => { @@ -394,7 +376,16 @@ describe('Request Transformer Module', () => { input: [], }; const result = await transformRequestBody(body, codexInstructions); - expect(result.model).toBe('gpt-5.1'); // gpt-5 now maps to gpt-5.1 + expect(result.model).toBe('gpt-5-mini'); + }); + + it('accepts base gpt-5.x model slugs', async () => { + const body: RequestBody = { + model: 'gpt-5.3', + input: [], + }; + const result = await transformRequestBody(body, codexInstructions); + expect(result.model).toBe('gpt-5.3'); }); it('should apply default reasoning config', async () => { @@ -822,7 +813,7 @@ describe('Request Transformer Module', () => { input: [], }; const result = await transformRequestBody(body, codexInstructions); - expect(result.reasoning?.effort).toBe('medium'); + expect(result.reasoning?.effort).toBe('low'); }); it('should normalize minimal to low when provided by the host', async () => { @@ -980,65 +971,261 @@ describe('Request Transformer Module', () => { tools: [{ name: 'test_tool' }], }; const result = await transformRequestBody(body, codexInstructions); - expect(result.instructions).toBe(codexInstructions); + expect(result.instructions).toContain(codexInstructions); }); }); - describe('personality 
resolution', () => { - it('applies model-level friendly personality override', async () => { + describe('personality resolution', () => { + it('applies custom personality from local file', async () => { + const root = mkdtempSync(join(tmpdir(), 'personality-local-')); + const cwd = process.cwd(); + process.chdir(root); + try { + const localDir = join(root, '.opencode', 'Personalities'); + mkdirSync(localDir, { recursive: true }); + writeFileSync( + join(localDir, 'Idiot.md'), + 'Chaotic friendly override', + 'utf8', + ); + const body: RequestBody = { + model: 'gpt-5.3-codex', + input: [], + }; + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: 'Idiot' }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', + personalityMessages: { + friendly: 'Friendly from runtime', + pragmatic: 'Pragmatic from runtime', + }, + staticDefaultPersonality: 'pragmatic', + }; + const result = await transformRequestBody( + body, + 'BASE INSTRUCTIONS', + userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).toContain('Chaotic friendly override'); + } finally { + process.chdir(cwd); + rmSync(root, { recursive: true, force: true }); + } + }); + + it('strips cache marker from personality files', async () => { + const root = mkdtempSync(join(tmpdir(), 'personality-marker-')); + const cwd = process.cwd(); + process.chdir(root); + try { + const localDir = join(root, '.opencode', 'Personalities'); + mkdirSync(localDir, { recursive: true }); + writeFileSync( + join(localDir, 'Friendly.md'), + '\nFriendly from cache', + 'utf8', + ); + const body: RequestBody = { + model: 'gpt-5.3-codex', + input: [], + }; + const result = await transformRequestBody( + body, + 'BASE INSTRUCTIONS', + { global: {}, models: {} }, + undefined, + { custom_settings: { options: { personality: 'friendly' }, models: {} } } as any, + ); + 
expect(result.instructions).toContain('Friendly from cache'); + expect(result.instructions).not.toContain('opencode personality cache'); + } finally { + process.chdir(cwd); + rmSync(root, { recursive: true, force: true }); + } + }); + + it('rejects personality names with path traversal', async () => { + const root = mkdtempSync(join(tmpdir(), 'personality-traversal-')); + const cwd = process.cwd(); + process.chdir(root); + try { + const localDir = join(root, '.opencode', 'Personalities'); + mkdirSync(localDir, { recursive: true }); + writeFileSync( + join(root, '.opencode', 'evil.md'), + 'do not load', + 'utf8', + ); + const body: RequestBody = { + model: 'gpt-5.3-codex', + input: [], + }; + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: '../evil' }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', + personalityMessages: { + pragmatic: 'Pragmatic from runtime', + }, + staticDefaultPersonality: 'pragmatic', + }; + const result = await transformRequestBody( + body, + 'BASE INSTRUCTIONS', + userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).not.toContain('do not load'); + } finally { + process.chdir(cwd); + rmSync(root, { recursive: true, force: true }); + } + }); + + it('rejects personality names with Windows-style traversal', async () => { + const root = mkdtempSync(join(tmpdir(), 'personality-traversal-win-')); + const cwd = process.cwd(); + process.chdir(root); + try { + const localDir = join(root, '.opencode', 'Personalities'); + mkdirSync(localDir, { recursive: true }); + writeFileSync( + join(root, '.opencode', 'evil.md'), + 'do not load', + 'utf8', + ); + const body: RequestBody = { + model: 'gpt-5.3-codex', + input: [], + }; + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: 
'..\\evil' }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', + personalityMessages: { + pragmatic: 'Pragmatic from runtime', + }, + staticDefaultPersonality: 'pragmatic', + }; + const result = await transformRequestBody( + body, + 'BASE INSTRUCTIONS', + userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).not.toContain('do not load'); + } finally { + process.chdir(cwd); + rmSync(root, { recursive: true, force: true }); + } + }); + + it('defaults to pragmatic when no custom personality set', async () => { const body: RequestBody = { model: 'gpt-5.3-codex', input: [], }; - const userConfig: UserConfig = { - global: { personality: 'pragmatic' } as any, - models: { - 'gpt-5.3-codex': { - options: { personality: 'friendly' } as any, - }, + const userConfig: UserConfig = { global: {}, models: {} }; + const runtimeDefaults = { + instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', + personalityMessages: { + friendly: 'Friendly from runtime', + pragmatic: 'Pragmatic from runtime', }, + staticDefaultPersonality: 'pragmatic', }; - const result = await transformRequestBody(body, 'BASE INSTRUCTIONS', userConfig); - expect(result.instructions).toContain('BASE INSTRUCTIONS'); - expect(result.instructions?.toLowerCase()).toContain('friendly'); + const result = await transformRequestBody( + body, + 'BASE INSTRUCTIONS', + userConfig, + runtimeDefaults as any, + {} as any, + ); + expect(result.instructions).toContain('Pragmatic from runtime'); }); - it('applies model options when model id is provider-prefixed', async () => { + it('uses runtime default when personality is set to default', async () => { const body: RequestBody = { - model: 'openai/gpt-5.3-codex', + model: 'gpt-5.3-codex', input: [], }; - const userConfig: UserConfig = { - global: { personality: 'pragmatic' } as any, - models: { - 'gpt-5.3-codex': { - options: { personality: 'friendly' } as any, 
- }, + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: 'default' }, + models: {}, + }, + }; + const runtimeDefaults = { + instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', + personalityMessages: { + friendly: 'Friendly from runtime', + pragmatic: 'Pragmatic from runtime', }, + onlineDefaultPersonality: 'friendly', + staticDefaultPersonality: 'pragmatic', }; - const result = await transformRequestBody(body, 'BASE INSTRUCTIONS', userConfig); - expect(result.instructions?.toLowerCase()).toContain('friendly'); - expect(result.instructions?.toLowerCase()).not.toContain('pragmatic'); + const result = await transformRequestBody( + body, + 'BASE INSTRUCTIONS', + userConfig, + runtimeDefaults as any, + pluginConfig as any, + ); + expect(result.instructions).toContain('Friendly from runtime'); }); - it('normalizes mixed-case personality and coerces invalid to none', async () => { + it('uses explicit runtime default message when provided', async () => { const body: RequestBody = { - model: 'gpt-5.4-codex', + model: 'gpt-5.3-codex', input: [], }; - const userConfig: UserConfig = { - global: { personality: 'PrAgMaTiC' } as any, - models: { - 'gpt-5.4-codex': { - options: { personality: 'INVALID_STYLE' } as any, - }, + const userConfig: UserConfig = { global: {}, models: {} }; + const pluginConfig = { + custom_settings: { + options: { personality: 'default' }, + models: {}, }, }; - const result = await transformRequestBody(body, 'BASE INSTRUCTIONS', userConfig); - expect(result.instructions).toBe('BASE INSTRUCTIONS'); + const runtimeDefaults = { + instructionsTemplate: 'BASE INSTRUCTIONS\n\n{{ personality }}', + personalityMessages: { + default: 'Default from runtime', + pragmatic: 'Pragmatic from runtime', + }, + staticDefaultPersonality: 'pragmatic', + }; + const result = await transformRequestBody( + body, + 'BASE INSTRUCTIONS', + userConfig, + runtimeDefaults as any, + 
pluginConfig as any, + ); + expect(result.instructions).toContain('Default from runtime'); }); - it('logs invalid personality once per process while coercing to none', async () => { + it('logs invalid personality once per process while coercing to pragmatic', async () => { const previousLogging = process.env.ENABLE_PLUGIN_REQUEST_LOGGING; process.env.ENABLE_PLUGIN_REQUEST_LOGGING = '1'; const logSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); @@ -1048,68 +1235,38 @@ describe('Request Transformer Module', () => { const dynamicModule = await import('../lib/request/request-transformer.js'); const dynamicTransform = dynamicModule.transformRequestBody; const body: RequestBody = { - model: 'gpt-5.4-codex', + model: 'gpt-5.3-codex', input: [], }; const userConfig: UserConfig = { - global: { personality: 'INVALID_GLOBAL' } as any, - models: { - 'gpt-5.4-codex': { - options: { personality: 'INVALID_MODEL' } as any, - }, + global: {}, + models: {}, + }; + const pluginConfig = { + custom_settings: { + options: { personality: 'INVALID' }, + models: {}, }, }; - const first = await dynamicTransform(body, 'BASE INSTRUCTIONS', userConfig); - const second = await dynamicTransform(body, 'BASE INSTRUCTIONS', userConfig); - - expect(first.instructions).toBe('BASE INSTRUCTIONS'); - expect(second.instructions).toBe('BASE INSTRUCTIONS'); - - const invalidLogs = logSpy.mock.calls.filter((call) => - call.some((part) => - String(part).includes('Invalid model personality "INVALID_MODEL" detected; coercing to "none"'), - ), + await dynamicTransform( + body, + 'BASE INSTRUCTIONS', + userConfig, + undefined, + pluginConfig as any, + ); + await dynamicTransform( + body, + 'BASE INSTRUCTIONS', + userConfig, + undefined, + pluginConfig as any, ); - expect(invalidLogs).toHaveLength(1); - } finally { - if (previousLogging === undefined) { - delete process.env.ENABLE_PLUGIN_REQUEST_LOGGING; - } else { - process.env.ENABLE_PLUGIN_REQUEST_LOGGING = previousLogging; - } - 
vi.restoreAllMocks(); - vi.resetModules(); - } - }); - - it('logs invalid global personality once per process while coercing to none', async () => { - const previousLogging = process.env.ENABLE_PLUGIN_REQUEST_LOGGING; - process.env.ENABLE_PLUGIN_REQUEST_LOGGING = '1'; - const logSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - - try { - vi.resetModules(); - const dynamicModule = await import('../lib/request/request-transformer.js'); - const dynamicTransform = dynamicModule.transformRequestBody; - const body: RequestBody = { - model: 'gpt-5.4-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { personality: 'INVALID_GLOBAL' } as any, - models: {}, - }; - - const first = await dynamicTransform(body, 'BASE INSTRUCTIONS', userConfig); - const second = await dynamicTransform(body, 'BASE INSTRUCTIONS', userConfig); - - expect(first.instructions).toBe('BASE INSTRUCTIONS'); - expect(second.instructions).toBe('BASE INSTRUCTIONS'); const invalidLogs = logSpy.mock.calls.filter((call) => call.some((part) => - String(part).includes('Invalid global personality "INVALID_GLOBAL" detected; coercing to "none"'), + String(part).includes('Invalid personality "INVALID" detected; coercing to "pragmatic"'), ), ); expect(invalidLogs).toHaveLength(1); @@ -1123,51 +1280,30 @@ describe('Request Transformer Module', () => { vi.resetModules(); } }); - - it('applies global personality when runtime defaults only provide template messages', async () => { - const body: RequestBody = { - model: 'gpt-5.3-codex', - input: [], - }; - const userConfig: UserConfig = { - global: { personality: 'friendly' } as any, - models: {}, - }; - const result = await transformRequestBody( - body, - 'BASE INSTRUCTIONS', - userConfig, - { - instructionsTemplate: 'Template {{ personality }}', - personalityMessages: { - default: '', - friendly: 'Friendly from runtime defaults', - pragmatic: 'Pragmatic from runtime defaults', - }, - staticDefaultPersonality: 'none', - }, - ); - 
expect(result.instructions).toContain('Friendly from runtime defaults'); - expect(result.instructions).not.toContain('Pragmatic from runtime defaults'); - }); }); + // Unknown model validation happens when runtime defaults are resolved from the server catalog. + // NEW: Integration tests for all config scenarios describe('Integration: Complete Config Scenarios', () => { describe('Scenario 1: Default models (no custom config)', () => { it('should handle gpt-5-codex with global options only', async () => { const body: RequestBody = { model: 'gpt-5-codex', - input: [] + input: [], }; const userConfig: UserConfig = { global: { reasoningEffort: 'high' }, - models: {} + models: {}, }; - const result = await transformRequestBody(body, codexInstructions, userConfig); + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.model).toBe('gpt-5.1-codex'); expect(result.reasoning?.effort).toBe('high'); // From global expect(result.store).toBe(false); }); @@ -1175,13 +1311,13 @@ describe('Request Transformer Module', () => { it('should handle gpt-5-mini normalizing to gpt-5.1', async () => { const body: RequestBody = { model: 'gpt-5-mini', - input: [] + input: [], }; const result = await transformRequestBody(body, codexInstructions); - expect(result.model).toBe('gpt-5.1'); // gpt-5 now maps to gpt-5.1 - expect(result.reasoning?.effort).toBe('medium'); // Default for normalized gpt-5.1 + expect(result.model).toBe('gpt-5-mini'); + expect(result.reasoning?.effort).toBe('low'); // Lightweight defaults }); }); @@ -1190,23 +1326,27 @@ describe('Request Transformer Module', () => { global: { reasoningEffort: 'medium', include: ['reasoning.encrypted_content'] }, models: { 'gpt-5-codex-low': { - options: { reasoningEffort: 'low' } + options: { reasoningEffort: 'low' }, }, 'gpt-5-codex-high': { - options: { reasoningEffort: 'high', reasoningSummary: 
'detailed' } - } - } + options: { reasoningEffort: 'high', reasoningSummary: 'detailed' }, + }, + }, }; it('should apply per-model options for gpt-5-codex-low', async () => { const body: RequestBody = { model: 'gpt-5-codex-low', - input: [] + input: [], }; - const result = await transformRequestBody(body, codexInstructions, userConfig); + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.model).toBe('gpt-5.1-codex'); expect(result.reasoning?.effort).toBe('low'); // From per-model expect(result.include).toEqual(['reasoning.encrypted_content']); // From global }); @@ -1214,12 +1354,16 @@ describe('Request Transformer Module', () => { it('should apply per-model options for gpt-5-codex-high', async () => { const body: RequestBody = { model: 'gpt-5-codex-high', - input: [] + input: [], }; - const result = await transformRequestBody(body, codexInstructions, userConfig); + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.model).toBe('gpt-5.1-codex'); expect(result.reasoning?.effort).toBe('high'); // From per-model expect(result.reasoning?.summary).toBe('detailed'); // From per-model }); @@ -1227,12 +1371,16 @@ describe('Request Transformer Module', () => { it('should use global options for default gpt-5-codex', async () => { const body: RequestBody = { model: 'gpt-5-codex', - input: [] + input: [], }; - const result = await transformRequestBody(body, codexInstructions, userConfig); + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex + expect(result.model).toBe('gpt-5.1-codex'); expect(result.reasoning?.effort).toBe('medium'); // From global (no per-model) }); }); 
@@ -1242,21 +1390,25 @@ describe('Request Transformer Module', () => { global: {}, models: { 'GPT 5 Codex Low (ChatGPT Subscription)': { - options: { reasoningEffort: 'low', textVerbosity: 'low' } - } - } + options: { reasoningEffort: 'low', textVerbosity: 'low' }, + }, + }, }; it('should find and apply old config format', async () => { const body: RequestBody = { model: 'GPT 5 Codex Low (ChatGPT Subscription)', - input: [] + input: [], }; - const result = await transformRequestBody(body, codexInstructions, userConfig); + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); - expect(result.model).toBe('gpt-5.1-codex'); // gpt-5-codex now maps to gpt-5.1-codex - expect(result.reasoning?.effort).toBe('low'); // From per-model (old format) + expect(result.model).toBe('gpt 5 codex low (chatgpt subscription)'); + expect(result.reasoning?.effort).toBe('low'); expect(result.text?.verbosity).toBe('low'); }); }); @@ -1266,18 +1418,22 @@ describe('Request Transformer Module', () => { global: { reasoningEffort: 'medium' }, models: { 'gpt-5-codex-low': { - options: { reasoningEffort: 'low' } - } - } + options: { reasoningEffort: 'low' }, + }, + }, }; it('should use per-model for custom variant', async () => { const body: RequestBody = { model: 'gpt-5-codex-low', - input: [] + input: [], }; - const result = await transformRequestBody(body, codexInstructions, userConfig); + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); expect(result.reasoning?.effort).toBe('low'); // Per-model }); @@ -1285,16 +1441,20 @@ describe('Request Transformer Module', () => { it('should use global for default model', async () => { const body: RequestBody = { model: 'gpt-5', - input: [] + input: [], }; - const result = await transformRequestBody(body, codexInstructions, userConfig); + const result = await transformRequestBody( + body, + codexInstructions, + userConfig, + ); expect(result.reasoning?.effort).toBe('medium'); 
// Global }); }); - describe('Scenario 5: Message ID filtering with multi-turn', () => { + describe('Scenario 5: Message ID filtering with multi-turn', () => { it('should remove ALL IDs in multi-turn conversation', async () => { const body: RequestBody = { model: 'gpt-5-codex', @@ -1303,20 +1463,20 @@ describe('Request Transformer Module', () => { { id: 'rs_response1', type: 'message', role: 'assistant', content: 'response' }, { id: 'msg_turn2', type: 'message', role: 'user', content: 'second' }, { id: 'assistant_123', type: 'message', role: 'assistant', content: 'reply' }, - ] + ], }; const result = await transformRequestBody(body, codexInstructions); // All items kept, ALL IDs removed expect(result.input).toHaveLength(4); - expect(result.input!.every(item => !item.id)).toBe(true); + expect(result.input!.every((item) => !item.id)).toBe(true); expect(result.store).toBe(false); // Stateless mode expect(result.include).toEqual(['reasoning.encrypted_content']); }); }); - describe('Scenario 6: Complete end-to-end transformation', () => { + describe('Scenario 6: Complete end-to-end transformation', () => { it('should handle full transformation: custom model + IDs + tools', async () => { const userConfig: UserConfig = { global: { include: ['reasoning.encrypted_content'] }, @@ -1325,28 +1485,32 @@ describe('Request Transformer Module', () => { options: { reasoningEffort: 'low', textVerbosity: 'low', - reasoningSummary: 'auto' - } - } - } + reasoningSummary: 'auto', + }, + }, + }, }; const body: RequestBody = { model: 'gpt-5-codex-low', input: [ { id: 'msg_1', type: 'message', role: 'user', content: 'test' }, - { id: 'rs_2', type: 'message', role: 'assistant', content: 'reply' } + { id: 'rs_2', type: 'message', role: 'assistant', content: 'reply' }, ], - tools: [{ name: 'edit' }] + tools: [{ name: 'edit' }], }; - const result = await transformRequestBody(body, codexInstructions, userConfig); + const result = await transformRequestBody( + body, + codexInstructions, + 
userConfig, + ); - // Model normalized (gpt-5-codex now maps to gpt-5.1-codex) + // Model normalized for legacy identifiers expect(result.model).toBe('gpt-5.1-codex'); // IDs removed - expect(result.input!.every(item => !item.id)).toBe(true); + expect(result.input!.every((item) => !item.id)).toBe(true); // Per-model options applied expect(result.reasoning?.effort).toBe('low'); @@ -1356,7 +1520,7 @@ describe('Request Transformer Module', () => { // Codex fields set expect(result.store).toBe(false); expect(result.stream).toBe(true); - expect(result.instructions).toBe(codexInstructions); + expect(result.instructions).toContain(codexInstructions); expect(result.include).toEqual(['reasoning.encrypted_content']); }); });